aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/video.c7
-rw-r--r--drivers/atm/ambassador.c2
-rw-r--r--drivers/atm/atmtcp.c2
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/eni.h2
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/atm/fore200e.c2
-rw-r--r--drivers/atm/horizon.c2
-rw-r--r--drivers/atm/idt77252.c2
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/atm/nicstar.c2
-rw-r--r--drivers/atm/suni.c2
-rw-r--r--drivers/atm/uPD98402.c2
-rw-r--r--drivers/atm/zatm.c2
-rw-r--r--drivers/base/memory.c2
-rw-r--r--drivers/base/power/opp.c2
-rw-r--r--drivers/base/power/sysfs.c2
-rw-r--r--drivers/block/cciss_scsi.c2
-rw-r--r--drivers/block/rbd.c46
-rw-r--r--drivers/char/hw_random/n2-drv.c29
-rw-r--r--drivers/char/hw_random/n2rng.h2
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c2
-rw-r--r--drivers/char/mspec.c2
-rw-r--r--drivers/char/ramoops.c93
-rw-r--r--drivers/char/tpm/tpm.c102
-rw-r--r--drivers/char/tpm/tpm.h7
-rw-r--r--drivers/char/tpm/tpm_nsc.c14
-rw-r--r--drivers/char/tpm/tpm_tis.c182
-rw-r--r--drivers/clk/Kconfig3
-rw-r--r--drivers/connector/cn_proc.c3
-rw-r--r--drivers/cpufreq/cpufreq.c20
-rw-r--r--drivers/crypto/n2_core.c33
-rw-r--r--drivers/dma/imx-sdma.c96
-rw-r--r--drivers/edac/edac_stub.c2
-rw-r--r--drivers/edac/mpc85xx_edac.c4
-rw-r--r--drivers/firewire/core-card.c2
-rw-r--r--drivers/firewire/core-device.c2
-rw-r--r--drivers/firewire/core-topology.c2
-rw-r--r--drivers/firewire/core.h2
-rw-r--r--drivers/firewire/nosy.c2
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c5
-rw-r--r--drivers/gpu/drm/drm_edid.c4
-rw-r--r--drivers/gpu/drm/drm_gem.c47
-rw-r--r--drivers/gpu/drm/drm_modes.c87
-rw-r--r--drivers/gpu/drm/drm_platform.c5
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c41
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c69
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h46
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c191
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c39
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c13
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h6
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c8
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c142
-rw-r--r--drivers/gpu/drm/i915/intel_display.c699
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c17
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h9
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c8
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c15
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c6
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c6
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c46
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c50
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c232
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c167
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c34
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h78
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c148
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c119
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c114
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c22
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c28
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c112
-rw-r--r--drivers/gpu/drm/nouveau/nv40_mpeg.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv50_mpeg.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vram.c41
-rw-r--r--drivers/gpu/drm/nouveau/nv84_crypt.c2
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_copy.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fb.c68
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c270
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.fuc400
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c239
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grgpc.fuc474
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h483
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grhub.fuc808
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h838
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_instmem.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vram.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c5
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c22
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c7
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c57
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h2
-rw-r--r--drivers/gpu/drm/radeon/ni.c1
-rw-r--r--drivers/gpu/drm/radeon/r600.c15
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c23
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon.h27
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c42
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/cayman2
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/evergreen3
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r6001
-rw-r--r--drivers/gpu/drm/radeon/rs600.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_lock.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c18
-rw-r--r--drivers/hwmon/Kconfig127
-rw-r--r--drivers/hwmon/Makefile13
-rw-r--r--drivers/hwmon/coretemp.c177
-rw-r--r--drivers/hwmon/lm90.c65
-rw-r--r--drivers/hwmon/lm95241.c31
-rw-r--r--drivers/hwmon/lm95245.c543
-rw-r--r--drivers/hwmon/max1668.c502
-rw-r--r--drivers/hwmon/ntc_thermistor.c453
-rw-r--r--drivers/hwmon/pmbus/Kconfig100
-rw-r--r--drivers/hwmon/pmbus/Makefile13
-rw-r--r--drivers/hwmon/pmbus/adm1275.c (renamed from drivers/hwmon/adm1275.c)66
-rw-r--r--drivers/hwmon/pmbus/lm25066.c340
-rw-r--r--drivers/hwmon/pmbus/max16064.c (renamed from drivers/hwmon/max16064.c)57
-rw-r--r--drivers/hwmon/pmbus/max34440.c (renamed from drivers/hwmon/max34440.c)81
-rw-r--r--drivers/hwmon/pmbus/max8688.c (renamed from drivers/hwmon/max8688.c)69
-rw-r--r--drivers/hwmon/pmbus/pmbus.c (renamed from drivers/hwmon/pmbus.c)37
-rw-r--r--drivers/hwmon/pmbus/pmbus.h (renamed from drivers/hwmon/pmbus.h)48
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c (renamed from drivers/hwmon/pmbus_core.c)351
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c (renamed from drivers/hwmon/ucd9000.c)0
-rw-r--r--drivers/hwmon/pmbus/ucd9200.c (renamed from drivers/hwmon/ucd9200.c)0
-rw-r--r--drivers/hwmon/sht15.c2
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c41
-rw-r--r--drivers/i2c/busses/i2c-tegra.c17
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_tools.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c2
-rw-r--r--drivers/input/joystick/xpad.c31
-rw-r--r--drivers/input/keyboard/adp5588-keys.c1
-rw-r--r--drivers/input/keyboard/adp5589-keys.c1
-rw-r--r--drivers/input/keyboard/atkbd.c4
-rw-r--r--drivers/input/keyboard/gpio_keys.c166
-rw-r--r--drivers/input/keyboard/lm8323.c23
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c16
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c8
-rw-r--r--drivers/input/keyboard/qt1070.c2
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/keyboard/tegra-kbc.c2
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c2
-rw-r--r--drivers/input/misc/Kconfig51
-rw-r--r--drivers/input/misc/Makefile5
-rw-r--r--drivers/input/misc/bfin_rotary.c1
-rw-r--r--drivers/input/misc/kxtj9.c671
-rw-r--r--drivers/input/misc/mma8450.c256
-rw-r--r--drivers/input/misc/mpu3050.c376
-rw-r--r--drivers/input/misc/twl4030-vibra.c12
-rw-r--r--drivers/input/misc/twl6040-vibra.c423
-rw-r--r--drivers/input/misc/xen-kbdfront.c2
-rw-r--r--drivers/input/mouse/gpio_mouse.c2
-rw-r--r--drivers/input/mouse/lifebook.c4
-rw-r--r--drivers/input/mouse/pxa930_trkball.c1
-rw-r--r--drivers/input/mouse/sentelic.c1
-rw-r--r--drivers/input/mouse/synaptics.c107
-rw-r--r--drivers/input/mouse/synaptics.h18
-rw-r--r--drivers/input/serio/at32psif.c2
-rw-r--r--drivers/input/serio/hp_sdc.c2
-rw-r--r--drivers/input/tablet/aiptek.c1
-rw-r--r--drivers/input/tablet/wacom_wac.c68
-rw-r--r--drivers/input/touchscreen/ads7846.c15
-rw-r--r--drivers/input/touchscreen/atmel-wm97xx.c4
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c190
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c8
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c15
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c6
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c2
-rw-r--r--drivers/input/touchscreen/wm9705.c25
-rw-r--r--drivers/input/touchscreen/wm9712.c27
-rw-r--r--drivers/input/touchscreen/wm9713.c25
-rw-r--r--drivers/input/touchscreen/zylonite-wm97xx.c6
-rw-r--r--drivers/isdn/gigaset/gigaset.h2
-rw-r--r--drivers/isdn/i4l/isdn_net.c3
-rw-r--r--drivers/md/bitmap.c137
-rw-r--r--drivers/md/bitmap.h5
-rw-r--r--drivers/md/dm-crypt.c2
-rw-r--r--drivers/md/dm-kcopyd.c2
-rw-r--r--drivers/md/dm-mpath.c2
-rw-r--r--drivers/md/dm-queue-length.c2
-rw-r--r--drivers/md/dm-table.c2
-rw-r--r--drivers/md/md.c871
-rw-r--r--drivers/md/md.h110
-rw-r--r--drivers/md/raid1.c962
-rw-r--r--drivers/md/raid1.h26
-rw-r--r--drivers/md/raid10.c1183
-rw-r--r--drivers/md/raid10.h21
-rw-r--r--drivers/md/raid5.c1015
-rw-r--r--drivers/md/raid5.h99
-rw-r--r--drivers/media/Kconfig14
-rw-r--r--drivers/media/common/tuners/Kconfig10
-rw-r--r--drivers/media/common/tuners/Makefile1
-rw-r--r--drivers/media/common/tuners/tuner-types.c4
-rw-r--r--drivers/media/common/tuners/xc4000.c1691
-rw-r--r--drivers/media/common/tuners/xc4000.h67
-rw-r--r--drivers/media/dvb/Kconfig4
-rw-r--r--drivers/media/dvb/Makefile3
-rw-r--r--drivers/media/dvb/bt8xx/dvb-bt8xx.c4
-rw-r--r--drivers/media/dvb/ddbridge/Kconfig18
-rw-r--r--drivers/media/dvb/ddbridge/Makefile14
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge-core.c1719
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge-regs.h151
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge.h187
-rw-r--r--drivers/media/dvb/dvb-core/Makefile4
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c3
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.h21
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig1
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c135
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.h1
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c69
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.h16
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c188
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h3
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.h3
-rw-r--r--drivers/media/dvb/dvb-usb/technisat-usb2.c4
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.h3
-rw-r--r--drivers/media/dvb/firewire/firedtv-avc.c2
-rw-r--r--drivers/media/dvb/firewire/firedtv-ci.c34
-rw-r--r--drivers/media/dvb/frontends/Kconfig21
-rw-r--r--drivers/media/dvb/frontends/Makefile3
-rw-r--r--drivers/media/dvb/frontends/au8522_decoder.c2
-rw-r--r--drivers/media/dvb/frontends/cx24113.c20
-rw-r--r--drivers/media/dvb/frontends/cx24116.c6
-rw-r--r--drivers/media/dvb/frontends/cxd2820r.h4
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_core.c22
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_priv.h4
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c5
-rw-r--r--drivers/media/dvb/frontends/drxd_hard.c9
-rw-r--r--drivers/media/dvb/frontends/drxk.h47
-rw-r--r--drivers/media/dvb/frontends/drxk_hard.c6454
-rw-r--r--drivers/media/dvb/frontends/drxk_hard.h348
-rw-r--r--drivers/media/dvb/frontends/drxk_map.h449
-rw-r--r--drivers/media/dvb/frontends/itd1000.c25
-rw-r--r--drivers/media/dvb/frontends/nxt6000.c2
-rw-r--r--drivers/media/dvb/frontends/s5h1420.c12
-rw-r--r--drivers/media/dvb/frontends/tda18271c2dd.c1251
-rw-r--r--drivers/media/dvb/frontends/tda18271c2dd.h16
-rw-r--r--drivers/media/dvb/frontends/tda18271c2dd_maps.h814
-rw-r--r--drivers/media/dvb/ngene/Kconfig2
-rw-r--r--drivers/media/dvb/ngene/ngene-cards.c182
-rw-r--r--drivers/media/dvb/ngene/ngene-core.c26
-rw-r--r--drivers/media/dvb/ngene/ngene-dvb.c46
-rw-r--r--drivers/media/dvb/ngene/ngene.h7
-rw-r--r--drivers/media/dvb/siano/smscoreapi.c2
-rw-r--r--drivers/media/dvb/siano/smscoreapi.h1
-rw-r--r--drivers/media/radio/dsbr100.c7
-rw-r--r--drivers/media/radio/radio-aimslab.c5
-rw-r--r--drivers/media/radio/radio-aztech.c5
-rw-r--r--drivers/media/radio/radio-cadet.c5
-rw-r--r--drivers/media/radio/radio-gemtek.c7
-rw-r--r--drivers/media/radio/radio-maxiradio.c10
-rw-r--r--drivers/media/radio/radio-mr800.c6
-rw-r--r--drivers/media/radio/radio-rtrack2.c5
-rw-r--r--drivers/media/radio/radio-sf16fmi.c5
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c531
-rw-r--r--drivers/media/radio/radio-tea5764.c8
-rw-r--r--drivers/media/radio/radio-terratec.c5
-rw-r--r--drivers/media/radio/radio-timb.c3
-rw-r--r--drivers/media/radio/radio-trust.c5
-rw-r--r--drivers/media/radio/radio-typhoon.c9
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/radio-zoltrix.c5
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c4
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c6
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h1
-rw-r--r--drivers/media/radio/wl128x/fmdrv.h5
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c3
-rw-r--r--drivers/media/rc/Kconfig11
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/ene_ir.c4
-rw-r--r--drivers/media/rc/ene_ir.h2
-rw-r--r--drivers/media/rc/ir-lirc-codec.c15
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c449
-rw-r--r--drivers/media/rc/ir-raw.c1
-rw-r--r--drivers/media/rc/ite-cir.c5
-rw-r--r--drivers/media/rc/keymaps/rc-rc6-mce.c3
-rw-r--r--drivers/media/rc/mceusb.c10
-rw-r--r--drivers/media/rc/nuvoton-cir.c12
-rw-r--r--drivers/media/rc/rc-core-priv.h18
-rw-r--r--drivers/media/rc/rc-loopback.c13
-rw-r--r--drivers/media/rc/rc-main.c4
-rw-r--r--drivers/media/rc/redrat3.c63
-rw-r--r--drivers/media/rc/winbond-cir.c28
-rw-r--r--drivers/media/video/Kconfig44
-rw-r--r--drivers/media/video/Makefile8
-rw-r--r--drivers/media/video/adp1653.c491
-rw-r--r--drivers/media/video/arv.c5
-rw-r--r--drivers/media/video/atmel-isi.c1048
-rw-r--r--drivers/media/video/au0828/au0828-core.c1
-rw-r--r--drivers/media/video/au0828/au0828-video.c5
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c7
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c14
-rw-r--r--drivers/media/video/bt8xx/bttvp.h3
-rw-r--r--drivers/media/video/bw-qcam.c4
-rw-r--r--drivers/media/video/c-qcam.c4
-rw-r--r--drivers/media/video/cafe_ccic-regs.h166
-rw-r--r--drivers/media/video/cafe_ccic.c2267
-rw-r--r--drivers/media/video/cpia2/cpia2.h5
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c12
-rw-r--r--drivers/media/video/cx18/cx18-alsa-main.c1
-rw-r--r--drivers/media/video/cx18/cx18-driver.h1
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c1
-rw-r--r--drivers/media/video/cx18/cx18-version.h8
-rw-r--r--drivers/media/video/cx231xx/cx231xx-avcore.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c78
-rw-r--r--drivers/media/video/cx231xx/cx231xx-core.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c29
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h5
-rw-r--r--drivers/media/video/cx23885/altera-ci.c1
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c1
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c70
-rw-r--r--drivers/media/video/cx23885/cx23885-core.c13
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c23
-rw-r--r--drivers/media/video/cx23885/cx23885-input.c6
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c1
-rw-r--r--drivers/media/video/cx23885/cx23885.h4
-rw-r--r--drivers/media/video/cx88/cx88-alsa.c19
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c20
-rw-r--r--drivers/media/video/cx88/cx88-cards.c150
-rw-r--r--drivers/media/video/cx88/cx88-core.c11
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c77
-rw-r--r--drivers/media/video/cx88/cx88-input.c4
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c35
-rw-r--r--drivers/media/video/cx88/cx88-video.c65
-rw-r--r--drivers/media/video/cx88/cx88.h7
-rw-r--r--drivers/media/video/davinci/Kconfig23
-rw-r--r--drivers/media/video/davinci/Makefile2
-rw-r--r--drivers/media/video/davinci/vpbe.c864
-rw-r--r--drivers/media/video/davinci/vpbe_display.c1860
-rw-r--r--drivers/media/video/davinci/vpbe_osd.c1231
-rw-r--r--drivers/media/video/davinci/vpbe_osd_regs.h364
-rw-r--r--drivers/media/video/davinci/vpbe_venc.c566
-rw-r--r--drivers/media/video/davinci/vpbe_venc_regs.h177
-rw-r--r--drivers/media/video/davinci/vpif_capture.c9
-rw-r--r--drivers/media/video/davinci/vpif_capture.h7
-rw-r--r--drivers/media/video/davinci/vpif_display.c9
-rw-r--r--drivers/media/video/davinci/vpif_display.h8
-rw-r--r--drivers/media/video/em28xx/Kconfig12
-rw-r--r--drivers/media/video/em28xx/Makefile6
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c251
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c159
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c84
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c126
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c17
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c1
-rw-r--r--drivers/media/video/em28xx/em28xx-reg.h1
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c14
-rw-r--r--drivers/media/video/em28xx/em28xx.h24
-rw-r--r--drivers/media/video/et61x251/et61x251.h1
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c16
-rw-r--r--drivers/media/video/fsl-viu.c10
-rw-r--r--drivers/media/video/gspca/Kconfig10
-rw-r--r--drivers/media/video/gspca/Makefile2
-rw-r--r--drivers/media/video/gspca/gl860/gl860.h1
-rw-r--r--drivers/media/video/gspca/gspca.c23
-rw-r--r--drivers/media/video/gspca/ov519.c115
-rw-r--r--drivers/media/video/gspca/se401.c774
-rw-r--r--drivers/media/video/gspca/se401.h90
-rw-r--r--drivers/media/video/gspca/sunplus.c3
-rw-r--r--drivers/media/video/gspca/t613.c2
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c3
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c2
-rw-r--r--drivers/media/video/hdpvr/hdpvr.h6
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h1
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c19
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c5
-rw-r--r--drivers/media/video/ivtv/ivtv-version.h7
-rw-r--r--drivers/media/video/m5mols/m5mols_capture.c1
-rw-r--r--drivers/media/video/m5mols/m5mols_core.c1
-rw-r--r--drivers/media/video/marvell-ccic/Kconfig23
-rw-r--r--drivers/media/video/marvell-ccic/Makefile6
-rw-r--r--drivers/media/video/marvell-ccic/cafe-driver.c654
-rw-r--r--drivers/media/video/marvell-ccic/mcam-core.c1843
-rw-r--r--drivers/media/video/marvell-ccic/mcam-core.h323
-rw-r--r--drivers/media/video/marvell-ccic/mmp-driver.c340
-rw-r--r--drivers/media/video/mem2mem_testdev.c4
-rw-r--r--drivers/media/video/mt9m001.c14
-rw-r--r--drivers/media/video/mt9m111.c359
-rw-r--r--drivers/media/video/mt9t031.c3
-rw-r--r--drivers/media/video/mt9t112.c10
-rw-r--r--drivers/media/video/mt9v011.c85
-rw-r--r--drivers/media/video/mt9v022.c10
-rw-r--r--drivers/media/video/mt9v032.c20
-rw-r--r--drivers/media/video/mx1_camera.c47
-rw-r--r--drivers/media/video/mx2_camera.c66
-rw-r--r--drivers/media/video/mx3_camera.c71
-rw-r--r--drivers/media/video/omap/Kconfig7
-rw-r--r--drivers/media/video/omap/Makefile1
-rw-r--r--drivers/media/video/omap/omap_vout.c645
-rw-r--r--drivers/media/video/omap/omap_vout_vrfb.c390
-rw-r--r--drivers/media/video/omap/omap_vout_vrfb.h40
-rw-r--r--drivers/media/video/omap/omap_voutdef.h78
-rw-r--r--drivers/media/video/omap/omap_voutlib.c46
-rw-r--r--drivers/media/video/omap/omap_voutlib.h12
-rw-r--r--drivers/media/video/omap1_camera.c57
-rw-r--r--drivers/media/video/omap24xxcam.c9
-rw-r--r--drivers/media/video/omap3isp/isp.c1
-rw-r--r--drivers/media/video/omap3isp/isp.h6
-rw-r--r--drivers/media/video/omap3isp/ispccdc.c7
-rw-r--r--drivers/media/video/omap3isp/ispccp2.c27
-rw-r--r--drivers/media/video/omap3isp/ispccp2.h1
-rw-r--r--drivers/media/video/omap3isp/ispstat.c3
-rw-r--r--drivers/media/video/omap3isp/ispvideo.c1
-rw-r--r--drivers/media/video/omap3isp/ispvideo.h3
-rw-r--r--drivers/media/video/ov2640.c13
-rw-r--r--drivers/media/video/ov5642.c1012
-rw-r--r--drivers/media/video/ov7670.c3
-rw-r--r--drivers/media/video/ov7670.h20
-rw-r--r--drivers/media/video/ov772x.c10
-rw-r--r--drivers/media/video/ov9640.c13
-rw-r--r--drivers/media/video/ov9740.c556
-rw-r--r--drivers/media/video/pms.c4
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-main.c1
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c9
-rw-r--r--drivers/media/video/pwc/Kconfig1
-rw-r--r--drivers/media/video/pwc/pwc-ctrl.c803
-rw-r--r--drivers/media/video/pwc/pwc-dec1.c28
-rw-r--r--drivers/media/video/pwc/pwc-dec1.h8
-rw-r--r--drivers/media/video/pwc/pwc-dec23.c22
-rw-r--r--drivers/media/video/pwc/pwc-dec23.h10
-rw-r--r--drivers/media/video/pwc/pwc-if.c1259
-rw-r--r--drivers/media/video/pwc/pwc-ioctl.h323
-rw-r--r--drivers/media/video/pwc/pwc-kiara.c1
-rw-r--r--drivers/media/video/pwc/pwc-misc.c4
-rw-r--r--drivers/media/video/pwc/pwc-uncompress.c17
-rw-r--r--drivers/media/video/pwc/pwc-uncompress.h40
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c1257
-rw-r--r--drivers/media/video/pwc/pwc.h409
-rw-r--r--drivers/media/video/pxa_camera.c92
-rw-r--r--drivers/media/video/rj54n1cb0c.c7
-rw-r--r--drivers/media/video/s2255drv.c35
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c2
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c3
-rw-r--r--drivers/media/video/s5p-mfc/Makefile5
-rw-r--r--drivers/media/video/s5p-mfc/regs-mfc.h413
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc.c1274
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_cmd.c120
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_cmd.h30
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_common.h572
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c343
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_ctrl.h29
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_debug.h48
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.c1036
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.h23
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c1829
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.h23
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_intr.c92
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_intr.h26
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_opr.c1397
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_opr.h91
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_pm.c117
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_pm.h24
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_shm.c47
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_shm.h91
-rw-r--r--drivers/media/video/s5p-tv/Kconfig76
-rw-r--r--drivers/media/video/s5p-tv/Makefile17
-rw-r--r--drivers/media/video/s5p-tv/hdmi_drv.c1042
-rw-r--r--drivers/media/video/s5p-tv/hdmiphy_drv.c188
-rw-r--r--drivers/media/video/s5p-tv/mixer.h354
-rw-r--r--drivers/media/video/s5p-tv/mixer_drv.c487
-rw-r--r--drivers/media/video/s5p-tv/mixer_grp_layer.c185
-rw-r--r--drivers/media/video/s5p-tv/mixer_reg.c541
-rw-r--r--drivers/media/video/s5p-tv/mixer_video.c1006
-rw-r--r--drivers/media/video/s5p-tv/mixer_vp_layer.c211
-rw-r--r--drivers/media/video/s5p-tv/regs-hdmi.h141
-rw-r--r--drivers/media/video/s5p-tv/regs-mixer.h121
-rw-r--r--drivers/media/video/s5p-tv/regs-sdo.h63
-rw-r--r--drivers/media/video/s5p-tv/regs-vp.h88
-rw-r--r--drivers/media/video/s5p-tv/sdo_drv.c479
-rw-r--r--drivers/media/video/saa7115.c4
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c13
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c12
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c25
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c1
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c2
-rw-r--r--drivers/media/video/saa7134/saa7134.h3
-rw-r--r--drivers/media/video/saa7164/saa7164-encoder.c6
-rw-r--r--drivers/media/video/saa7164/saa7164-vbi.c6
-rw-r--r--drivers/media/video/saa7164/saa7164.h1
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c207
-rw-r--r--drivers/media/video/sh_mobile_csi2.c135
-rw-r--r--drivers/media/video/sh_vou.c3
-rw-r--r--drivers/media/video/sn9c102/sn9c102.h1
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c16
-rw-r--r--drivers/media/video/soc_camera.c281
-rw-r--r--drivers/media/video/soc_camera_platform.c10
-rw-r--r--drivers/media/video/sr030pc30.c7
-rw-r--r--drivers/media/video/tda7432.c5
-rw-r--r--drivers/media/video/timblogiw.c1
-rw-r--r--drivers/media/video/tlg2300/pd-common.h1
-rw-r--r--drivers/media/video/tlg2300/pd-dvb.c2
-rw-r--r--drivers/media/video/tlg2300/pd-main.c1
-rw-r--r--drivers/media/video/tlg2300/pd-radio.c2
-rw-r--r--drivers/media/video/tuner-core.c18
-rw-r--r--drivers/media/video/tw9910.c21
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c12
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c6
-rw-r--r--drivers/media/video/uvc/uvc_driver.c12
-rw-r--r--drivers/media/video/uvc/uvc_queue.c2
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c6
-rw-r--r--drivers/media/video/uvc/uvc_video.c2
-rw-r--r--drivers/media/video/uvc/uvcvideo.h3
-rw-r--r--drivers/media/video/v4l2-common.c3
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c37
-rw-r--r--drivers/media/video/v4l2-ctrls.c826
-rw-r--r--drivers/media/video/v4l2-device.c1
-rw-r--r--drivers/media/video/v4l2-event.c282
-rw-r--r--drivers/media/video/v4l2-fh.c23
-rw-r--r--drivers/media/video/v4l2-ioctl.c50
-rw-r--r--drivers/media/video/v4l2-subdev.c31
-rw-r--r--drivers/media/video/videobuf-dma-sg.c5
-rw-r--r--drivers/media/video/videobuf2-dma-sg.c8
-rw-r--r--drivers/media/video/videobuf2-memops.c6
-rw-r--r--drivers/media/video/vino.c5
-rw-r--r--drivers/media/video/vivi.c91
-rw-r--r--drivers/media/video/w9966.c4
-rw-r--r--drivers/media/video/zoran/zoran.h4
-rw-r--r--drivers/media/video/zoran/zoran_card.c7
-rw-r--r--drivers/media/video/zoran/zoran_driver.c3
-rw-r--r--drivers/media/video/zr364xx.c6
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/message/i2o/i2o_scsi.c2
-rw-r--r--drivers/mfd/Kconfig8
-rw-r--r--drivers/mfd/Makefile3
-rw-r--r--drivers/mfd/timberdale.c8
-rw-r--r--drivers/mfd/twl-core.c13
-rw-r--r--drivers/mfd/twl4030-audio.c277
-rw-r--r--drivers/mfd/twl4030-codec.c277
-rw-r--r--drivers/mfd/twl6040-core.c620
-rw-r--r--drivers/mfd/twl6040-irq.c191
-rw-r--r--drivers/misc/phantom.c2
-rw-r--r--drivers/misc/vmw_balloon.c31
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c261
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c3
-rw-r--r--drivers/mmc/host/tmio_mmc.h1
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/acenic.c45
-rw-r--r--drivers/net/acenic.h6
-rw-r--r--drivers/net/atlx/atl1.c2
-rw-r--r--drivers/net/atlx/atl2.c2
-rw-r--r--drivers/net/atlx/atl2.h2
-rw-r--r--drivers/net/bonding/bond_main.c8
-rw-r--r--drivers/net/bonding/bond_sysfs.c133
-rw-r--r--drivers/net/cassini.c2
-rw-r--r--drivers/net/cpmac.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/cxgb3/l2t.h2
-rw-r--r--drivers/net/cxgb3/t3cdev.h2
-rw-r--r--drivers/net/cxgb4/cxgb4_uld.h2
-rw-r--r--drivers/net/cxgb4/l2t.h2
-rw-r--r--drivers/net/fec.c125
-rw-r--r--drivers/net/forcedeth.c16
-rw-r--r--drivers/net/gianfar.c6
-rw-r--r--drivers/net/hamradio/6pack.c2
-rw-r--r--drivers/net/hamradio/dmascc.c2
-rw-r--r--drivers/net/ibm_newemac/core.c33
-rw-r--r--drivers/net/ibm_newemac/emac.h19
-rw-r--r--drivers/net/ibm_newemac/phy.c7
-rw-r--r--drivers/net/ibmveth.c2
-rw-r--r--drivers/net/ifb.c2
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/ppp_generic.c2
-rw-r--r--drivers/net/tg3.c287
-rw-r--r--drivers/net/tg3.h9
-rw-r--r--drivers/net/tun.c1
-rw-r--r--drivers/net/usb/asix.c7
-rw-r--r--drivers/net/veth.c2
-rw-r--r--drivers/net/wan/hdlc_fr.c5
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h2
-rw-r--r--drivers/net/wireless/airo.c1
-rw-r--r--drivers/net/wireless/b43/Kconfig2
-rw-r--r--drivers/net/wireless/b43/bus.c2
-rw-r--r--drivers/net/wireless/b43/main.c5
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h2
-rw-r--r--drivers/net/wireless/b43legacy/dma.h2
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c1
-rw-r--r--drivers/nfc/pn533.c2
-rw-r--r--drivers/of/of_net.c45
-rw-r--r--drivers/oprofile/oprofile_stats.h2
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c6
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c17
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c3
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c11
-rw-r--r--drivers/pci/pci-label.c2
-rw-r--r--drivers/pci/pci.c11
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c76
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c3
-rw-r--r--drivers/pci/probe.c87
-rw-r--r--drivers/pci/setup-bus.c3
-rw-r--r--drivers/pci/setup-irq.c4
-rw-r--r--drivers/pci/setup-res.c3
-rw-r--r--drivers/pci/xen-pcifront.c2
-rw-r--r--drivers/rtc/interface.c55
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/char/sclp_quiesce.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/cio/device.h2
-rw-r--r--drivers/s390/cio/qdio_main.c2
-rw-r--r--drivers/s390/cio/qdio_thinint.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/crypto/zcrypt_api.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c2
-rw-r--r--drivers/s390/crypto/zcrypt_mono.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c2
-rw-r--r--drivers/s390/net/fsm.h2
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c2
-rw-r--r--drivers/sbus/char/display7seg.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.h4
-rw-r--r--drivers/scsi/bfa/bfa.h51
-rw-r--r--drivers/scsi/bfa/bfa_core.c60
-rw-r--r--drivers/scsi/bfa/bfa_defs.h171
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h99
-rw-r--r--drivers/scsi/bfa/bfa_fc.h155
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c736
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h45
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c26
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h1
-rw-r--r--drivers/scsi/bfa/bfa_fcs_fcpim.c37
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c74
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c49
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c38
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c25
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c569
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h48
-rw-r--r--drivers/scsi/bfa/bfa_modules.h3
-rw-r--r--drivers/scsi/bfa/bfa_svc.c249
-rw-r--r--drivers/scsi/bfa/bfa_svc.h29
-rw-r--r--drivers/scsi/bfa/bfad.c8
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c1082
-rw-r--r--drivers/scsi/bfa/bfad_bsg.h237
-rw-r--r--drivers/scsi/bfa/bfad_drv.h6
-rw-r--r--drivers/scsi/bfa/bfad_im.c26
-rw-r--r--drivers/scsi/bfa/bfad_im.h22
-rw-r--r--drivers/scsi/bfa/bfi.h20
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h107
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.h16
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c434
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c732
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c433
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c194
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c51
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c8
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c82
-rw-r--r--drivers/scsi/dpt/dpti_i2o.h2
-rw-r--r--drivers/scsi/fcoe/fcoe.c69
-rw-r--r--drivers/scsi/hpsa.c6
-rw-r--r--drivers/scsi/hpsa.h2
-rw-r--r--drivers/scsi/ipr.c12
-rw-r--r--drivers/scsi/libfc/fc_exch.c9
-rw-r--r--drivers/scsi/libfc/fc_fcp.c9
-rw-r--r--drivers/scsi/libfc/fc_lport.c1
-rw-r--r--drivers/scsi/libiscsi.c22
-rw-r--r--drivers/scsi/libsas/sas_expander.c3
-rw-r--r--drivers/scsi/lpfc/lpfc.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c161
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c89
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h12
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c1354
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h125
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c105
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c222
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h30
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c90
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c97
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c399
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h29
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c18
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c1
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c4
-rw-r--r--drivers/scsi/mvsas/Kconfig9
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c101
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c508
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h99
-rw-r--r--drivers/scsi/mvsas/mv_chips.h17
-rw-r--r--drivers/scsi/mvsas/mv_defs.h11
-rw-r--r--drivers/scsi/mvsas/mv_init.c187
-rw-r--r--drivers/scsi/mvsas/mv_sas.c422
-rw-r--r--drivers/scsi/mvsas/mv_sas.h105
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h2
-rw-r--r--drivers/scsi/pmcraid.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c183
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c441
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c396
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h187
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c371
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c856
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c120
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c663
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c1091
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c160
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c556
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c747
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c275
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_transport_spi.c24
-rw-r--r--drivers/staging/ath6kl/os/linux/ar6000_drv.c1
-rw-r--r--drivers/staging/brcm80211/brcmsmac/mac80211_if.h2
-rw-r--r--drivers/staging/cxd2099/Kconfig11
-rw-r--r--drivers/staging/cxd2099/cxd2099.c311
-rw-r--r--drivers/staging/cxd2099/cxd2099.h18
-rw-r--r--drivers/staging/gma500/mrst_hdmi.c2
-rw-r--r--drivers/staging/octeon/ethernet-rx.c2
-rw-r--r--drivers/staging/octeon/ethernet-tx.c2
-rw-r--r--drivers/staging/solo6x10/solo6x10.h2
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/host_os.h2
-rw-r--r--drivers/staging/tm6000/tm6000-alsa.c12
-rw-r--r--drivers/staging/winbond/mds_s.h2
-rw-r--r--drivers/staging/winbond/wb35reg_s.h2
-rw-r--r--drivers/target/Kconfig1
-rw-r--r--drivers/target/Makefile2
-rw-r--r--drivers/target/iscsi/Kconfig8
-rw-r--r--drivers/target/iscsi/Makefile20
-rw-r--r--drivers/target/iscsi/iscsi_target.c4559
-rw-r--r--drivers/target/iscsi/iscsi_target.h42
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c490
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.h31
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c1882
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.h7
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h859
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.c531
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.h12
-rw-r--r--drivers/target/iscsi/iscsi_target_device.c87
-rw-r--r--drivers/target/iscsi/iscsi_target_device.h9
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c1004
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.h15
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c1299
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.h26
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c474
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.h18
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c1232
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h12
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c1067
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.h17
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c263
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.h14
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c1905
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h269
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.c664
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.h86
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c950
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.h64
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c849
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.h14
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c759
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h41
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.c551
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.h88
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c1819
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h60
-rw-r--r--drivers/target/target_core_transport.c7
-rw-r--r--drivers/tty/bfin_jtag_comm.c2
-rw-r--r--drivers/tty/rocket.c2
-rw-r--r--drivers/tty/serial/dz.c2
-rw-r--r--drivers/tty/serial/imx.c166
-rw-r--r--drivers/tty/serial/sb1250-duart.c2
-rw-r--r--drivers/tty/serial/zs.c2
-rw-r--r--drivers/usb/gadget/f_audio.c2
-rw-r--r--drivers/usb/gadget/f_rndis.c2
-rw-r--r--drivers/usb/gadget/uvc_queue.c2
-rw-r--r--drivers/usb/gadget/uvc_v4l2.c22
-rw-r--r--drivers/usb/image/microtek.c2
-rw-r--r--drivers/usb/misc/appledisplay.c2
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c1
-rw-r--r--drivers/usb/serial/garmin_gps.c2
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c2
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c2
-rw-r--r--drivers/video/vermilion/vermilion.h2
-rw-r--r--drivers/w1/masters/matrox_w1.c2
-rw-r--r--drivers/w1/slaves/w1_therm.c9
-rw-r--r--drivers/w1/w1.c2
-rw-r--r--drivers/w1/w1_family.h3
-rw-r--r--drivers/watchdog/Kconfig36
-rw-r--r--drivers/watchdog/Makefile8
-rw-r--r--drivers/watchdog/at91sam9_wdt.c21
-rw-r--r--drivers/watchdog/at91sam9_wdt.h37
-rw-r--r--drivers/watchdog/dw_wdt.c376
-rw-r--r--drivers/watchdog/hpwdt.c104
-rw-r--r--drivers/watchdog/iTCO_wdt.c412
-rw-r--r--drivers/watchdog/imx2_wdt.c6
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c2
-rw-r--r--drivers/watchdog/it8712f_wdt.c63
-rw-r--r--drivers/watchdog/it87_wdt.c168
-rw-r--r--drivers/watchdog/mpcore_wdt.c23
-rw-r--r--drivers/watchdog/mtx-1_wdt.c4
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c433
-rw-r--r--drivers/watchdog/pc87413_wdt.c96
-rw-r--r--drivers/watchdog/s3c2410_wdt.c10
-rw-r--r--drivers/watchdog/sbc7240_wdt.c2
-rw-r--r--drivers/watchdog/sch311x_wdt.c5
-rw-r--r--drivers/watchdog/sp805_wdt.c5
-rw-r--r--drivers/watchdog/watchdog_core.c111
-rw-r--r--drivers/watchdog/watchdog_dev.c395
-rw-r--r--drivers/watchdog/watchdog_dev.h33
-rw-r--r--drivers/xen/grant-table.c2
-rw-r--r--drivers/xen/xen-pciback/xenbus.c1
-rw-r--r--drivers/xen/xen-selfballoon.c4
857 files changed, 97156 insertions, 18050 deletions
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index db39e9e607d8..ada4b4d9bdc8 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -46,7 +46,6 @@
46 46
47#define PREFIX "ACPI: " 47#define PREFIX "ACPI: "
48 48
49#define ACPI_VIDEO_CLASS "video"
50#define ACPI_VIDEO_BUS_NAME "Video Bus" 49#define ACPI_VIDEO_BUS_NAME "Video Bus"
51#define ACPI_VIDEO_DEVICE_NAME "Video Device" 50#define ACPI_VIDEO_DEVICE_NAME "Video Device"
52#define ACPI_VIDEO_NOTIFY_SWITCH 0x80 51#define ACPI_VIDEO_NOTIFY_SWITCH 0x80
@@ -1445,7 +1444,8 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
1445 case ACPI_VIDEO_NOTIFY_SWITCH: /* User requested a switch, 1444 case ACPI_VIDEO_NOTIFY_SWITCH: /* User requested a switch,
1446 * most likely via hotkey. */ 1445 * most likely via hotkey. */
1447 acpi_bus_generate_proc_event(device, event, 0); 1446 acpi_bus_generate_proc_event(device, event, 0);
1448 keycode = KEY_SWITCHVIDEOMODE; 1447 if (!acpi_notifier_call_chain(device, event, 0))
1448 keycode = KEY_SWITCHVIDEOMODE;
1449 break; 1449 break;
1450 1450
1451 case ACPI_VIDEO_NOTIFY_PROBE: /* User plugged in or removed a video 1451 case ACPI_VIDEO_NOTIFY_PROBE: /* User plugged in or removed a video
@@ -1475,7 +1475,8 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
1475 break; 1475 break;
1476 } 1476 }
1477 1477
1478 acpi_notifier_call_chain(device, event, 0); 1478 if (event != ACPI_VIDEO_NOTIFY_SWITCH)
1479 acpi_notifier_call_chain(device, event, 0);
1479 1480
1480 if (keycode) { 1481 if (keycode) {
1481 input_report_key(input, keycode, 1); 1482 input_report_key(input, keycode, 1);
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index bb3b016b6ce8..f8f41e0e8a8c 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -38,7 +38,7 @@
38#include <linux/ihex.h> 38#include <linux/ihex.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40 40
41#include <asm/atomic.h> 41#include <linux/atomic.h>
42#include <asm/io.h> 42#include <asm/io.h>
43#include <asm/byteorder.h> 43#include <asm/byteorder.h>
44 44
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index 0b0625054a87..b22d71cac54c 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -11,7 +11,7 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14#include <asm/atomic.h> 14#include <linux/atomic.h>
15 15
16 16
17extern int atm_init_aal5(struct atm_vcc *vcc); /* "raw" AAL5 transport */ 17extern int atm_init_aal5(struct atm_vcc *vcc); /* "raw" AAL5 transport */
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 3230ea0df83c..93071417315f 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -21,7 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <asm/system.h> 22#include <asm/system.h>
23#include <asm/io.h> 23#include <asm/io.h>
24#include <asm/atomic.h> 24#include <linux/atomic.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/string.h> 26#include <asm/string.h>
27#include <asm/byteorder.h> 27#include <asm/byteorder.h>
diff --git a/drivers/atm/eni.h b/drivers/atm/eni.h
index 493a6932507e..dc9a62cc2605 100644
--- a/drivers/atm/eni.h
+++ b/drivers/atm/eni.h
@@ -14,7 +14,7 @@
14#include <linux/time.h> 14#include <linux/time.h>
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/spinlock.h> 16#include <linux/spinlock.h>
17#include <asm/atomic.h> 17#include <linux/atomic.h>
18 18
19#include "midway.h" 19#include "midway.h"
20 20
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 7c7b571647f9..5072f8ac16fd 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -52,7 +52,7 @@
52#include <asm/system.h> 52#include <asm/system.h>
53#include <asm/string.h> 53#include <asm/string.h>
54#include <asm/io.h> 54#include <asm/io.h>
55#include <asm/atomic.h> 55#include <linux/atomic.h>
56#include <asm/uaccess.h> 56#include <asm/uaccess.h>
57#include <linux/wait.h> 57#include <linux/wait.h>
58 58
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index bc9e702186dd..361f5aee3be1 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -44,7 +44,7 @@
44#include <asm/dma.h> 44#include <asm/dma.h>
45#include <asm/byteorder.h> 45#include <asm/byteorder.h>
46#include <asm/uaccess.h> 46#include <asm/uaccess.h>
47#include <asm/atomic.h> 47#include <linux/atomic.h>
48 48
49#ifdef CONFIG_SBUS 49#ifdef CONFIG_SBUS
50#include <linux/of.h> 50#include <linux/of.h>
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 287506183893..b81210330aca 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -45,7 +45,7 @@
45 45
46#include <asm/system.h> 46#include <asm/system.h>
47#include <asm/io.h> 47#include <asm/io.h>
48#include <asm/atomic.h> 48#include <linux/atomic.h>
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50#include <asm/string.h> 50#include <asm/string.h>
51#include <asm/byteorder.h> 51#include <asm/byteorder.h>
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index be0dbfeb541c..db06f34419cf 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -46,7 +46,7 @@
46 46
47#include <asm/io.h> 47#include <asm/io.h>
48#include <asm/uaccess.h> 48#include <asm/uaccess.h>
49#include <asm/atomic.h> 49#include <linux/atomic.h>
50#include <asm/byteorder.h> 50#include <asm/byteorder.h>
51 51
52#ifdef CONFIG_ATM_IDT77252_USE_SUNI 52#ifdef CONFIG_ATM_IDT77252_USE_SUNI
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 957106f636ea..cb90f7a3e074 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -58,7 +58,7 @@
58#include <linux/slab.h> 58#include <linux/slab.h>
59#include <asm/system.h> 59#include <asm/system.h>
60#include <asm/io.h> 60#include <asm/io.h>
61#include <asm/atomic.h> 61#include <linux/atomic.h>
62#include <asm/uaccess.h> 62#include <asm/uaccess.h>
63#include <asm/string.h> 63#include <asm/string.h>
64#include <asm/byteorder.h> 64#include <asm/byteorder.h>
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 6b313ee9231b..1c70c45fa044 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -51,7 +51,7 @@
51#include <linux/idr.h> 51#include <linux/idr.h>
52#include <asm/io.h> 52#include <asm/io.h>
53#include <asm/uaccess.h> 53#include <asm/uaccess.h>
54#include <asm/atomic.h> 54#include <linux/atomic.h>
55#include "nicstar.h" 55#include "nicstar.h"
56#ifdef CONFIG_ATM_NICSTAR_USE_SUNI 56#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
57#include "suni.h" 57#include "suni.h"
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
index 41c56eae4c81..90f1ccca9e52 100644
--- a/drivers/atm/suni.c
+++ b/drivers/atm/suni.c
@@ -25,7 +25,7 @@
25#include <asm/system.h> 25#include <asm/system.h>
26#include <asm/param.h> 26#include <asm/param.h>
27#include <asm/uaccess.h> 27#include <asm/uaccess.h>
28#include <asm/atomic.h> 28#include <linux/atomic.h>
29 29
30#include "suni.h" 30#include "suni.h"
31 31
diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
index c45ae0573bbd..5120a96b3a89 100644
--- a/drivers/atm/uPD98402.c
+++ b/drivers/atm/uPD98402.c
@@ -11,7 +11,7 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14#include <asm/atomic.h> 14#include <linux/atomic.h>
15 15
16#include "uPD98402.h" 16#include "uPD98402.h"
17 17
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 7f8c5132ff32..d889f56e8d8c 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -27,7 +27,7 @@
27#include <asm/system.h> 27#include <asm/system.h>
28#include <asm/string.h> 28#include <asm/string.h>
29#include <asm/io.h> 29#include <asm/io.h>
30#include <asm/atomic.h> 30#include <linux/atomic.h>
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
32 32
33#include "uPD98401.h" 33#include "uPD98401.h"
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 45d7c8fc73bd..2840ed4668c1 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -24,7 +24,7 @@
24#include <linux/stat.h> 24#include <linux/stat.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26 26
27#include <asm/atomic.h> 27#include <linux/atomic.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29 29
30static DEFINE_MUTEX(mem_sysfs_mutex); 30static DEFINE_MUTEX(mem_sysfs_mutex);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 5cc12322ef32..b23de185cb04 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -453,7 +453,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
453static int opp_set_availability(struct device *dev, unsigned long freq, 453static int opp_set_availability(struct device *dev, unsigned long freq,
454 bool availability_req) 454 bool availability_req)
455{ 455{
456 struct device_opp *tmp_dev_opp, *dev_opp = NULL; 456 struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
457 struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); 457 struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
458 int r = 0; 458 int r = 0;
459 459
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 942d6a7c9ae1..17b7934f31cb 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -5,7 +5,7 @@
5#include <linux/device.h> 5#include <linux/device.h>
6#include <linux/string.h> 6#include <linux/string.h>
7#include <linux/pm_runtime.h> 7#include <linux/pm_runtime.h>
8#include <asm/atomic.h> 8#include <linux/atomic.h>
9#include <linux/jiffies.h> 9#include <linux/jiffies.h>
10#include "power.h" 10#include "power.h"
11 11
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 696100241a6f..951a4e33b92b 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -33,7 +33,7 @@
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/string.h> 34#include <linux/string.h>
35 35
36#include <asm/atomic.h> 36#include <linux/atomic.h>
37 37
38#include <scsi/scsi_cmnd.h> 38#include <scsi/scsi_cmnd.h>
39#include <scsi/scsi_device.h> 39#include <scsi/scsi_device.h>
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 1278098624e6..15f65b5f3fc7 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -630,6 +630,14 @@ static int rbd_get_num_segments(struct rbd_image_header *header,
630} 630}
631 631
632/* 632/*
633 * returns the size of an object in the image
634 */
635static u64 rbd_obj_bytes(struct rbd_image_header *header)
636{
637 return 1 << header->obj_order;
638}
639
640/*
633 * bio helpers 641 * bio helpers
634 */ 642 */
635 643
@@ -1253,6 +1261,35 @@ fail:
1253 return ret; 1261 return ret;
1254} 1262}
1255 1263
1264/*
1265 * Request sync osd unwatch
1266 */
1267static int rbd_req_sync_unwatch(struct rbd_device *dev,
1268 const char *obj)
1269{
1270 struct ceph_osd_req_op *ops;
1271
1272 int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_WATCH, 0);
1273 if (ret < 0)
1274 return ret;
1275
1276 ops[0].watch.ver = 0;
1277 ops[0].watch.cookie = cpu_to_le64(dev->watch_event->cookie);
1278 ops[0].watch.flag = 0;
1279
1280 ret = rbd_req_sync_op(dev, NULL,
1281 CEPH_NOSNAP,
1282 0,
1283 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1284 ops,
1285 1, obj, 0, 0, NULL, NULL, NULL);
1286
1287 rbd_destroy_ops(ops);
1288 ceph_osdc_cancel_event(dev->watch_event);
1289 dev->watch_event = NULL;
1290 return ret;
1291}
1292
1256struct rbd_notify_info { 1293struct rbd_notify_info {
1257 struct rbd_device *dev; 1294 struct rbd_device *dev;
1258}; 1295};
@@ -1736,6 +1773,13 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
1736 q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock); 1773 q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock);
1737 if (!q) 1774 if (!q)
1738 goto out_disk; 1775 goto out_disk;
1776
1777 /* set io sizes to object size */
1778 blk_queue_max_hw_sectors(q, rbd_obj_bytes(&rbd_dev->header) / 512ULL);
1779 blk_queue_max_segment_size(q, rbd_obj_bytes(&rbd_dev->header));
1780 blk_queue_io_min(q, rbd_obj_bytes(&rbd_dev->header));
1781 blk_queue_io_opt(q, rbd_obj_bytes(&rbd_dev->header));
1782
1739 blk_queue_merge_bvec(q, rbd_merge_bvec); 1783 blk_queue_merge_bvec(q, rbd_merge_bvec);
1740 disk->queue = q; 1784 disk->queue = q;
1741 1785
@@ -2290,7 +2334,7 @@ static void rbd_dev_release(struct device *dev)
2290 ceph_osdc_unregister_linger_request(&rbd_dev->client->osdc, 2334 ceph_osdc_unregister_linger_request(&rbd_dev->client->osdc,
2291 rbd_dev->watch_request); 2335 rbd_dev->watch_request);
2292 if (rbd_dev->watch_event) 2336 if (rbd_dev->watch_event)
2293 ceph_osdc_cancel_event(rbd_dev->watch_event); 2337 rbd_req_sync_unwatch(rbd_dev, rbd_dev->obj_md_name);
2294 2338
2295 rbd_put_client(rbd_dev); 2339 rbd_put_client(rbd_dev);
2296 2340
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index ac6739e085e3..c3de70de00d4 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -1,6 +1,6 @@
1/* n2-drv.c: Niagara-2 RNG driver. 1/* n2-drv.c: Niagara-2 RNG driver.
2 * 2 *
3 * Copyright (C) 2008 David S. Miller <davem@davemloft.net> 3 * Copyright (C) 2008, 2011 David S. Miller <davem@davemloft.net>
4 */ 4 */
5 5
6#include <linux/kernel.h> 6#include <linux/kernel.h>
@@ -22,8 +22,8 @@
22 22
23#define DRV_MODULE_NAME "n2rng" 23#define DRV_MODULE_NAME "n2rng"
24#define PFX DRV_MODULE_NAME ": " 24#define PFX DRV_MODULE_NAME ": "
25#define DRV_MODULE_VERSION "0.1" 25#define DRV_MODULE_VERSION "0.2"
26#define DRV_MODULE_RELDATE "May 15, 2008" 26#define DRV_MODULE_RELDATE "July 27, 2011"
27 27
28static char version[] __devinitdata = 28static char version[] __devinitdata =
29 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 29 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -623,14 +623,14 @@ static const struct of_device_id n2rng_match[];
623static int __devinit n2rng_probe(struct platform_device *op) 623static int __devinit n2rng_probe(struct platform_device *op)
624{ 624{
625 const struct of_device_id *match; 625 const struct of_device_id *match;
626 int victoria_falls; 626 int multi_capable;
627 int err = -ENOMEM; 627 int err = -ENOMEM;
628 struct n2rng *np; 628 struct n2rng *np;
629 629
630 match = of_match_device(n2rng_match, &op->dev); 630 match = of_match_device(n2rng_match, &op->dev);
631 if (!match) 631 if (!match)
632 return -EINVAL; 632 return -EINVAL;
633 victoria_falls = (match->data != NULL); 633 multi_capable = (match->data != NULL);
634 634
635 n2rng_driver_version(); 635 n2rng_driver_version();
636 np = kzalloc(sizeof(*np), GFP_KERNEL); 636 np = kzalloc(sizeof(*np), GFP_KERNEL);
@@ -640,8 +640,8 @@ static int __devinit n2rng_probe(struct platform_device *op)
640 640
641 INIT_DELAYED_WORK(&np->work, n2rng_work); 641 INIT_DELAYED_WORK(&np->work, n2rng_work);
642 642
643 if (victoria_falls) 643 if (multi_capable)
644 np->flags |= N2RNG_FLAG_VF; 644 np->flags |= N2RNG_FLAG_MULTI;
645 645
646 err = -ENODEV; 646 err = -ENODEV;
647 np->hvapi_major = 2; 647 np->hvapi_major = 2;
@@ -658,10 +658,10 @@ static int __devinit n2rng_probe(struct platform_device *op)
658 } 658 }
659 } 659 }
660 660
661 if (np->flags & N2RNG_FLAG_VF) { 661 if (np->flags & N2RNG_FLAG_MULTI) {
662 if (np->hvapi_major < 2) { 662 if (np->hvapi_major < 2) {
663 dev_err(&op->dev, "VF RNG requires HVAPI major " 663 dev_err(&op->dev, "multi-unit-capable RNG requires "
664 "version 2 or later, got %lu\n", 664 "HVAPI major version 2 or later, got %lu\n",
665 np->hvapi_major); 665 np->hvapi_major);
666 goto out_hvapi_unregister; 666 goto out_hvapi_unregister;
667 } 667 }
@@ -688,8 +688,8 @@ static int __devinit n2rng_probe(struct platform_device *op)
688 goto out_free_units; 688 goto out_free_units;
689 689
690 dev_info(&op->dev, "Found %s RNG, units: %d\n", 690 dev_info(&op->dev, "Found %s RNG, units: %d\n",
691 ((np->flags & N2RNG_FLAG_VF) ? 691 ((np->flags & N2RNG_FLAG_MULTI) ?
692 "Victoria Falls" : "Niagara2"), 692 "multi-unit-capable" : "single-unit"),
693 np->num_units); 693 np->num_units);
694 694
695 np->hwrng.name = "n2rng"; 695 np->hwrng.name = "n2rng";
@@ -751,6 +751,11 @@ static const struct of_device_id n2rng_match[] = {
751 .compatible = "SUNW,vf-rng", 751 .compatible = "SUNW,vf-rng",
752 .data = (void *) 1, 752 .data = (void *) 1,
753 }, 753 },
754 {
755 .name = "random-number-generator",
756 .compatible = "SUNW,kt-rng",
757 .data = (void *) 1,
758 },
754 {}, 759 {},
755}; 760};
756MODULE_DEVICE_TABLE(of, n2rng_match); 761MODULE_DEVICE_TABLE(of, n2rng_match);
diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h
index 4bea07f30978..f244ac89087f 100644
--- a/drivers/char/hw_random/n2rng.h
+++ b/drivers/char/hw_random/n2rng.h
@@ -68,7 +68,7 @@ struct n2rng {
68 struct platform_device *op; 68 struct platform_device *op;
69 69
70 unsigned long flags; 70 unsigned long flags;
71#define N2RNG_FLAG_VF 0x00000001 /* Victoria Falls RNG, else N2 */ 71#define N2RNG_FLAG_MULTI 0x00000001 /* Multi-unit capable RNG */
72#define N2RNG_FLAG_CONTROL 0x00000002 /* Operating in control domain */ 72#define N2RNG_FLAG_CONTROL 0x00000002 /* Operating in control domain */
73#define N2RNG_FLAG_READY 0x00000008 /* Ready for hw-rng layer */ 73#define N2RNG_FLAG_READY 0x00000008 /* Ready for hw-rng layer */
74#define N2RNG_FLAG_SHUTDOWN 0x00000010 /* Driver unregistering */ 74#define N2RNG_FLAG_SHUTDOWN 0x00000010 /* Driver unregistering */
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 320668f4c3aa..3302586655c4 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -52,7 +52,7 @@
52#include <linux/string.h> 52#include <linux/string.h>
53#include <linux/ctype.h> 53#include <linux/ctype.h>
54#include <linux/delay.h> 54#include <linux/delay.h>
55#include <asm/atomic.h> 55#include <linux/atomic.h>
56 56
57#ifdef CONFIG_X86 57#ifdef CONFIG_X86
58/* 58/*
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 25d139c9dbed..5c0d96a820fa 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -46,7 +46,7 @@
46#include <asm/page.h> 46#include <asm/page.h>
47#include <asm/system.h> 47#include <asm/system.h>
48#include <asm/pgtable.h> 48#include <asm/pgtable.h>
49#include <asm/atomic.h> 49#include <linux/atomic.h>
50#include <asm/tlbflush.h> 50#include <asm/tlbflush.h>
51#include <asm/uncached.h> 51#include <asm/uncached.h>
52#include <asm/sn/addrs.h> 52#include <asm/sn/addrs.h>
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
index 1a9f5f6d6ac5..fca0c51bbc90 100644
--- a/drivers/char/ramoops.c
+++ b/drivers/char/ramoops.c
@@ -19,18 +19,26 @@
19 * 19 *
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
22#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/err.h>
23#include <linux/module.h> 26#include <linux/module.h>
24#include <linux/kmsg_dump.h> 27#include <linux/kmsg_dump.h>
25#include <linux/time.h> 28#include <linux/time.h>
26#include <linux/io.h> 29#include <linux/io.h>
27#include <linux/ioport.h> 30#include <linux/ioport.h>
28#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/slab.h>
29#include <linux/ramoops.h> 33#include <linux/ramoops.h>
30 34
31#define RAMOOPS_KERNMSG_HDR "====" 35#define RAMOOPS_KERNMSG_HDR "===="
36#define MIN_MEM_SIZE 4096UL
32 37
33#define RECORD_SIZE 4096UL 38static ulong record_size = MIN_MEM_SIZE;
39module_param(record_size, ulong, 0400);
40MODULE_PARM_DESC(record_size,
41 "size of each dump done on oops/panic");
34 42
35static ulong mem_address; 43static ulong mem_address;
36module_param(mem_address, ulong, 0400); 44module_param(mem_address, ulong, 0400);
@@ -52,10 +60,15 @@ static struct ramoops_context {
52 void *virt_addr; 60 void *virt_addr;
53 phys_addr_t phys_addr; 61 phys_addr_t phys_addr;
54 unsigned long size; 62 unsigned long size;
63 unsigned long record_size;
64 int dump_oops;
55 int count; 65 int count;
56 int max_count; 66 int max_count;
57} oops_cxt; 67} oops_cxt;
58 68
69static struct platform_device *dummy;
70static struct ramoops_platform_data *dummy_data;
71
59static void ramoops_do_dump(struct kmsg_dumper *dumper, 72static void ramoops_do_dump(struct kmsg_dumper *dumper,
60 enum kmsg_dump_reason reason, const char *s1, unsigned long l1, 73 enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
61 const char *s2, unsigned long l2) 74 const char *s2, unsigned long l2)
@@ -74,13 +87,13 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
74 return; 87 return;
75 88
76 /* Only dump oopses if dump_oops is set */ 89 /* Only dump oopses if dump_oops is set */
77 if (reason == KMSG_DUMP_OOPS && !dump_oops) 90 if (reason == KMSG_DUMP_OOPS && !cxt->dump_oops)
78 return; 91 return;
79 92
80 buf = cxt->virt_addr + (cxt->count * RECORD_SIZE); 93 buf = cxt->virt_addr + (cxt->count * cxt->record_size);
81 buf_orig = buf; 94 buf_orig = buf;
82 95
83 memset(buf, '\0', RECORD_SIZE); 96 memset(buf, '\0', cxt->record_size);
84 res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR); 97 res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
85 buf += res; 98 buf += res;
86 do_gettimeofday(&timestamp); 99 do_gettimeofday(&timestamp);
@@ -88,8 +101,8 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
88 buf += res; 101 buf += res;
89 102
90 hdr_size = buf - buf_orig; 103 hdr_size = buf - buf_orig;
91 l2_cpy = min(l2, RECORD_SIZE - hdr_size); 104 l2_cpy = min(l2, cxt->record_size - hdr_size);
92 l1_cpy = min(l1, RECORD_SIZE - hdr_size - l2_cpy); 105 l1_cpy = min(l1, cxt->record_size - hdr_size - l2_cpy);
93 106
94 s2_start = l2 - l2_cpy; 107 s2_start = l2 - l2_cpy;
95 s1_start = l1 - l1_cpy; 108 s1_start = l1 - l1_cpy;
@@ -106,44 +119,51 @@ static int __init ramoops_probe(struct platform_device *pdev)
106 struct ramoops_context *cxt = &oops_cxt; 119 struct ramoops_context *cxt = &oops_cxt;
107 int err = -EINVAL; 120 int err = -EINVAL;
108 121
109 if (pdata) { 122 if (!pdata->mem_size || !pdata->record_size) {
110 mem_size = pdata->mem_size; 123 pr_err("The memory size and the record size must be "
111 mem_address = pdata->mem_address; 124 "non-zero\n");
125 goto fail3;
112 } 126 }
113 127
114 if (!mem_size) { 128 rounddown_pow_of_two(pdata->mem_size);
115 printk(KERN_ERR "ramoops: invalid size specification"); 129 rounddown_pow_of_two(pdata->record_size);
130
131 /* Check for the minimum memory size */
132 if (pdata->mem_size < MIN_MEM_SIZE &&
133 pdata->record_size < MIN_MEM_SIZE) {
134 pr_err("memory size too small, minium is %lu\n", MIN_MEM_SIZE);
116 goto fail3; 135 goto fail3;
117 } 136 }
118 137
119 rounddown_pow_of_two(mem_size); 138 if (pdata->mem_size < pdata->record_size) {
120 139 pr_err("The memory size must be larger than the "
121 if (mem_size < RECORD_SIZE) { 140 "records size\n");
122 printk(KERN_ERR "ramoops: size too small");
123 goto fail3; 141 goto fail3;
124 } 142 }
125 143
126 cxt->max_count = mem_size / RECORD_SIZE; 144 cxt->max_count = pdata->mem_size / pdata->record_size;
127 cxt->count = 0; 145 cxt->count = 0;
128 cxt->size = mem_size; 146 cxt->size = pdata->mem_size;
129 cxt->phys_addr = mem_address; 147 cxt->phys_addr = pdata->mem_address;
148 cxt->record_size = pdata->record_size;
149 cxt->dump_oops = pdata->dump_oops;
130 150
131 if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) { 151 if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
132 printk(KERN_ERR "ramoops: request mem region failed"); 152 pr_err("request mem region failed\n");
133 err = -EINVAL; 153 err = -EINVAL;
134 goto fail3; 154 goto fail3;
135 } 155 }
136 156
137 cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size); 157 cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size);
138 if (!cxt->virt_addr) { 158 if (!cxt->virt_addr) {
139 printk(KERN_ERR "ramoops: ioremap failed"); 159 pr_err("ioremap failed\n");
140 goto fail2; 160 goto fail2;
141 } 161 }
142 162
143 cxt->dump.dump = ramoops_do_dump; 163 cxt->dump.dump = ramoops_do_dump;
144 err = kmsg_dump_register(&cxt->dump); 164 err = kmsg_dump_register(&cxt->dump);
145 if (err) { 165 if (err) {
146 printk(KERN_ERR "ramoops: registering kmsg dumper failed"); 166 pr_err("registering kmsg dumper failed\n");
147 goto fail1; 167 goto fail1;
148 } 168 }
149 169
@@ -162,7 +182,7 @@ static int __exit ramoops_remove(struct platform_device *pdev)
162 struct ramoops_context *cxt = &oops_cxt; 182 struct ramoops_context *cxt = &oops_cxt;
163 183
164 if (kmsg_dump_unregister(&cxt->dump) < 0) 184 if (kmsg_dump_unregister(&cxt->dump) < 0)
165 printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper"); 185 pr_warn("could not unregister kmsg_dumper\n");
166 186
167 iounmap(cxt->virt_addr); 187 iounmap(cxt->virt_addr);
168 release_mem_region(cxt->phys_addr, cxt->size); 188 release_mem_region(cxt->phys_addr, cxt->size);
@@ -179,12 +199,39 @@ static struct platform_driver ramoops_driver = {
179 199
180static int __init ramoops_init(void) 200static int __init ramoops_init(void)
181{ 201{
182 return platform_driver_probe(&ramoops_driver, ramoops_probe); 202 int ret;
203 ret = platform_driver_probe(&ramoops_driver, ramoops_probe);
204 if (ret == -ENODEV) {
205 /*
206 * If we didn't find a platform device, we use module parameters
207 * building platform data on the fly.
208 */
209 pr_info("platform device not found, using module parameters\n");
210 dummy_data = kzalloc(sizeof(struct ramoops_platform_data),
211 GFP_KERNEL);
212 if (!dummy_data)
213 return -ENOMEM;
214 dummy_data->mem_size = mem_size;
215 dummy_data->mem_address = mem_address;
216 dummy_data->record_size = record_size;
217 dummy_data->dump_oops = dump_oops;
218 dummy = platform_create_bundle(&ramoops_driver, ramoops_probe,
219 NULL, 0, dummy_data,
220 sizeof(struct ramoops_platform_data));
221
222 if (IS_ERR(dummy))
223 ret = PTR_ERR(dummy);
224 else
225 ret = 0;
226 }
227
228 return ret;
183} 229}
184 230
185static void __exit ramoops_exit(void) 231static void __exit ramoops_exit(void)
186{ 232{
187 platform_driver_unregister(&ramoops_driver); 233 platform_driver_unregister(&ramoops_driver);
234 kfree(dummy_data);
188} 235}
189 236
190module_init(ramoops_init); 237module_init(ramoops_init);
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 7beb0e25f1e1..caf8012ef47c 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -534,6 +534,7 @@ void tpm_get_timeouts(struct tpm_chip *chip)
534 struct duration_t *duration_cap; 534 struct duration_t *duration_cap;
535 ssize_t rc; 535 ssize_t rc;
536 u32 timeout; 536 u32 timeout;
537 unsigned int scale = 1;
537 538
538 tpm_cmd.header.in = tpm_getcap_header; 539 tpm_cmd.header.in = tpm_getcap_header;
539 tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; 540 tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
@@ -545,24 +546,30 @@ void tpm_get_timeouts(struct tpm_chip *chip)
545 if (rc) 546 if (rc)
546 goto duration; 547 goto duration;
547 548
548 if (be32_to_cpu(tpm_cmd.header.out.length) 549 if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
549 != 4 * sizeof(u32)) 550 be32_to_cpu(tpm_cmd.header.out.length)
550 goto duration; 551 != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
552 return;
551 553
552 timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout; 554 timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
553 /* Don't overwrite default if value is 0 */ 555 /* Don't overwrite default if value is 0 */
554 timeout = be32_to_cpu(timeout_cap->a); 556 timeout = be32_to_cpu(timeout_cap->a);
557 if (timeout && timeout < 1000) {
558 /* timeouts in msec rather usec */
559 scale = 1000;
560 chip->vendor.timeout_adjusted = true;
561 }
555 if (timeout) 562 if (timeout)
556 chip->vendor.timeout_a = usecs_to_jiffies(timeout); 563 chip->vendor.timeout_a = usecs_to_jiffies(timeout * scale);
557 timeout = be32_to_cpu(timeout_cap->b); 564 timeout = be32_to_cpu(timeout_cap->b);
558 if (timeout) 565 if (timeout)
559 chip->vendor.timeout_b = usecs_to_jiffies(timeout); 566 chip->vendor.timeout_b = usecs_to_jiffies(timeout * scale);
560 timeout = be32_to_cpu(timeout_cap->c); 567 timeout = be32_to_cpu(timeout_cap->c);
561 if (timeout) 568 if (timeout)
562 chip->vendor.timeout_c = usecs_to_jiffies(timeout); 569 chip->vendor.timeout_c = usecs_to_jiffies(timeout * scale);
563 timeout = be32_to_cpu(timeout_cap->d); 570 timeout = be32_to_cpu(timeout_cap->d);
564 if (timeout) 571 if (timeout)
565 chip->vendor.timeout_d = usecs_to_jiffies(timeout); 572 chip->vendor.timeout_d = usecs_to_jiffies(timeout * scale);
566 573
567duration: 574duration:
568 tpm_cmd.header.in = tpm_getcap_header; 575 tpm_cmd.header.in = tpm_getcap_header;
@@ -575,23 +582,31 @@ duration:
575 if (rc) 582 if (rc)
576 return; 583 return;
577 584
578 if (be32_to_cpu(tpm_cmd.header.out.return_code) 585 if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
579 != 3 * sizeof(u32)) 586 be32_to_cpu(tpm_cmd.header.out.length)
587 != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
580 return; 588 return;
589
581 duration_cap = &tpm_cmd.params.getcap_out.cap.duration; 590 duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
582 chip->vendor.duration[TPM_SHORT] = 591 chip->vendor.duration[TPM_SHORT] =
583 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); 592 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
593 chip->vendor.duration[TPM_MEDIUM] =
594 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
595 chip->vendor.duration[TPM_LONG] =
596 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
597
584 /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above 598 /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
585 * value wrong and apparently reports msecs rather than usecs. So we 599 * value wrong and apparently reports msecs rather than usecs. So we
586 * fix up the resulting too-small TPM_SHORT value to make things work. 600 * fix up the resulting too-small TPM_SHORT value to make things work.
601 * We also scale the TPM_MEDIUM and -_LONG values by 1000.
587 */ 602 */
588 if (chip->vendor.duration[TPM_SHORT] < (HZ/100)) 603 if (chip->vendor.duration[TPM_SHORT] < (HZ / 100)) {
589 chip->vendor.duration[TPM_SHORT] = HZ; 604 chip->vendor.duration[TPM_SHORT] = HZ;
590 605 chip->vendor.duration[TPM_MEDIUM] *= 1000;
591 chip->vendor.duration[TPM_MEDIUM] = 606 chip->vendor.duration[TPM_LONG] *= 1000;
592 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium)); 607 chip->vendor.duration_adjusted = true;
593 chip->vendor.duration[TPM_LONG] = 608 dev_info(chip->dev, "Adjusting TPM timeout parameters.");
594 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long)); 609 }
595} 610}
596EXPORT_SYMBOL_GPL(tpm_get_timeouts); 611EXPORT_SYMBOL_GPL(tpm_get_timeouts);
597 612
@@ -600,7 +615,7 @@ void tpm_continue_selftest(struct tpm_chip *chip)
600 u8 data[] = { 615 u8 data[] = {
601 0, 193, /* TPM_TAG_RQU_COMMAND */ 616 0, 193, /* TPM_TAG_RQU_COMMAND */
602 0, 0, 0, 10, /* length */ 617 0, 0, 0, 10, /* length */
603 0, 0, 0, 83, /* TPM_ORD_GetCapability */ 618 0, 0, 0, 83, /* TPM_ORD_ContinueSelfTest */
604 }; 619 };
605 620
606 tpm_transmit(chip, data, sizeof(data)); 621 tpm_transmit(chip, data, sizeof(data));
@@ -863,18 +878,24 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
863 data = tpm_cmd.params.readpubek_out_buffer; 878 data = tpm_cmd.params.readpubek_out_buffer;
864 str += 879 str +=
865 sprintf(str, 880 sprintf(str,
866 "Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n" 881 "Algorithm: %02X %02X %02X %02X\n"
867 "Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X" 882 "Encscheme: %02X %02X\n"
868 " %02X %02X %02X %02X %02X %02X %02X %02X\n" 883 "Sigscheme: %02X %02X\n"
869 "Modulus length: %d\nModulus: \n", 884 "Parameters: %02X %02X %02X %02X "
870 data[10], data[11], data[12], data[13], data[14], 885 "%02X %02X %02X %02X "
871 data[15], data[16], data[17], data[22], data[23], 886 "%02X %02X %02X %02X\n"
872 data[24], data[25], data[26], data[27], data[28], 887 "Modulus length: %d\n"
873 data[29], data[30], data[31], data[32], data[33], 888 "Modulus:\n",
874 be32_to_cpu(*((__be32 *) (data + 34)))); 889 data[0], data[1], data[2], data[3],
890 data[4], data[5],
891 data[6], data[7],
892 data[12], data[13], data[14], data[15],
893 data[16], data[17], data[18], data[19],
894 data[20], data[21], data[22], data[23],
895 be32_to_cpu(*((__be32 *) (data + 24))));
875 896
876 for (i = 0; i < 256; i++) { 897 for (i = 0; i < 256; i++) {
877 str += sprintf(str, "%02X ", data[i + 38]); 898 str += sprintf(str, "%02X ", data[i + 28]);
878 if ((i + 1) % 16 == 0) 899 if ((i + 1) % 16 == 0)
879 str += sprintf(str, "\n"); 900 str += sprintf(str, "\n");
880 } 901 }
@@ -937,6 +958,35 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
937} 958}
938EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); 959EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
939 960
961ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,
962 char *buf)
963{
964 struct tpm_chip *chip = dev_get_drvdata(dev);
965
966 return sprintf(buf, "%d %d %d [%s]\n",
967 jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
968 jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
969 jiffies_to_usecs(chip->vendor.duration[TPM_LONG]),
970 chip->vendor.duration_adjusted
971 ? "adjusted" : "original");
972}
973EXPORT_SYMBOL_GPL(tpm_show_durations);
974
975ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
976 char *buf)
977{
978 struct tpm_chip *chip = dev_get_drvdata(dev);
979
980 return sprintf(buf, "%d %d %d %d [%s]\n",
981 jiffies_to_usecs(chip->vendor.timeout_a),
982 jiffies_to_usecs(chip->vendor.timeout_b),
983 jiffies_to_usecs(chip->vendor.timeout_c),
984 jiffies_to_usecs(chip->vendor.timeout_d),
985 chip->vendor.timeout_adjusted
986 ? "adjusted" : "original");
987}
988EXPORT_SYMBOL_GPL(tpm_show_timeouts);
989
940ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, 990ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
941 const char *buf, size_t count) 991 const char *buf, size_t count)
942{ 992{
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 72ddb031b69a..9c4163cfa3ce 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -56,6 +56,10 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
56 char *); 56 char *);
57extern ssize_t tpm_show_temp_deactivated(struct device *, 57extern ssize_t tpm_show_temp_deactivated(struct device *,
58 struct device_attribute *attr, char *); 58 struct device_attribute *attr, char *);
59extern ssize_t tpm_show_durations(struct device *,
60 struct device_attribute *attr, char *);
61extern ssize_t tpm_show_timeouts(struct device *,
62 struct device_attribute *attr, char *);
59 63
60struct tpm_chip; 64struct tpm_chip;
61 65
@@ -67,6 +71,7 @@ struct tpm_vendor_specific {
67 unsigned long base; /* TPM base address */ 71 unsigned long base; /* TPM base address */
68 72
69 int irq; 73 int irq;
74 int probed_irq;
70 75
71 int region_size; 76 int region_size;
72 int have_region; 77 int have_region;
@@ -81,7 +86,9 @@ struct tpm_vendor_specific {
81 struct list_head list; 86 struct list_head list;
82 int locality; 87 int locality;
83 unsigned long timeout_a, timeout_b, timeout_c, timeout_d; /* jiffies */ 88 unsigned long timeout_a, timeout_b, timeout_c, timeout_d; /* jiffies */
89 bool timeout_adjusted;
84 unsigned long duration[3]; /* jiffies */ 90 unsigned long duration[3]; /* jiffies */
91 bool duration_adjusted;
85 92
86 wait_queue_head_t read_queue; 93 wait_queue_head_t read_queue;
87 wait_queue_head_t int_queue; 94 wait_queue_head_t int_queue;
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index a605cb7dd898..82facc9104c7 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -330,12 +330,12 @@ static int __init init_nsc(void)
330 pdev->dev.driver = &nsc_drv.driver; 330 pdev->dev.driver = &nsc_drv.driver;
331 pdev->dev.release = tpm_nsc_remove; 331 pdev->dev.release = tpm_nsc_remove;
332 332
333 if ((rc = platform_device_register(pdev)) < 0) 333 if ((rc = platform_device_add(pdev)) < 0)
334 goto err_free_dev; 334 goto err_put_dev;
335 335
336 if (request_region(base, 2, "tpm_nsc0") == NULL ) { 336 if (request_region(base, 2, "tpm_nsc0") == NULL ) {
337 rc = -EBUSY; 337 rc = -EBUSY;
338 goto err_unreg_dev; 338 goto err_del_dev;
339 } 339 }
340 340
341 if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_nsc))) { 341 if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_nsc))) {
@@ -382,10 +382,10 @@ static int __init init_nsc(void)
382 382
383err_rel_reg: 383err_rel_reg:
384 release_region(base, 2); 384 release_region(base, 2);
385err_unreg_dev: 385err_del_dev:
386 platform_device_unregister(pdev); 386 platform_device_del(pdev);
387err_free_dev: 387err_put_dev:
388 kfree(pdev); 388 platform_device_put(pdev);
389err_unreg_drv: 389err_unreg_drv:
390 platform_driver_unregister(&nsc_drv); 390 platform_driver_unregister(&nsc_drv);
391 return rc; 391 return rc;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index dd21df55689d..7fc2f108f490 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -26,6 +26,7 @@
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/wait.h> 27#include <linux/wait.h>
28#include <linux/acpi.h> 28#include <linux/acpi.h>
29#include <linux/freezer.h>
29#include "tpm.h" 30#include "tpm.h"
30 31
31#define TPM_HEADER_SIZE 10 32#define TPM_HEADER_SIZE 10
@@ -79,7 +80,7 @@ enum tis_defaults {
79static LIST_HEAD(tis_chips); 80static LIST_HEAD(tis_chips);
80static DEFINE_SPINLOCK(tis_lock); 81static DEFINE_SPINLOCK(tis_lock);
81 82
82#ifdef CONFIG_ACPI 83#ifdef CONFIG_PNP
83static int is_itpm(struct pnp_dev *dev) 84static int is_itpm(struct pnp_dev *dev)
84{ 85{
85 struct acpi_device *acpi = pnp_acpi_device(dev); 86 struct acpi_device *acpi = pnp_acpi_device(dev);
@@ -92,11 +93,6 @@ static int is_itpm(struct pnp_dev *dev)
92 93
93 return 0; 94 return 0;
94} 95}
95#else
96static int is_itpm(struct pnp_dev *dev)
97{
98 return 0;
99}
100#endif 96#endif
101 97
102static int check_locality(struct tpm_chip *chip, int l) 98static int check_locality(struct tpm_chip *chip, int l)
@@ -120,7 +116,7 @@ static void release_locality(struct tpm_chip *chip, int l, int force)
120 116
121static int request_locality(struct tpm_chip *chip, int l) 117static int request_locality(struct tpm_chip *chip, int l)
122{ 118{
123 unsigned long stop; 119 unsigned long stop, timeout;
124 long rc; 120 long rc;
125 121
126 if (check_locality(chip, l) >= 0) 122 if (check_locality(chip, l) >= 0)
@@ -129,17 +125,25 @@ static int request_locality(struct tpm_chip *chip, int l)
129 iowrite8(TPM_ACCESS_REQUEST_USE, 125 iowrite8(TPM_ACCESS_REQUEST_USE,
130 chip->vendor.iobase + TPM_ACCESS(l)); 126 chip->vendor.iobase + TPM_ACCESS(l));
131 127
128 stop = jiffies + chip->vendor.timeout_a;
129
132 if (chip->vendor.irq) { 130 if (chip->vendor.irq) {
131again:
132 timeout = stop - jiffies;
133 if ((long)timeout <= 0)
134 return -1;
133 rc = wait_event_interruptible_timeout(chip->vendor.int_queue, 135 rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
134 (check_locality 136 (check_locality
135 (chip, l) >= 0), 137 (chip, l) >= 0),
136 chip->vendor.timeout_a); 138 timeout);
137 if (rc > 0) 139 if (rc > 0)
138 return l; 140 return l;
139 141 if (rc == -ERESTARTSYS && freezing(current)) {
142 clear_thread_flag(TIF_SIGPENDING);
143 goto again;
144 }
140 } else { 145 } else {
141 /* wait for burstcount */ 146 /* wait for burstcount */
142 stop = jiffies + chip->vendor.timeout_a;
143 do { 147 do {
144 if (check_locality(chip, l) >= 0) 148 if (check_locality(chip, l) >= 0)
145 return l; 149 return l;
@@ -196,15 +200,24 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
196 if ((status & mask) == mask) 200 if ((status & mask) == mask)
197 return 0; 201 return 0;
198 202
203 stop = jiffies + timeout;
204
199 if (chip->vendor.irq) { 205 if (chip->vendor.irq) {
206again:
207 timeout = stop - jiffies;
208 if ((long)timeout <= 0)
209 return -ETIME;
200 rc = wait_event_interruptible_timeout(*queue, 210 rc = wait_event_interruptible_timeout(*queue,
201 ((tpm_tis_status 211 ((tpm_tis_status
202 (chip) & mask) == 212 (chip) & mask) ==
203 mask), timeout); 213 mask), timeout);
204 if (rc > 0) 214 if (rc > 0)
205 return 0; 215 return 0;
216 if (rc == -ERESTARTSYS && freezing(current)) {
217 clear_thread_flag(TIF_SIGPENDING);
218 goto again;
219 }
206 } else { 220 } else {
207 stop = jiffies + timeout;
208 do { 221 do {
209 msleep(TPM_TIMEOUT); 222 msleep(TPM_TIMEOUT);
210 status = tpm_tis_status(chip); 223 status = tpm_tis_status(chip);
@@ -288,11 +301,10 @@ MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
288 * tpm.c can skip polling for the data to be available as the interrupt is 301 * tpm.c can skip polling for the data to be available as the interrupt is
289 * waited for here 302 * waited for here
290 */ 303 */
291static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) 304static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
292{ 305{
293 int rc, status, burstcnt; 306 int rc, status, burstcnt;
294 size_t count = 0; 307 size_t count = 0;
295 u32 ordinal;
296 308
297 if (request_locality(chip, 0) < 0) 309 if (request_locality(chip, 0) < 0)
298 return -EBUSY; 310 return -EBUSY;
@@ -327,8 +339,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
327 339
328 /* write last byte */ 340 /* write last byte */
329 iowrite8(buf[count], 341 iowrite8(buf[count],
330 chip->vendor.iobase + 342 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
331 TPM_DATA_FIFO(chip->vendor.locality));
332 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, 343 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
333 &chip->vendor.int_queue); 344 &chip->vendor.int_queue);
334 status = tpm_tis_status(chip); 345 status = tpm_tis_status(chip);
@@ -337,6 +348,28 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
337 goto out_err; 348 goto out_err;
338 } 349 }
339 350
351 return 0;
352
353out_err:
354 tpm_tis_ready(chip);
355 release_locality(chip, chip->vendor.locality, 0);
356 return rc;
357}
358
359/*
360 * If interrupts are used (signaled by an irq set in the vendor structure)
361 * tpm.c can skip polling for the data to be available as the interrupt is
362 * waited for here
363 */
364static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
365{
366 int rc;
367 u32 ordinal;
368
369 rc = tpm_tis_send_data(chip, buf, len);
370 if (rc < 0)
371 return rc;
372
340 /* go and do it */ 373 /* go and do it */
341 iowrite8(TPM_STS_GO, 374 iowrite8(TPM_STS_GO,
342 chip->vendor.iobase + TPM_STS(chip->vendor.locality)); 375 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
@@ -358,6 +391,47 @@ out_err:
358 return rc; 391 return rc;
359} 392}
360 393
394/*
395 * Early probing for iTPM with STS_DATA_EXPECT flaw.
396 * Try sending command without itpm flag set and if that
397 * fails, repeat with itpm flag set.
398 */
399static int probe_itpm(struct tpm_chip *chip)
400{
401 int rc = 0;
402 u8 cmd_getticks[] = {
403 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
404 0x00, 0x00, 0x00, 0xf1
405 };
406 size_t len = sizeof(cmd_getticks);
407 int rem_itpm = itpm;
408
409 itpm = 0;
410
411 rc = tpm_tis_send_data(chip, cmd_getticks, len);
412 if (rc == 0)
413 goto out;
414
415 tpm_tis_ready(chip);
416 release_locality(chip, chip->vendor.locality, 0);
417
418 itpm = 1;
419
420 rc = tpm_tis_send_data(chip, cmd_getticks, len);
421 if (rc == 0) {
422 dev_info(chip->dev, "Detected an iTPM.\n");
423 rc = 1;
424 } else
425 rc = -EFAULT;
426
427out:
428 itpm = rem_itpm;
429 tpm_tis_ready(chip);
430 release_locality(chip, chip->vendor.locality, 0);
431
432 return rc;
433}
434
361static const struct file_operations tis_ops = { 435static const struct file_operations tis_ops = {
362 .owner = THIS_MODULE, 436 .owner = THIS_MODULE,
363 .llseek = no_llseek, 437 .llseek = no_llseek,
@@ -376,6 +450,8 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
376 NULL); 450 NULL);
377static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); 451static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
378static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); 452static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
453static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
454static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
379 455
380static struct attribute *tis_attrs[] = { 456static struct attribute *tis_attrs[] = {
381 &dev_attr_pubek.attr, 457 &dev_attr_pubek.attr,
@@ -385,7 +461,9 @@ static struct attribute *tis_attrs[] = {
385 &dev_attr_owned.attr, 461 &dev_attr_owned.attr,
386 &dev_attr_temp_deactivated.attr, 462 &dev_attr_temp_deactivated.attr,
387 &dev_attr_caps.attr, 463 &dev_attr_caps.attr,
388 &dev_attr_cancel.attr, NULL, 464 &dev_attr_cancel.attr,
465 &dev_attr_durations.attr,
466 &dev_attr_timeouts.attr, NULL,
389}; 467};
390 468
391static struct attribute_group tis_attr_grp = { 469static struct attribute_group tis_attr_grp = {
@@ -416,7 +494,7 @@ static irqreturn_t tis_int_probe(int irq, void *dev_id)
416 if (interrupt == 0) 494 if (interrupt == 0)
417 return IRQ_NONE; 495 return IRQ_NONE;
418 496
419 chip->vendor.irq = irq; 497 chip->vendor.probed_irq = irq;
420 498
421 /* Clear interrupts handled with TPM_EOI */ 499 /* Clear interrupts handled with TPM_EOI */
422 iowrite32(interrupt, 500 iowrite32(interrupt,
@@ -464,7 +542,7 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
464 resource_size_t len, unsigned int irq) 542 resource_size_t len, unsigned int irq)
465{ 543{
466 u32 vendor, intfcaps, intmask; 544 u32 vendor, intfcaps, intmask;
467 int rc, i; 545 int rc, i, irq_s, irq_e;
468 struct tpm_chip *chip; 546 struct tpm_chip *chip;
469 547
470 if (!(chip = tpm_register_hardware(dev, &tpm_tis))) 548 if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
@@ -493,6 +571,14 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
493 "1.2 TPM (device-id 0x%X, rev-id %d)\n", 571 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
494 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); 572 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
495 573
574 if (!itpm) {
575 itpm = probe_itpm(chip);
576 if (itpm < 0) {
577 rc = -ENODEV;
578 goto out_err;
579 }
580 }
581
496 if (itpm) 582 if (itpm)
497 dev_info(dev, "Intel iTPM workaround enabled\n"); 583 dev_info(dev, "Intel iTPM workaround enabled\n");
498 584
@@ -522,6 +608,9 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
522 if (intfcaps & TPM_INTF_DATA_AVAIL_INT) 608 if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
523 dev_dbg(dev, "\tData Avail Int Support\n"); 609 dev_dbg(dev, "\tData Avail Int Support\n");
524 610
611 /* get the timeouts before testing for irqs */
612 tpm_get_timeouts(chip);
613
525 /* INTERRUPT Setup */ 614 /* INTERRUPT Setup */
526 init_waitqueue_head(&chip->vendor.read_queue); 615 init_waitqueue_head(&chip->vendor.read_queue);
527 init_waitqueue_head(&chip->vendor.int_queue); 616 init_waitqueue_head(&chip->vendor.int_queue);
@@ -540,13 +629,19 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
540 if (interrupts) 629 if (interrupts)
541 chip->vendor.irq = irq; 630 chip->vendor.irq = irq;
542 if (interrupts && !chip->vendor.irq) { 631 if (interrupts && !chip->vendor.irq) {
543 chip->vendor.irq = 632 irq_s =
544 ioread8(chip->vendor.iobase + 633 ioread8(chip->vendor.iobase +
545 TPM_INT_VECTOR(chip->vendor.locality)); 634 TPM_INT_VECTOR(chip->vendor.locality));
635 if (irq_s) {
636 irq_e = irq_s;
637 } else {
638 irq_s = 3;
639 irq_e = 15;
640 }
546 641
547 for (i = 3; i < 16 && chip->vendor.irq == 0; i++) { 642 for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
548 iowrite8(i, chip->vendor.iobase + 643 iowrite8(i, chip->vendor.iobase +
549 TPM_INT_VECTOR(chip->vendor.locality)); 644 TPM_INT_VECTOR(chip->vendor.locality));
550 if (request_irq 645 if (request_irq
551 (i, tis_int_probe, IRQF_SHARED, 646 (i, tis_int_probe, IRQF_SHARED,
552 chip->vendor.miscdev.name, chip) != 0) { 647 chip->vendor.miscdev.name, chip) != 0) {
@@ -568,9 +663,22 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
568 chip->vendor.iobase + 663 chip->vendor.iobase +
569 TPM_INT_ENABLE(chip->vendor.locality)); 664 TPM_INT_ENABLE(chip->vendor.locality));
570 665
666 chip->vendor.probed_irq = 0;
667
571 /* Generate Interrupts */ 668 /* Generate Interrupts */
572 tpm_gen_interrupt(chip); 669 tpm_gen_interrupt(chip);
573 670
671 chip->vendor.irq = chip->vendor.probed_irq;
672
673 /* free_irq will call into tis_int_probe;
674 clear all irqs we haven't seen while doing
675 tpm_gen_interrupt */
676 iowrite32(ioread32
677 (chip->vendor.iobase +
678 TPM_INT_STATUS(chip->vendor.locality)),
679 chip->vendor.iobase +
680 TPM_INT_STATUS(chip->vendor.locality));
681
574 /* Turn off */ 682 /* Turn off */
575 iowrite32(intmask, 683 iowrite32(intmask,
576 chip->vendor.iobase + 684 chip->vendor.iobase +
@@ -609,7 +717,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
609 list_add(&chip->vendor.list, &tis_chips); 717 list_add(&chip->vendor.list, &tis_chips);
610 spin_unlock(&tis_lock); 718 spin_unlock(&tis_lock);
611 719
612 tpm_get_timeouts(chip);
613 tpm_continue_selftest(chip); 720 tpm_continue_selftest(chip);
614 721
615 return 0; 722 return 0;
@@ -619,6 +726,29 @@ out_err:
619 tpm_remove_hardware(chip->dev); 726 tpm_remove_hardware(chip->dev);
620 return rc; 727 return rc;
621} 728}
729
730static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
731{
732 u32 intmask;
733
734 /* reenable interrupts that device may have lost or
735 BIOS/firmware may have disabled */
736 iowrite8(chip->vendor.irq, chip->vendor.iobase +
737 TPM_INT_VECTOR(chip->vendor.locality));
738
739 intmask =
740 ioread32(chip->vendor.iobase +
741 TPM_INT_ENABLE(chip->vendor.locality));
742
743 intmask |= TPM_INTF_CMD_READY_INT
744 | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
745 | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
746
747 iowrite32(intmask,
748 chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
749}
750
751
622#ifdef CONFIG_PNP 752#ifdef CONFIG_PNP
623static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev, 753static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
624 const struct pnp_device_id *pnp_id) 754 const struct pnp_device_id *pnp_id)
@@ -650,6 +780,9 @@ static int tpm_tis_pnp_resume(struct pnp_dev *dev)
650 struct tpm_chip *chip = pnp_get_drvdata(dev); 780 struct tpm_chip *chip = pnp_get_drvdata(dev);
651 int ret; 781 int ret;
652 782
783 if (chip->vendor.irq)
784 tpm_tis_reenable_interrupts(chip);
785
653 ret = tpm_pm_resume(&dev->dev); 786 ret = tpm_pm_resume(&dev->dev);
654 if (!ret) 787 if (!ret)
655 tpm_continue_selftest(chip); 788 tpm_continue_selftest(chip);
@@ -702,6 +835,11 @@ static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
702 835
703static int tpm_tis_resume(struct platform_device *dev) 836static int tpm_tis_resume(struct platform_device *dev)
704{ 837{
838 struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
839
840 if (chip->vendor.irq)
841 tpm_tis_reenable_interrupts(chip);
842
705 return tpm_pm_resume(&dev->dev); 843 return tpm_pm_resume(&dev->dev);
706} 844}
707static struct platform_driver tis_drv = { 845static struct platform_driver tis_drv = {
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 4168c8896e16..35309274ad68 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -2,3 +2,6 @@
2config CLKDEV_LOOKUP 2config CLKDEV_LOOKUP
3 bool 3 bool
4 select HAVE_CLK 4 select HAVE_CLK
5
6config HAVE_MACH_CLKDEV
7 bool
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 0debc17c8e28..3ee1fdb31ea7 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -29,7 +29,8 @@
29#include <linux/connector.h> 29#include <linux/connector.h>
30#include <linux/gfp.h> 30#include <linux/gfp.h>
31#include <linux/ptrace.h> 31#include <linux/ptrace.h>
32#include <asm/atomic.h> 32#include <linux/atomic.h>
33
33#include <asm/unaligned.h> 34#include <asm/unaligned.h>
34 35
35#include <linux/cn_proc.h> 36#include <linux/cn_proc.h>
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0a5bea9e3585..987a165ede26 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1199,6 +1199,26 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
1199} 1199}
1200EXPORT_SYMBOL(cpufreq_quick_get); 1200EXPORT_SYMBOL(cpufreq_quick_get);
1201 1201
1202/**
1203 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1204 * @cpu: CPU number
1205 *
1206 * Just return the max possible frequency for a given CPU.
1207 */
1208unsigned int cpufreq_quick_get_max(unsigned int cpu)
1209{
1210 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1211 unsigned int ret_freq = 0;
1212
1213 if (policy) {
1214 ret_freq = policy->max;
1215 cpufreq_cpu_put(policy);
1216 }
1217
1218 return ret_freq;
1219}
1220EXPORT_SYMBOL(cpufreq_quick_get_max);
1221
1202 1222
1203static unsigned int __cpufreq_get(unsigned int cpu) 1223static unsigned int __cpufreq_get(unsigned int cpu)
1204{ 1224{
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 2e5b2044c96f..d0183ddb3076 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1,6 +1,6 @@
1/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support. 1/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
2 * 2 *
3 * Copyright (C) 2010 David S. Miller <davem@davemloft.net> 3 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
4 */ 4 */
5 5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -31,8 +31,8 @@
31#include "n2_core.h" 31#include "n2_core.h"
32 32
33#define DRV_MODULE_NAME "n2_crypto" 33#define DRV_MODULE_NAME "n2_crypto"
34#define DRV_MODULE_VERSION "0.1" 34#define DRV_MODULE_VERSION "0.2"
35#define DRV_MODULE_RELDATE "April 29, 2010" 35#define DRV_MODULE_RELDATE "July 28, 2011"
36 36
37static char version[] __devinitdata = 37static char version[] __devinitdata =
38 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 38 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -1823,22 +1823,17 @@ static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *de
1823static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node, 1823static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
1824 struct spu_mdesc_info *ip) 1824 struct spu_mdesc_info *ip)
1825{ 1825{
1826 const u64 *intr, *ino; 1826 const u64 *ino;
1827 int intr_len, ino_len; 1827 int ino_len;
1828 int i; 1828 int i;
1829 1829
1830 intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
1831 if (!intr)
1832 return -ENODEV;
1833
1834 ino = mdesc_get_property(mdesc, node, "ino", &ino_len); 1830 ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1835 if (!ino) 1831 if (!ino) {
1832 printk("NO 'ino'\n");
1836 return -ENODEV; 1833 return -ENODEV;
1834 }
1837 1835
1838 if (intr_len != ino_len) 1836 ip->num_intrs = ino_len / sizeof(u64);
1839 return -EINVAL;
1840
1841 ip->num_intrs = intr_len / sizeof(u64);
1842 ip->ino_table = kzalloc((sizeof(struct ino_blob) * 1837 ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1843 ip->num_intrs), 1838 ip->num_intrs),
1844 GFP_KERNEL); 1839 GFP_KERNEL);
@@ -1847,7 +1842,7 @@ static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
1847 1842
1848 for (i = 0; i < ip->num_intrs; i++) { 1843 for (i = 0; i < ip->num_intrs; i++) {
1849 struct ino_blob *b = &ip->ino_table[i]; 1844 struct ino_blob *b = &ip->ino_table[i];
1850 b->intr = intr[i]; 1845 b->intr = i + 1;
1851 b->ino = ino[i]; 1846 b->ino = ino[i];
1852 } 1847 }
1853 1848
@@ -2204,6 +2199,10 @@ static struct of_device_id n2_crypto_match[] = {
2204 .name = "n2cp", 2199 .name = "n2cp",
2205 .compatible = "SUNW,vf-cwq", 2200 .compatible = "SUNW,vf-cwq",
2206 }, 2201 },
2202 {
2203 .name = "n2cp",
2204 .compatible = "SUNW,kt-cwq",
2205 },
2207 {}, 2206 {},
2208}; 2207};
2209 2208
@@ -2228,6 +2227,10 @@ static struct of_device_id n2_mau_match[] = {
2228 .name = "ncp", 2227 .name = "ncp",
2229 .compatible = "SUNW,vf-mau", 2228 .compatible = "SUNW,vf-mau",
2230 }, 2229 },
2230 {
2231 .name = "ncp",
2232 .compatible = "SUNW,kt-mau",
2233 },
2231 {}, 2234 {},
2232}; 2235};
2233 2236
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index b6d1455fa936..1eb60ded2f0d 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -32,6 +32,8 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/platform_device.h> 33#include <linux/platform_device.h>
34#include <linux/dmaengine.h> 34#include <linux/dmaengine.h>
35#include <linux/of.h>
36#include <linux/of_device.h>
35 37
36#include <asm/irq.h> 38#include <asm/irq.h>
37#include <mach/sdma.h> 39#include <mach/sdma.h>
@@ -65,8 +67,8 @@
65#define SDMA_ONCE_RTB 0x060 67#define SDMA_ONCE_RTB 0x060
66#define SDMA_XTRIG_CONF1 0x070 68#define SDMA_XTRIG_CONF1 0x070
67#define SDMA_XTRIG_CONF2 0x074 69#define SDMA_XTRIG_CONF2 0x074
68#define SDMA_CHNENBL0_V2 0x200 70#define SDMA_CHNENBL0_IMX35 0x200
69#define SDMA_CHNENBL0_V1 0x080 71#define SDMA_CHNENBL0_IMX31 0x080
70#define SDMA_CHNPRI_0 0x100 72#define SDMA_CHNPRI_0 0x100
71 73
72/* 74/*
@@ -299,13 +301,18 @@ struct sdma_firmware_header {
299 u32 ram_code_size; 301 u32 ram_code_size;
300}; 302};
301 303
304enum sdma_devtype {
305 IMX31_SDMA, /* runs on i.mx31 */
306 IMX35_SDMA, /* runs on i.mx35 and later */
307};
308
302struct sdma_engine { 309struct sdma_engine {
303 struct device *dev; 310 struct device *dev;
304 struct device_dma_parameters dma_parms; 311 struct device_dma_parameters dma_parms;
305 struct sdma_channel channel[MAX_DMA_CHANNELS]; 312 struct sdma_channel channel[MAX_DMA_CHANNELS];
306 struct sdma_channel_control *channel_control; 313 struct sdma_channel_control *channel_control;
307 void __iomem *regs; 314 void __iomem *regs;
308 unsigned int version; 315 enum sdma_devtype devtype;
309 unsigned int num_events; 316 unsigned int num_events;
310 struct sdma_context_data *context; 317 struct sdma_context_data *context;
311 dma_addr_t context_phys; 318 dma_addr_t context_phys;
@@ -314,6 +321,26 @@ struct sdma_engine {
314 struct sdma_script_start_addrs *script_addrs; 321 struct sdma_script_start_addrs *script_addrs;
315}; 322};
316 323
324static struct platform_device_id sdma_devtypes[] = {
325 {
326 .name = "imx31-sdma",
327 .driver_data = IMX31_SDMA,
328 }, {
329 .name = "imx35-sdma",
330 .driver_data = IMX35_SDMA,
331 }, {
332 /* sentinel */
333 }
334};
335MODULE_DEVICE_TABLE(platform, sdma_devtypes);
336
337static const struct of_device_id sdma_dt_ids[] = {
338 { .compatible = "fsl,imx31-sdma", .data = &sdma_devtypes[IMX31_SDMA], },
339 { .compatible = "fsl,imx35-sdma", .data = &sdma_devtypes[IMX35_SDMA], },
340 { /* sentinel */ }
341};
342MODULE_DEVICE_TABLE(of, sdma_dt_ids);
343
317#define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */ 344#define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */
318#define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */ 345#define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */
319#define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */ 346#define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */
@@ -321,8 +348,8 @@ struct sdma_engine {
321 348
322static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) 349static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
323{ 350{
324 u32 chnenbl0 = (sdma->version == 2 ? SDMA_CHNENBL0_V2 : SDMA_CHNENBL0_V1); 351 u32 chnenbl0 = (sdma->devtype == IMX31_SDMA ? SDMA_CHNENBL0_IMX31 :
325 352 SDMA_CHNENBL0_IMX35);
326 return chnenbl0 + event * 4; 353 return chnenbl0 + event * 4;
327} 354}
328 355
@@ -1105,25 +1132,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
1105} 1132}
1106 1133
1107static int __init sdma_get_firmware(struct sdma_engine *sdma, 1134static int __init sdma_get_firmware(struct sdma_engine *sdma,
1108 const char *cpu_name, int to_version) 1135 const char *fw_name)
1109{ 1136{
1110 const struct firmware *fw; 1137 const struct firmware *fw;
1111 char *fwname;
1112 const struct sdma_firmware_header *header; 1138 const struct sdma_firmware_header *header;
1113 int ret; 1139 int ret;
1114 const struct sdma_script_start_addrs *addr; 1140 const struct sdma_script_start_addrs *addr;
1115 unsigned short *ram_code; 1141 unsigned short *ram_code;
1116 1142
1117 fwname = kasprintf(GFP_KERNEL, "sdma-%s-to%d.bin", cpu_name, to_version); 1143 ret = request_firmware(&fw, fw_name, sdma->dev);
1118 if (!fwname) 1144 if (ret)
1119 return -ENOMEM;
1120
1121 ret = request_firmware(&fw, fwname, sdma->dev);
1122 if (ret) {
1123 kfree(fwname);
1124 return ret; 1145 return ret;
1125 }
1126 kfree(fwname);
1127 1146
1128 if (fw->size < sizeof(*header)) 1147 if (fw->size < sizeof(*header))
1129 goto err_firmware; 1148 goto err_firmware;
@@ -1162,15 +1181,16 @@ static int __init sdma_init(struct sdma_engine *sdma)
1162 int i, ret; 1181 int i, ret;
1163 dma_addr_t ccb_phys; 1182 dma_addr_t ccb_phys;
1164 1183
1165 switch (sdma->version) { 1184 switch (sdma->devtype) {
1166 case 1: 1185 case IMX31_SDMA:
1167 sdma->num_events = 32; 1186 sdma->num_events = 32;
1168 break; 1187 break;
1169 case 2: 1188 case IMX35_SDMA:
1170 sdma->num_events = 48; 1189 sdma->num_events = 48;
1171 break; 1190 break;
1172 default: 1191 default:
1173 dev_err(sdma->dev, "Unknown version %d. aborting\n", sdma->version); 1192 dev_err(sdma->dev, "Unknown sdma type %d. aborting\n",
1193 sdma->devtype);
1174 return -ENODEV; 1194 return -ENODEV;
1175 } 1195 }
1176 1196
@@ -1239,6 +1259,10 @@ err_dma_alloc:
1239 1259
1240static int __init sdma_probe(struct platform_device *pdev) 1260static int __init sdma_probe(struct platform_device *pdev)
1241{ 1261{
1262 const struct of_device_id *of_id =
1263 of_match_device(sdma_dt_ids, &pdev->dev);
1264 struct device_node *np = pdev->dev.of_node;
1265 const char *fw_name;
1242 int ret; 1266 int ret;
1243 int irq; 1267 int irq;
1244 struct resource *iores; 1268 struct resource *iores;
@@ -1254,7 +1278,7 @@ static int __init sdma_probe(struct platform_device *pdev)
1254 1278
1255 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1279 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1256 irq = platform_get_irq(pdev, 0); 1280 irq = platform_get_irq(pdev, 0);
1257 if (!iores || irq < 0 || !pdata) { 1281 if (!iores || irq < 0) {
1258 ret = -EINVAL; 1282 ret = -EINVAL;
1259 goto err_irq; 1283 goto err_irq;
1260 } 1284 }
@@ -1284,7 +1308,9 @@ static int __init sdma_probe(struct platform_device *pdev)
1284 if (!sdma->script_addrs) 1308 if (!sdma->script_addrs)
1285 goto err_alloc; 1309 goto err_alloc;
1286 1310
1287 sdma->version = pdata->sdma_version; 1311 if (of_id)
1312 pdev->id_entry = of_id->data;
1313 sdma->devtype = pdev->id_entry->driver_data;
1288 1314
1289 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); 1315 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1290 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); 1316 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
@@ -1314,10 +1340,30 @@ static int __init sdma_probe(struct platform_device *pdev)
1314 if (ret) 1340 if (ret)
1315 goto err_init; 1341 goto err_init;
1316 1342
1317 if (pdata->script_addrs) 1343 if (pdata && pdata->script_addrs)
1318 sdma_add_scripts(sdma, pdata->script_addrs); 1344 sdma_add_scripts(sdma, pdata->script_addrs);
1319 1345
1320 sdma_get_firmware(sdma, pdata->cpu_name, pdata->to_version); 1346 if (pdata) {
1347 sdma_get_firmware(sdma, pdata->fw_name);
1348 } else {
1349 /*
1350 * Because that device tree does not encode ROM script address,
1351 * the RAM script in firmware is mandatory for device tree
1352 * probe, otherwise it fails.
1353 */
1354 ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
1355 &fw_name);
1356 if (ret) {
1357 dev_err(&pdev->dev, "failed to get firmware name\n");
1358 goto err_init;
1359 }
1360
1361 ret = sdma_get_firmware(sdma, fw_name);
1362 if (ret) {
1363 dev_err(&pdev->dev, "failed to get firmware\n");
1364 goto err_init;
1365 }
1366 }
1321 1367
1322 sdma->dma_device.dev = &pdev->dev; 1368 sdma->dma_device.dev = &pdev->dev;
1323 1369
@@ -1365,7 +1411,9 @@ static int __exit sdma_remove(struct platform_device *pdev)
1365static struct platform_driver sdma_driver = { 1411static struct platform_driver sdma_driver = {
1366 .driver = { 1412 .driver = {
1367 .name = "imx-sdma", 1413 .name = "imx-sdma",
1414 .of_match_table = sdma_dt_ids,
1368 }, 1415 },
1416 .id_table = sdma_devtypes,
1369 .remove = __exit_p(sdma_remove), 1417 .remove = __exit_p(sdma_remove),
1370}; 1418};
1371 1419
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c
index aab970760b75..86ad2eee1201 100644
--- a/drivers/edac/edac_stub.c
+++ b/drivers/edac/edac_stub.c
@@ -14,7 +14,7 @@
14 */ 14 */
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/edac.h> 16#include <linux/edac.h>
17#include <asm/atomic.h> 17#include <linux/atomic.h>
18#include <asm/edac.h> 18#include <asm/edac.h>
19 19
20int edac_op_state = EDAC_OPSTATE_INVAL; 20int edac_op_state = EDAC_OPSTATE_INVAL;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 11e1a5dad96f..8af8e864a9cf 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -854,11 +854,11 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
854 mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n"); 854 mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
855 855
856 if (err_detect & DDR_EDE_SBE) 856 if (err_detect & DDR_EDE_SBE)
857 edac_mc_handle_ce(mci, pfn, err_addr & PAGE_MASK, 857 edac_mc_handle_ce(mci, pfn, err_addr & ~PAGE_MASK,
858 syndrome, row_index, 0, mci->ctl_name); 858 syndrome, row_index, 0, mci->ctl_name);
859 859
860 if (err_detect & DDR_EDE_MBE) 860 if (err_detect & DDR_EDE_MBE)
861 edac_mc_handle_ue(mci, pfn, err_addr & PAGE_MASK, 861 edac_mc_handle_ue(mci, pfn, err_addr & ~PAGE_MASK,
862 row_index, mci->ctl_name); 862 row_index, mci->ctl_name);
863 863
864 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect); 864 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 29d2423fae6d..85661b060ed7 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -32,7 +32,7 @@
32#include <linux/spinlock.h> 32#include <linux/spinlock.h>
33#include <linux/workqueue.h> 33#include <linux/workqueue.h>
34 34
35#include <asm/atomic.h> 35#include <linux/atomic.h>
36#include <asm/byteorder.h> 36#include <asm/byteorder.h>
37 37
38#include "core.h" 38#include "core.h"
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 95a471401892..8ba7f7928f1f 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -38,7 +38,7 @@
38#include <linux/string.h> 38#include <linux/string.h>
39#include <linux/workqueue.h> 39#include <linux/workqueue.h>
40 40
41#include <asm/atomic.h> 41#include <linux/atomic.h>
42#include <asm/byteorder.h> 42#include <asm/byteorder.h>
43#include <asm/system.h> 43#include <asm/system.h>
44 44
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 193ed9233144..94d3b494ddfb 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -29,7 +29,7 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/spinlock.h> 30#include <linux/spinlock.h>
31 31
32#include <asm/atomic.h> 32#include <linux/atomic.h>
33#include <asm/byteorder.h> 33#include <asm/byteorder.h>
34#include <asm/system.h> 34#include <asm/system.h>
35 35
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0fe4e4e6eda7..b45be5767529 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -9,7 +9,7 @@
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/types.h> 10#include <linux/types.h>
11 11
12#include <asm/atomic.h> 12#include <linux/atomic.h>
13 13
14struct device; 14struct device;
15struct fw_card; 15struct fw_card;
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 0618145376ad..763626b739d1 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -37,7 +37,7 @@
37#include <linux/uaccess.h> 37#include <linux/uaccess.h>
38#include <linux/wait.h> 38#include <linux/wait.h>
39 39
40#include <asm/atomic.h> 40#include <linux/atomic.h>
41#include <asm/byteorder.h> 41#include <asm/byteorder.h>
42 42
43#include "nosy.h" 43#include "nosy.h"
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 92369655dca3..f88a9b2c977b 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -560,6 +560,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
560 mode_changed = true; 560 mode_changed = true;
561 } else if (set->fb == NULL) { 561 } else if (set->fb == NULL) {
562 mode_changed = true; 562 mode_changed = true;
563 } else if (set->fb->depth != set->crtc->fb->depth) {
564 mode_changed = true;
565 } else if (set->fb->bits_per_pixel !=
566 set->crtc->fb->bits_per_pixel) {
567 mode_changed = true;
563 } else 568 } else
564 fb_changed = true; 569 fb_changed = true;
565 } 570 }
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 09292193dafe..756af4d7ec74 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -185,8 +185,8 @@ drm_edid_block_valid(u8 *raw_edid)
185bad: 185bad:
186 if (raw_edid) { 186 if (raw_edid) {
187 printk(KERN_ERR "Raw EDID:\n"); 187 printk(KERN_ERR "Raw EDID:\n");
188 print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH); 188 print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
189 printk(KERN_ERR "\n"); 189 raw_edid, EDID_LENGTH, false);
190 } 190 }
191 return 0; 191 return 0;
192} 192}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4012fe423460..186d62eb063b 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -129,7 +129,7 @@ drm_gem_destroy(struct drm_device *dev)
129} 129}
130 130
131/** 131/**
132 * Initialize an already allocate GEM object of the specified size with 132 * Initialize an already allocated GEM object of the specified size with
133 * shmfs backing store. 133 * shmfs backing store.
134 */ 134 */
135int drm_gem_object_init(struct drm_device *dev, 135int drm_gem_object_init(struct drm_device *dev,
@@ -151,6 +151,27 @@ int drm_gem_object_init(struct drm_device *dev,
151EXPORT_SYMBOL(drm_gem_object_init); 151EXPORT_SYMBOL(drm_gem_object_init);
152 152
153/** 153/**
154 * Initialize an already allocated GEM object of the specified size with
155 * no GEM provided backing store. Instead the caller is responsible for
156 * backing the object and handling it.
157 */
158int drm_gem_private_object_init(struct drm_device *dev,
159 struct drm_gem_object *obj, size_t size)
160{
161 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
162
163 obj->dev = dev;
164 obj->filp = NULL;
165
166 kref_init(&obj->refcount);
167 atomic_set(&obj->handle_count, 0);
168 obj->size = size;
169
170 return 0;
171}
172EXPORT_SYMBOL(drm_gem_private_object_init);
173
174/**
154 * Allocate a GEM object of the specified size with shmfs backing store 175 * Allocate a GEM object of the specified size with shmfs backing store
155 */ 176 */
156struct drm_gem_object * 177struct drm_gem_object *
@@ -211,6 +232,8 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
211 idr_remove(&filp->object_idr, handle); 232 idr_remove(&filp->object_idr, handle);
212 spin_unlock(&filp->table_lock); 233 spin_unlock(&filp->table_lock);
213 234
235 if (dev->driver->gem_close_object)
236 dev->driver->gem_close_object(obj, filp);
214 drm_gem_object_handle_unreference_unlocked(obj); 237 drm_gem_object_handle_unreference_unlocked(obj);
215 238
216 return 0; 239 return 0;
@@ -227,7 +250,8 @@ drm_gem_handle_create(struct drm_file *file_priv,
227 struct drm_gem_object *obj, 250 struct drm_gem_object *obj,
228 u32 *handlep) 251 u32 *handlep)
229{ 252{
230 int ret; 253 struct drm_device *dev = obj->dev;
254 int ret;
231 255
232 /* 256 /*
233 * Get the user-visible handle using idr. 257 * Get the user-visible handle using idr.
@@ -248,6 +272,15 @@ again:
248 return ret; 272 return ret;
249 273
250 drm_gem_object_handle_reference(obj); 274 drm_gem_object_handle_reference(obj);
275
276 if (dev->driver->gem_open_object) {
277 ret = dev->driver->gem_open_object(obj, file_priv);
278 if (ret) {
279 drm_gem_handle_delete(file_priv, *handlep);
280 return ret;
281 }
282 }
283
251 return 0; 284 return 0;
252} 285}
253EXPORT_SYMBOL(drm_gem_handle_create); 286EXPORT_SYMBOL(drm_gem_handle_create);
@@ -402,7 +435,12 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
402static int 435static int
403drm_gem_object_release_handle(int id, void *ptr, void *data) 436drm_gem_object_release_handle(int id, void *ptr, void *data)
404{ 437{
438 struct drm_file *file_priv = data;
405 struct drm_gem_object *obj = ptr; 439 struct drm_gem_object *obj = ptr;
440 struct drm_device *dev = obj->dev;
441
442 if (dev->driver->gem_close_object)
443 dev->driver->gem_close_object(obj, file_priv);
406 444
407 drm_gem_object_handle_unreference_unlocked(obj); 445 drm_gem_object_handle_unreference_unlocked(obj);
408 446
@@ -418,7 +456,7 @@ void
418drm_gem_release(struct drm_device *dev, struct drm_file *file_private) 456drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
419{ 457{
420 idr_for_each(&file_private->object_idr, 458 idr_for_each(&file_private->object_idr,
421 &drm_gem_object_release_handle, NULL); 459 &drm_gem_object_release_handle, file_private);
422 460
423 idr_remove_all(&file_private->object_idr); 461 idr_remove_all(&file_private->object_idr);
424 idr_destroy(&file_private->object_idr); 462 idr_destroy(&file_private->object_idr);
@@ -427,7 +465,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
427void 465void
428drm_gem_object_release(struct drm_gem_object *obj) 466drm_gem_object_release(struct drm_gem_object *obj)
429{ 467{
430 fput(obj->filp); 468 if (obj->filp)
469 fput(obj->filp);
431} 470}
432EXPORT_SYMBOL(drm_gem_object_release); 471EXPORT_SYMBOL(drm_gem_object_release);
433 472
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c2d32f20e2fb..ad74fb4dc542 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -994,9 +994,10 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
994{ 994{
995 const char *name; 995 const char *name;
996 unsigned int namelen; 996 unsigned int namelen;
997 int res_specified = 0, bpp_specified = 0, refresh_specified = 0; 997 bool res_specified = false, bpp_specified = false, refresh_specified = false;
998 unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0; 998 unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
999 int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0; 999 bool yres_specified = false, cvt = false, rb = false;
1000 bool interlace = false, margins = false, was_digit = false;
1000 int i; 1001 int i;
1001 enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; 1002 enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
1002 1003
@@ -1015,54 +1016,65 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
1015 for (i = namelen-1; i >= 0; i--) { 1016 for (i = namelen-1; i >= 0; i--) {
1016 switch (name[i]) { 1017 switch (name[i]) {
1017 case '@': 1018 case '@':
1018 namelen = i;
1019 if (!refresh_specified && !bpp_specified && 1019 if (!refresh_specified && !bpp_specified &&
1020 !yres_specified) { 1020 !yres_specified && !cvt && !rb && was_digit) {
1021 refresh = simple_strtol(&name[i+1], NULL, 10); 1021 refresh = simple_strtol(&name[i+1], NULL, 10);
1022 refresh_specified = 1; 1022 refresh_specified = true;
1023 if (cvt || rb) 1023 was_digit = false;
1024 cvt = 0;
1025 } else 1024 } else
1026 goto done; 1025 goto done;
1027 break; 1026 break;
1028 case '-': 1027 case '-':
1029 namelen = i; 1028 if (!bpp_specified && !yres_specified && !cvt &&
1030 if (!bpp_specified && !yres_specified) { 1029 !rb && was_digit) {
1031 bpp = simple_strtol(&name[i+1], NULL, 10); 1030 bpp = simple_strtol(&name[i+1], NULL, 10);
1032 bpp_specified = 1; 1031 bpp_specified = true;
1033 if (cvt || rb) 1032 was_digit = false;
1034 cvt = 0;
1035 } else 1033 } else
1036 goto done; 1034 goto done;
1037 break; 1035 break;
1038 case 'x': 1036 case 'x':
1039 if (!yres_specified) { 1037 if (!yres_specified && was_digit) {
1040 yres = simple_strtol(&name[i+1], NULL, 10); 1038 yres = simple_strtol(&name[i+1], NULL, 10);
1041 yres_specified = 1; 1039 yres_specified = true;
1040 was_digit = false;
1042 } else 1041 } else
1043 goto done; 1042 goto done;
1044 case '0' ... '9': 1043 case '0' ... '9':
1044 was_digit = true;
1045 break; 1045 break;
1046 case 'M': 1046 case 'M':
1047 if (!yres_specified) 1047 if (yres_specified || cvt || was_digit)
1048 cvt = 1; 1048 goto done;
1049 cvt = true;
1049 break; 1050 break;
1050 case 'R': 1051 case 'R':
1051 if (cvt) 1052 if (yres_specified || cvt || rb || was_digit)
1052 rb = 1; 1053 goto done;
1054 rb = true;
1053 break; 1055 break;
1054 case 'm': 1056 case 'm':
1055 if (!cvt) 1057 if (cvt || yres_specified || was_digit)
1056 margins = 1; 1058 goto done;
1059 margins = true;
1057 break; 1060 break;
1058 case 'i': 1061 case 'i':
1059 if (!cvt) 1062 if (cvt || yres_specified || was_digit)
1060 interlace = 1; 1063 goto done;
1064 interlace = true;
1061 break; 1065 break;
1062 case 'e': 1066 case 'e':
1067 if (yres_specified || bpp_specified || refresh_specified ||
1068 was_digit || (force != DRM_FORCE_UNSPECIFIED))
1069 goto done;
1070
1063 force = DRM_FORCE_ON; 1071 force = DRM_FORCE_ON;
1064 break; 1072 break;
1065 case 'D': 1073 case 'D':
1074 if (yres_specified || bpp_specified || refresh_specified ||
1075 was_digit || (force != DRM_FORCE_UNSPECIFIED))
1076 goto done;
1077
1066 if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) && 1078 if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
1067 (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB)) 1079 (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
1068 force = DRM_FORCE_ON; 1080 force = DRM_FORCE_ON;
@@ -1070,17 +1082,37 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
1070 force = DRM_FORCE_ON_DIGITAL; 1082 force = DRM_FORCE_ON_DIGITAL;
1071 break; 1083 break;
1072 case 'd': 1084 case 'd':
1085 if (yres_specified || bpp_specified || refresh_specified ||
1086 was_digit || (force != DRM_FORCE_UNSPECIFIED))
1087 goto done;
1088
1073 force = DRM_FORCE_OFF; 1089 force = DRM_FORCE_OFF;
1074 break; 1090 break;
1075 default: 1091 default:
1076 goto done; 1092 goto done;
1077 } 1093 }
1078 } 1094 }
1095
1079 if (i < 0 && yres_specified) { 1096 if (i < 0 && yres_specified) {
1080 xres = simple_strtol(name, NULL, 10); 1097 char *ch;
1081 res_specified = 1; 1098 xres = simple_strtol(name, &ch, 10);
1099 if ((ch != NULL) && (*ch == 'x'))
1100 res_specified = true;
1101 else
1102 i = ch - name;
1103 } else if (!yres_specified && was_digit) {
1104 /* catch mode that begins with digits but has no 'x' */
1105 i = 0;
1082 } 1106 }
1083done: 1107done:
1108 if (i >= 0) {
1109 printk(KERN_WARNING
1110 "parse error at position %i in video mode '%s'\n",
1111 i, name);
1112 mode->specified = false;
1113 return false;
1114 }
1115
1084 if (res_specified) { 1116 if (res_specified) {
1085 mode->specified = true; 1117 mode->specified = true;
1086 mode->xres = xres; 1118 mode->xres = xres;
@@ -1096,9 +1128,10 @@ done:
1096 mode->bpp_specified = true; 1128 mode->bpp_specified = true;
1097 mode->bpp = bpp; 1129 mode->bpp = bpp;
1098 } 1130 }
1099 mode->rb = rb ? true : false; 1131 mode->rb = rb;
1100 mode->cvt = cvt ? true : false; 1132 mode->cvt = cvt;
1101 mode->interlace = interlace ? true : false; 1133 mode->interlace = interlace;
1134 mode->margins = margins;
1102 mode->force = force; 1135 mode->force = force;
1103 1136
1104 return true; 1137 return true;
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 7223f06d8e58..2a8b6265ad3d 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -123,14 +123,15 @@ static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *mas
123{ 123{
124 int len, ret; 124 int len, ret;
125 125
126 master->unique_len = 10 + strlen(dev->platformdev->name); 126 master->unique_len = 13 + strlen(dev->platformdev->name);
127 master->unique_size = master->unique_len;
127 master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL); 128 master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
128 129
129 if (master->unique == NULL) 130 if (master->unique == NULL)
130 return -ENOMEM; 131 return -ENOMEM;
131 132
132 len = snprintf(master->unique, master->unique_len, 133 len = snprintf(master->unique, master->unique_len,
133 "platform:%s", dev->platformdev->name); 134 "platform:%s:%02d", dev->platformdev->name, dev->platformdev->id);
134 135
135 if (len > master->unique_len) { 136 if (len > master->unique_len) {
136 DRM_ERROR("Unique buffer overflowed\n"); 137 DRM_ERROR("Unique buffer overflowed\n");
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0a893f7400fa..e2662497d50f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
865 MEMSTAT_VID_SHIFT); 865 MEMSTAT_VID_SHIFT);
866 seq_printf(m, "Current P-state: %d\n", 866 seq_printf(m, "Current P-state: %d\n",
867 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 867 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
868 } else if (IS_GEN6(dev)) { 868 } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
869 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 869 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
870 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 870 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
871 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 871 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1123,6 +1123,44 @@ static int i915_emon_status(struct seq_file *m, void *unused)
1123 return 0; 1123 return 0;
1124} 1124}
1125 1125
1126static int i915_ring_freq_table(struct seq_file *m, void *unused)
1127{
1128 struct drm_info_node *node = (struct drm_info_node *) m->private;
1129 struct drm_device *dev = node->minor->dev;
1130 drm_i915_private_t *dev_priv = dev->dev_private;
1131 int ret;
1132 int gpu_freq, ia_freq;
1133
1134 if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
1135 seq_printf(m, "unsupported on this chipset\n");
1136 return 0;
1137 }
1138
1139 ret = mutex_lock_interruptible(&dev->struct_mutex);
1140 if (ret)
1141 return ret;
1142
1143 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
1144
1145 for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
1146 gpu_freq++) {
1147 I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
1148 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
1149 GEN6_PCODE_READ_MIN_FREQ_TABLE);
1150 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
1151 GEN6_PCODE_READY) == 0, 10)) {
1152 DRM_ERROR("pcode read of freq table timed out\n");
1153 continue;
1154 }
1155 ia_freq = I915_READ(GEN6_PCODE_DATA);
1156 seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
1157 }
1158
1159 mutex_unlock(&dev->struct_mutex);
1160
1161 return 0;
1162}
1163
1126static int i915_gfxec(struct seq_file *m, void *unused) 1164static int i915_gfxec(struct seq_file *m, void *unused)
1127{ 1165{
1128 struct drm_info_node *node = (struct drm_info_node *) m->private; 1166 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1430,6 +1468,7 @@ static struct drm_info_list i915_debugfs_list[] = {
1430 {"i915_inttoext_table", i915_inttoext_table, 0}, 1468 {"i915_inttoext_table", i915_inttoext_table, 0},
1431 {"i915_drpc_info", i915_drpc_info, 0}, 1469 {"i915_drpc_info", i915_drpc_info, 0},
1432 {"i915_emon_status", i915_emon_status, 0}, 1470 {"i915_emon_status", i915_emon_status, 0},
1471 {"i915_ring_freq_table", i915_ring_freq_table, 0},
1433 {"i915_gfxec", i915_gfxec, 0}, 1472 {"i915_gfxec", i915_gfxec, 0},
1434 {"i915_fbc_status", i915_fbc_status, 0}, 1473 {"i915_fbc_status", i915_fbc_status, 0},
1435 {"i915_sr_status", i915_sr_status, 0}, 1474 {"i915_sr_status", i915_sr_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 296fbd66f0e1..12712824a6d2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1073,6 +1073,9 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1073 unsigned long cfb_base; 1073 unsigned long cfb_base;
1074 unsigned long ll_base = 0; 1074 unsigned long ll_base = 0;
1075 1075
1076 /* Just in case the BIOS is doing something questionable. */
1077 intel_disable_fbc(dev);
1078
1076 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); 1079 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
1077 if (compressed_fb) 1080 if (compressed_fb)
1078 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); 1081 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
@@ -1099,7 +1102,6 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1099 1102
1100 dev_priv->cfb_size = size; 1103 dev_priv->cfb_size = size;
1101 1104
1102 intel_disable_fbc(dev);
1103 dev_priv->compressed_fb = compressed_fb; 1105 dev_priv->compressed_fb = compressed_fb;
1104 if (HAS_PCH_SPLIT(dev)) 1106 if (HAS_PCH_SPLIT(dev))
1105 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); 1107 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index eb91e2dd7914..ce045a8cf82c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -37,38 +37,70 @@
37#include <linux/console.h> 37#include <linux/console.h>
38#include "drm_crtc_helper.h" 38#include "drm_crtc_helper.h"
39 39
40static int i915_modeset = -1; 40static int i915_modeset __read_mostly = -1;
41module_param_named(modeset, i915_modeset, int, 0400); 41module_param_named(modeset, i915_modeset, int, 0400);
42MODULE_PARM_DESC(modeset,
43 "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
44 "1=on, -1=force vga console preference [default])");
42 45
43unsigned int i915_fbpercrtc = 0; 46unsigned int i915_fbpercrtc __always_unused = 0;
44module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); 47module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
45 48
46int i915_panel_ignore_lid = 0; 49int i915_panel_ignore_lid __read_mostly = 0;
47module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); 50module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
51MODULE_PARM_DESC(panel_ignore_lid,
52 "Override lid status (0=autodetect [default], 1=lid open, "
53 "-1=lid closed)");
48 54
49unsigned int i915_powersave = 1; 55unsigned int i915_powersave __read_mostly = 1;
50module_param_named(powersave, i915_powersave, int, 0600); 56module_param_named(powersave, i915_powersave, int, 0600);
57MODULE_PARM_DESC(powersave,
58 "Enable powersavings, fbc, downclocking, etc. (default: true)");
51 59
52unsigned int i915_semaphores = 0; 60unsigned int i915_semaphores __read_mostly = 0;
53module_param_named(semaphores, i915_semaphores, int, 0600); 61module_param_named(semaphores, i915_semaphores, int, 0600);
62MODULE_PARM_DESC(semaphores,
63 "Use semaphores for inter-ring sync (default: false)");
54 64
55unsigned int i915_enable_rc6 = 0; 65unsigned int i915_enable_rc6 __read_mostly = 0;
56module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); 66module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
67MODULE_PARM_DESC(i915_enable_rc6,
68 "Enable power-saving render C-state 6 (default: true)");
57 69
58unsigned int i915_enable_fbc = 0; 70unsigned int i915_enable_fbc __read_mostly = 1;
59module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); 71module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
72MODULE_PARM_DESC(i915_enable_fbc,
73 "Enable frame buffer compression for power savings "
74 "(default: false)");
60 75
61unsigned int i915_lvds_downclock = 0; 76unsigned int i915_lvds_downclock __read_mostly = 0;
62module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); 77module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
78MODULE_PARM_DESC(lvds_downclock,
79 "Use panel (LVDS/eDP) downclocking for power savings "
80 "(default: false)");
63 81
64unsigned int i915_panel_use_ssc = 1; 82unsigned int i915_panel_use_ssc __read_mostly = 1;
65module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); 83module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
84MODULE_PARM_DESC(lvds_use_ssc,
85 "Use Spread Spectrum Clock with panels [LVDS/eDP] "
86 "(default: true)");
66 87
67int i915_vbt_sdvo_panel_type = -1; 88int i915_vbt_sdvo_panel_type __read_mostly = -1;
68module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); 89module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
90MODULE_PARM_DESC(vbt_sdvo_panel_type,
91 "Override selection of SDVO panel mode in the VBT "
92 "(default: auto)");
69 93
70static bool i915_try_reset = true; 94static bool i915_try_reset __read_mostly = true;
71module_param_named(reset, i915_try_reset, bool, 0600); 95module_param_named(reset, i915_try_reset, bool, 0600);
96MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
97
98bool i915_enable_hangcheck __read_mostly = true;
99module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
100MODULE_PARM_DESC(enable_hangcheck,
101 "Periodically check GPU activity for detecting hangs. "
102 "WARNING: Disabling this can cause system wide hangs. "
103 "(default: true)");
72 104
73static struct drm_driver driver; 105static struct drm_driver driver;
74extern int intel_agp_enabled; 106extern int intel_agp_enabled;
@@ -345,12 +377,17 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
345 377
346void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) 378void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
347{ 379{
348 int loop = 500; 380 if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES ) {
349 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); 381 int loop = 500;
350 while (fifo < 20 && loop--) { 382 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
351 udelay(10); 383 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
352 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); 384 udelay(10);
385 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
386 }
387 WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
388 dev_priv->gt_fifo_count = fifo;
353 } 389 }
390 dev_priv->gt_fifo_count--;
354} 391}
355 392
356static int i915_drm_freeze(struct drm_device *dev) 393static int i915_drm_freeze(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ce7914c4c044..6867e193d85e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -214,6 +214,8 @@ struct drm_i915_display_funcs {
214 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 214 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
215 struct drm_framebuffer *fb, 215 struct drm_framebuffer *fb,
216 struct drm_i915_gem_object *obj); 216 struct drm_i915_gem_object *obj);
217 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
218 int x, int y);
217 /* clock updates for mode set */ 219 /* clock updates for mode set */
218 /* cursor updates */ 220 /* cursor updates */
219 /* render clock increase/decrease */ 221 /* render clock increase/decrease */
@@ -265,6 +267,7 @@ enum intel_pch {
265#define QUIRK_LVDS_SSC_DISABLE (1<<1) 267#define QUIRK_LVDS_SSC_DISABLE (1<<1)
266 268
267struct intel_fbdev; 269struct intel_fbdev;
270struct intel_fbc_work;
268 271
269typedef struct drm_i915_private { 272typedef struct drm_i915_private {
270 struct drm_device *dev; 273 struct drm_device *dev;
@@ -275,6 +278,7 @@ typedef struct drm_i915_private {
275 int relative_constants_mode; 278 int relative_constants_mode;
276 279
277 void __iomem *regs; 280 void __iomem *regs;
281 u32 gt_fifo_count;
278 282
279 struct intel_gmbus { 283 struct intel_gmbus {
280 struct i2c_adapter adapter; 284 struct i2c_adapter adapter;
@@ -329,11 +333,10 @@ typedef struct drm_i915_private {
329 uint32_t last_instdone1; 333 uint32_t last_instdone1;
330 334
331 unsigned long cfb_size; 335 unsigned long cfb_size;
332 unsigned long cfb_pitch; 336 unsigned int cfb_fb;
333 unsigned long cfb_offset; 337 enum plane cfb_plane;
334 int cfb_fence;
335 int cfb_plane;
336 int cfb_y; 338 int cfb_y;
339 struct intel_fbc_work *fbc_work;
337 340
338 struct intel_opregion opregion; 341 struct intel_opregion opregion;
339 342
@@ -986,15 +989,16 @@ struct drm_i915_file_private {
986 989
987extern struct drm_ioctl_desc i915_ioctls[]; 990extern struct drm_ioctl_desc i915_ioctls[];
988extern int i915_max_ioctl; 991extern int i915_max_ioctl;
989extern unsigned int i915_fbpercrtc; 992extern unsigned int i915_fbpercrtc __always_unused;
990extern int i915_panel_ignore_lid; 993extern int i915_panel_ignore_lid __read_mostly;
991extern unsigned int i915_powersave; 994extern unsigned int i915_powersave __read_mostly;
992extern unsigned int i915_semaphores; 995extern unsigned int i915_semaphores __read_mostly;
993extern unsigned int i915_lvds_downclock; 996extern unsigned int i915_lvds_downclock __read_mostly;
994extern unsigned int i915_panel_use_ssc; 997extern unsigned int i915_panel_use_ssc __read_mostly;
995extern int i915_vbt_sdvo_panel_type; 998extern int i915_vbt_sdvo_panel_type __read_mostly;
996extern unsigned int i915_enable_rc6; 999extern unsigned int i915_enable_rc6 __read_mostly;
997extern unsigned int i915_enable_fbc; 1000extern unsigned int i915_enable_fbc __read_mostly;
1001extern bool i915_enable_hangcheck __read_mostly;
998 1002
999extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1003extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1000extern int i915_resume(struct drm_device *dev); 1004extern int i915_resume(struct drm_device *dev);
@@ -1164,7 +1168,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
1164int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, 1168int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
1165 uint32_t read_domains, 1169 uint32_t read_domains,
1166 uint32_t write_domain); 1170 uint32_t write_domain);
1167int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj); 1171int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
1168int __must_check i915_gem_init_ringbuffer(struct drm_device *dev); 1172int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
1169void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 1173void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
1170void i915_gem_do_init(struct drm_device *dev, 1174void i915_gem_do_init(struct drm_device *dev,
@@ -1183,7 +1187,8 @@ int __must_check
1183i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 1187i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
1184 bool write); 1188 bool write);
1185int __must_check 1189int __must_check
1186i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, 1190i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
1191 u32 alignment,
1187 struct intel_ring_buffer *pipelined); 1192 struct intel_ring_buffer *pipelined);
1188int i915_gem_attach_phys_object(struct drm_device *dev, 1193int i915_gem_attach_phys_object(struct drm_device *dev,
1189 struct drm_i915_gem_object *obj, 1194 struct drm_i915_gem_object *obj,
@@ -1199,9 +1204,14 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1199 uint32_t size, 1204 uint32_t size,
1200 int tiling_mode); 1205 int tiling_mode);
1201 1206
1207int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
1208 enum i915_cache_level cache_level);
1209
1202/* i915_gem_gtt.c */ 1210/* i915_gem_gtt.c */
1203void i915_gem_restore_gtt_mappings(struct drm_device *dev); 1211void i915_gem_restore_gtt_mappings(struct drm_device *dev);
1204int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); 1212int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
1213void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
1214 enum i915_cache_level cache_level);
1205void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); 1215void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
1206 1216
1207/* i915_gem_evict.c */ 1217/* i915_gem_evict.c */
@@ -1283,12 +1293,8 @@ extern void intel_modeset_init(struct drm_device *dev);
1283extern void intel_modeset_gem_init(struct drm_device *dev); 1293extern void intel_modeset_gem_init(struct drm_device *dev);
1284extern void intel_modeset_cleanup(struct drm_device *dev); 1294extern void intel_modeset_cleanup(struct drm_device *dev);
1285extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 1295extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
1286extern void i8xx_disable_fbc(struct drm_device *dev);
1287extern void g4x_disable_fbc(struct drm_device *dev);
1288extern void ironlake_disable_fbc(struct drm_device *dev);
1289extern void intel_disable_fbc(struct drm_device *dev);
1290extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
1291extern bool intel_fbc_enabled(struct drm_device *dev); 1296extern bool intel_fbc_enabled(struct drm_device *dev);
1297extern void intel_disable_fbc(struct drm_device *dev);
1292extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 1298extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
1293extern void ironlake_enable_rc6(struct drm_device *dev); 1299extern void ironlake_enable_rc6(struct drm_device *dev);
1294extern void gen6_set_rps(struct drm_device *dev, u8 val); 1300extern void gen6_set_rps(struct drm_device *dev, u8 val);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a087e1bf0c2f..d1cd8b89f47d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1763,8 +1763,11 @@ i915_add_request(struct intel_ring_buffer *ring,
1763 ring->outstanding_lazy_request = false; 1763 ring->outstanding_lazy_request = false;
1764 1764
1765 if (!dev_priv->mm.suspended) { 1765 if (!dev_priv->mm.suspended) {
1766 mod_timer(&dev_priv->hangcheck_timer, 1766 if (i915_enable_hangcheck) {
1767 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 1767 mod_timer(&dev_priv->hangcheck_timer,
1768 jiffies +
1769 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1770 }
1768 if (was_empty) 1771 if (was_empty)
1769 queue_delayed_work(dev_priv->wq, 1772 queue_delayed_work(dev_priv->wq,
1770 &dev_priv->mm.retire_work, HZ); 1773 &dev_priv->mm.retire_work, HZ);
@@ -2135,6 +2138,30 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2135 return 0; 2138 return 0;
2136} 2139}
2137 2140
2141static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2142{
2143 u32 old_write_domain, old_read_domains;
2144
2145 /* Act a barrier for all accesses through the GTT */
2146 mb();
2147
2148 /* Force a pagefault for domain tracking on next user access */
2149 i915_gem_release_mmap(obj);
2150
2151 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2152 return;
2153
2154 old_read_domains = obj->base.read_domains;
2155 old_write_domain = obj->base.write_domain;
2156
2157 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2158 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2159
2160 trace_i915_gem_object_change_domain(obj,
2161 old_read_domains,
2162 old_write_domain);
2163}
2164
2138/** 2165/**
2139 * Unbinds an object from the GTT aperture. 2166 * Unbinds an object from the GTT aperture.
2140 */ 2167 */
@@ -2151,23 +2178,28 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2151 return -EINVAL; 2178 return -EINVAL;
2152 } 2179 }
2153 2180
2154 /* blow away mappings if mapped through GTT */ 2181 ret = i915_gem_object_finish_gpu(obj);
2155 i915_gem_release_mmap(obj);
2156
2157 /* Move the object to the CPU domain to ensure that
2158 * any possible CPU writes while it's not in the GTT
2159 * are flushed when we go to remap it. This will
2160 * also ensure that all pending GPU writes are finished
2161 * before we unbind.
2162 */
2163 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2164 if (ret == -ERESTARTSYS) 2182 if (ret == -ERESTARTSYS)
2165 return ret; 2183 return ret;
2166 /* Continue on if we fail due to EIO, the GPU is hung so we 2184 /* Continue on if we fail due to EIO, the GPU is hung so we
2167 * should be safe and we need to cleanup or else we might 2185 * should be safe and we need to cleanup or else we might
2168 * cause memory corruption through use-after-free. 2186 * cause memory corruption through use-after-free.
2169 */ 2187 */
2188
2189 i915_gem_object_finish_gtt(obj);
2190
2191 /* Move the object to the CPU domain to ensure that
2192 * any possible CPU writes while it's not in the GTT
2193 * are flushed when we go to remap it.
2194 */
2195 if (ret == 0)
2196 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2197 if (ret == -ERESTARTSYS)
2198 return ret;
2170 if (ret) { 2199 if (ret) {
2200 /* In the event of a disaster, abandon all caches and
2201 * hope for the best.
2202 */
2171 i915_gem_clflush_object(obj); 2203 i915_gem_clflush_object(obj);
2172 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; 2204 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2173 } 2205 }
@@ -2996,51 +3028,139 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2996 return 0; 3028 return 0;
2997} 3029}
2998 3030
3031int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3032 enum i915_cache_level cache_level)
3033{
3034 int ret;
3035
3036 if (obj->cache_level == cache_level)
3037 return 0;
3038
3039 if (obj->pin_count) {
3040 DRM_DEBUG("can not change the cache level of pinned objects\n");
3041 return -EBUSY;
3042 }
3043
3044 if (obj->gtt_space) {
3045 ret = i915_gem_object_finish_gpu(obj);
3046 if (ret)
3047 return ret;
3048
3049 i915_gem_object_finish_gtt(obj);
3050
3051 /* Before SandyBridge, you could not use tiling or fence
3052 * registers with snooped memory, so relinquish any fences
3053 * currently pointing to our region in the aperture.
3054 */
3055 if (INTEL_INFO(obj->base.dev)->gen < 6) {
3056 ret = i915_gem_object_put_fence(obj);
3057 if (ret)
3058 return ret;
3059 }
3060
3061 i915_gem_gtt_rebind_object(obj, cache_level);
3062 }
3063
3064 if (cache_level == I915_CACHE_NONE) {
3065 u32 old_read_domains, old_write_domain;
3066
3067 /* If we're coming from LLC cached, then we haven't
3068 * actually been tracking whether the data is in the
3069 * CPU cache or not, since we only allow one bit set
3070 * in obj->write_domain and have been skipping the clflushes.
3071 * Just set it to the CPU cache for now.
3072 */
3073 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3074 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3075
3076 old_read_domains = obj->base.read_domains;
3077 old_write_domain = obj->base.write_domain;
3078
3079 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3080 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3081
3082 trace_i915_gem_object_change_domain(obj,
3083 old_read_domains,
3084 old_write_domain);
3085 }
3086
3087 obj->cache_level = cache_level;
3088 return 0;
3089}
3090
2999/* 3091/*
3000 * Prepare buffer for display plane. Use uninterruptible for possible flush 3092 * Prepare buffer for display plane (scanout, cursors, etc).
3001 * wait, as in modesetting process we're not supposed to be interrupted. 3093 * Can be called from an uninterruptible phase (modesetting) and allows
3094 * any flushes to be pipelined (for pageflips).
3095 *
3096 * For the display plane, we want to be in the GTT but out of any write
3097 * domains. So in many ways this looks like set_to_gtt_domain() apart from the
3098 * ability to pipeline the waits, pinning and any additional subtleties
3099 * that may differentiate the display plane from ordinary buffers.
3002 */ 3100 */
3003int 3101int
3004i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, 3102i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3103 u32 alignment,
3005 struct intel_ring_buffer *pipelined) 3104 struct intel_ring_buffer *pipelined)
3006{ 3105{
3007 uint32_t old_read_domains; 3106 u32 old_read_domains, old_write_domain;
3008 int ret; 3107 int ret;
3009 3108
3010 /* Not valid to be called on unbound objects. */
3011 if (obj->gtt_space == NULL)
3012 return -EINVAL;
3013
3014 ret = i915_gem_object_flush_gpu_write_domain(obj); 3109 ret = i915_gem_object_flush_gpu_write_domain(obj);
3015 if (ret) 3110 if (ret)
3016 return ret; 3111 return ret;
3017 3112
3018
3019 /* Currently, we are always called from an non-interruptible context. */
3020 if (pipelined != obj->ring) { 3113 if (pipelined != obj->ring) {
3021 ret = i915_gem_object_wait_rendering(obj); 3114 ret = i915_gem_object_wait_rendering(obj);
3022 if (ret) 3115 if (ret)
3023 return ret; 3116 return ret;
3024 } 3117 }
3025 3118
3119 /* The display engine is not coherent with the LLC cache on gen6. As
3120 * a result, we make sure that the pinning that is about to occur is
3121 * done with uncached PTEs. This is lowest common denominator for all
3122 * chipsets.
3123 *
3124 * However for gen6+, we could do better by using the GFDT bit instead
3125 * of uncaching, which would allow us to flush all the LLC-cached data
3126 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3127 */
3128 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3129 if (ret)
3130 return ret;
3131
3132 /* As the user may map the buffer once pinned in the display plane
3133 * (e.g. libkms for the bootup splash), we have to ensure that we
3134 * always use map_and_fenceable for all scanout buffers.
3135 */
3136 ret = i915_gem_object_pin(obj, alignment, true);
3137 if (ret)
3138 return ret;
3139
3026 i915_gem_object_flush_cpu_write_domain(obj); 3140 i915_gem_object_flush_cpu_write_domain(obj);
3027 3141
3142 old_write_domain = obj->base.write_domain;
3028 old_read_domains = obj->base.read_domains; 3143 old_read_domains = obj->base.read_domains;
3144
3145 /* It should now be out of any other write domains, and we can update
3146 * the domain values for our changes.
3147 */
3148 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3029 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3149 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3030 3150
3031 trace_i915_gem_object_change_domain(obj, 3151 trace_i915_gem_object_change_domain(obj,
3032 old_read_domains, 3152 old_read_domains,
3033 obj->base.write_domain); 3153 old_write_domain);
3034 3154
3035 return 0; 3155 return 0;
3036} 3156}
3037 3157
3038int 3158int
3039i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj) 3159i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3040{ 3160{
3041 int ret; 3161 int ret;
3042 3162
3043 if (!obj->active) 3163 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3044 return 0; 3164 return 0;
3045 3165
3046 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 3166 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
@@ -3049,6 +3169,9 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
3049 return ret; 3169 return ret;
3050 } 3170 }
3051 3171
3172 /* Ensure that we invalidate the GPU's caches and TLBs. */
3173 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3174
3052 return i915_gem_object_wait_rendering(obj); 3175 return i915_gem_object_wait_rendering(obj);
3053} 3176}
3054 3177
@@ -3575,7 +3698,23 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3575 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3698 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3576 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3699 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3577 3700
3578 obj->cache_level = I915_CACHE_NONE; 3701 if (IS_GEN6(dev)) {
3702 /* On Gen6, we can have the GPU use the LLC (the CPU
3703 * cache) for about a 10% performance improvement
3704 * compared to uncached. Graphics requests other than
3705 * display scanout are coherent with the CPU in
3706 * accessing this cache. This means in this mode we
3707 * don't need to clflush on the CPU side, and on the
3708 * GPU side we only need to flush internal caches to
3709 * get data visible to the CPU.
3710 *
3711 * However, we maintain the display planes as UC, and so
3712 * need to rebind when first used as such.
3713 */
3714 obj->cache_level = I915_CACHE_LLC;
3715 } else
3716 obj->cache_level = I915_CACHE_NONE;
3717
3579 obj->base.driver_private = NULL; 3718 obj->base.driver_private = NULL;
3580 obj->fence_reg = I915_FENCE_REG_NONE; 3719 obj->fence_reg = I915_FENCE_REG_NONE;
3581 INIT_LIST_HEAD(&obj->mm_list); 3720 INIT_LIST_HEAD(&obj->mm_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e46b645773cf..7a709cd8d543 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -59,24 +59,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
59 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); 59 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
60 60
61 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 61 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
62 unsigned int agp_type =
63 cache_level_to_agp_type(dev, obj->cache_level);
64
65 i915_gem_clflush_object(obj); 62 i915_gem_clflush_object(obj);
66 63 i915_gem_gtt_rebind_object(obj, obj->cache_level);
67 if (dev_priv->mm.gtt->needs_dmar) {
68 BUG_ON(!obj->sg_list);
69
70 intel_gtt_insert_sg_entries(obj->sg_list,
71 obj->num_sg,
72 obj->gtt_space->start >> PAGE_SHIFT,
73 agp_type);
74 } else
75 intel_gtt_insert_pages(obj->gtt_space->start
76 >> PAGE_SHIFT,
77 obj->base.size >> PAGE_SHIFT,
78 obj->pages,
79 agp_type);
80 } 64 }
81 65
82 intel_gtt_chipset_flush(); 66 intel_gtt_chipset_flush();
@@ -110,6 +94,27 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
110 return 0; 94 return 0;
111} 95}
112 96
97void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
98 enum i915_cache_level cache_level)
99{
100 struct drm_device *dev = obj->base.dev;
101 struct drm_i915_private *dev_priv = dev->dev_private;
102 unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
103
104 if (dev_priv->mm.gtt->needs_dmar) {
105 BUG_ON(!obj->sg_list);
106
107 intel_gtt_insert_sg_entries(obj->sg_list,
108 obj->num_sg,
109 obj->gtt_space->start >> PAGE_SHIFT,
110 agp_type);
111 } else
112 intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
113 obj->base.size >> PAGE_SHIFT,
114 obj->pages,
115 agp_type);
116}
117
113void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) 118void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
114{ 119{
115 intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, 120 intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3b03f85ea627..23d1ae67d279 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -361,10 +361,12 @@ static void notify_ring(struct drm_device *dev,
361 361
362 ring->irq_seqno = seqno; 362 ring->irq_seqno = seqno;
363 wake_up_all(&ring->irq_queue); 363 wake_up_all(&ring->irq_queue);
364 364 if (i915_enable_hangcheck) {
365 dev_priv->hangcheck_count = 0; 365 dev_priv->hangcheck_count = 0;
366 mod_timer(&dev_priv->hangcheck_timer, 366 mod_timer(&dev_priv->hangcheck_timer,
367 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 367 jiffies +
368 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
369 }
368} 370}
369 371
370static void gen6_pm_rps_work(struct work_struct *work) 372static void gen6_pm_rps_work(struct work_struct *work)
@@ -1664,6 +1666,9 @@ void i915_hangcheck_elapsed(unsigned long data)
1664 uint32_t acthd, instdone, instdone1; 1666 uint32_t acthd, instdone, instdone1;
1665 bool err = false; 1667 bool err = false;
1666 1668
1669 if (!i915_enable_hangcheck)
1670 return;
1671
1667 /* If all work is done then ACTHD clearly hasn't advanced. */ 1672 /* If all work is done then ACTHD clearly hasn't advanced. */
1668 if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) && 1673 if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
1669 i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) && 1674 i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5d5def756c9e..02db299f621a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -579,6 +579,7 @@
579#define DPFC_CTL_PLANEA (0<<30) 579#define DPFC_CTL_PLANEA (0<<30)
580#define DPFC_CTL_PLANEB (1<<30) 580#define DPFC_CTL_PLANEB (1<<30)
581#define DPFC_CTL_FENCE_EN (1<<29) 581#define DPFC_CTL_FENCE_EN (1<<29)
582#define DPFC_CTL_PERSISTENT_MODE (1<<25)
582#define DPFC_SR_EN (1<<10) 583#define DPFC_SR_EN (1<<10)
583#define DPFC_CTL_LIMIT_1X (0<<6) 584#define DPFC_CTL_LIMIT_1X (0<<6)
584#define DPFC_CTL_LIMIT_2X (1<<6) 585#define DPFC_CTL_LIMIT_2X (1<<6)
@@ -3360,6 +3361,7 @@
3360#define FORCEWAKE_ACK 0x130090 3361#define FORCEWAKE_ACK 0x130090
3361 3362
3362#define GT_FIFO_FREE_ENTRIES 0x120008 3363#define GT_FIFO_FREE_ENTRIES 0x120008
3364#define GT_FIFO_NUM_RESERVED_ENTRIES 20
3363 3365
3364#define GEN6_RPNSWREQ 0xA008 3366#define GEN6_RPNSWREQ 0xA008
3365#define GEN6_TURBO_DISABLE (1<<31) 3367#define GEN6_TURBO_DISABLE (1<<31)
@@ -3434,7 +3436,9 @@
3434#define GEN6_PCODE_MAILBOX 0x138124 3436#define GEN6_PCODE_MAILBOX 0x138124
3435#define GEN6_PCODE_READY (1<<31) 3437#define GEN6_PCODE_READY (1<<31)
3436#define GEN6_READ_OC_PARAMS 0xc 3438#define GEN6_READ_OC_PARAMS 0xc
3437#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9 3439#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
3440#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
3438#define GEN6_PCODE_DATA 0x138128 3441#define GEN6_PCODE_DATA 0x138128
3442#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
3439 3443
3440#endif /* _I915_REG_H_ */ 3444#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5257cfc34c35..285758603ac8 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -760,15 +760,13 @@ static void i915_restore_display(struct drm_device *dev)
760 /* FIXME: restore TV & SDVO state */ 760 /* FIXME: restore TV & SDVO state */
761 761
762 /* only restore FBC info on the platform that supports FBC*/ 762 /* only restore FBC info on the platform that supports FBC*/
763 intel_disable_fbc(dev);
763 if (I915_HAS_FBC(dev)) { 764 if (I915_HAS_FBC(dev)) {
764 if (HAS_PCH_SPLIT(dev)) { 765 if (HAS_PCH_SPLIT(dev)) {
765 ironlake_disable_fbc(dev);
766 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); 766 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
767 } else if (IS_GM45(dev)) { 767 } else if (IS_GM45(dev)) {
768 g4x_disable_fbc(dev);
769 I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); 768 I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
770 } else { 769 } else {
771 i8xx_disable_fbc(dev);
772 I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); 770 I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
773 I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); 771 I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
774 I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); 772 I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
@@ -878,8 +876,10 @@ int i915_restore_state(struct drm_device *dev)
878 intel_init_emon(dev); 876 intel_init_emon(dev);
879 } 877 }
880 878
881 if (IS_GEN6(dev)) 879 if (IS_GEN6(dev)) {
882 gen6_enable_rps(dev_priv); 880 gen6_enable_rps(dev_priv);
881 gen6_update_ring_freq(dev_priv);
882 }
883 883
884 mutex_lock(&dev->struct_mutex); 884 mutex_lock(&dev->struct_mutex);
885 885
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 927442a11925..61abef8a8119 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -74,7 +74,7 @@ get_blocksize(void *p)
74 74
75static void 75static void
76fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, 76fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
77 struct lvds_dvo_timing *dvo_timing) 77 const struct lvds_dvo_timing *dvo_timing)
78{ 78{
79 panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | 79 panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
80 dvo_timing->hactive_lo; 80 dvo_timing->hactive_lo;
@@ -115,20 +115,75 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
115 drm_mode_set_name(panel_fixed_mode); 115 drm_mode_set_name(panel_fixed_mode);
116} 116}
117 117
118static bool
119lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
120 const struct lvds_dvo_timing *b)
121{
122 if (a->hactive_hi != b->hactive_hi ||
123 a->hactive_lo != b->hactive_lo)
124 return false;
125
126 if (a->hsync_off_hi != b->hsync_off_hi ||
127 a->hsync_off_lo != b->hsync_off_lo)
128 return false;
129
130 if (a->hsync_pulse_width != b->hsync_pulse_width)
131 return false;
132
133 if (a->hblank_hi != b->hblank_hi ||
134 a->hblank_lo != b->hblank_lo)
135 return false;
136
137 if (a->vactive_hi != b->vactive_hi ||
138 a->vactive_lo != b->vactive_lo)
139 return false;
140
141 if (a->vsync_off != b->vsync_off)
142 return false;
143
144 if (a->vsync_pulse_width != b->vsync_pulse_width)
145 return false;
146
147 if (a->vblank_hi != b->vblank_hi ||
148 a->vblank_lo != b->vblank_lo)
149 return false;
150
151 return true;
152}
153
154static const struct lvds_dvo_timing *
155get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
156 const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
157 int index)
158{
159 /*
160 * the size of fp_timing varies on the different platform.
161 * So calculate the DVO timing relative offset in LVDS data
162 * entry to get the DVO timing entry
163 */
164
165 int lfp_data_size =
166 lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
167 lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
168 int dvo_timing_offset =
169 lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
170 lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
171 char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index;
172
173 return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
174}
175
118/* Try to find integrated panel data */ 176/* Try to find integrated panel data */
119static void 177static void
120parse_lfp_panel_data(struct drm_i915_private *dev_priv, 178parse_lfp_panel_data(struct drm_i915_private *dev_priv,
121 struct bdb_header *bdb) 179 struct bdb_header *bdb)
122{ 180{
123 struct bdb_lvds_options *lvds_options; 181 const struct bdb_lvds_options *lvds_options;
124 struct bdb_lvds_lfp_data *lvds_lfp_data; 182 const struct bdb_lvds_lfp_data *lvds_lfp_data;
125 struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; 183 const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
126 struct bdb_lvds_lfp_data_entry *entry; 184 const struct lvds_dvo_timing *panel_dvo_timing;
127 struct lvds_dvo_timing *dvo_timing;
128 struct drm_display_mode *panel_fixed_mode; 185 struct drm_display_mode *panel_fixed_mode;
129 int lfp_data_size, dvo_timing_offset; 186 int i, downclock;
130 int i, temp_downclock;
131 struct drm_display_mode *temp_mode;
132 187
133 lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); 188 lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
134 if (!lvds_options) 189 if (!lvds_options)
@@ -150,75 +205,44 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
150 205
151 dev_priv->lvds_vbt = 1; 206 dev_priv->lvds_vbt = 1;
152 207
153 lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset - 208 panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
154 lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset; 209 lvds_lfp_data_ptrs,
155 entry = (struct bdb_lvds_lfp_data_entry *) 210 lvds_options->panel_type);
156 ((uint8_t *)lvds_lfp_data->data + (lfp_data_size *
157 lvds_options->panel_type));
158 dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
159 lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
160
161 /*
162 * the size of fp_timing varies on the different platform.
163 * So calculate the DVO timing relative offset in LVDS data
164 * entry to get the DVO timing entry
165 */
166 dvo_timing = (struct lvds_dvo_timing *)
167 ((unsigned char *)entry + dvo_timing_offset);
168 211
169 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); 212 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
170 if (!panel_fixed_mode) 213 if (!panel_fixed_mode)
171 return; 214 return;
172 215
173 fill_detail_timing_data(panel_fixed_mode, dvo_timing); 216 fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
174 217
175 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; 218 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
176 219
177 DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n"); 220 DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
178 drm_mode_debug_printmodeline(panel_fixed_mode); 221 drm_mode_debug_printmodeline(panel_fixed_mode);
179 222
180 temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
181 temp_downclock = panel_fixed_mode->clock;
182 /* 223 /*
183 * enumerate the LVDS panel timing info entry in VBT to check whether 224 * Iterate over the LVDS panel timing info to find the lowest clock
184 * the LVDS downclock is found. 225 * for the native resolution.
185 */ 226 */
227 downclock = panel_dvo_timing->clock;
186 for (i = 0; i < 16; i++) { 228 for (i = 0; i < 16; i++) {
187 entry = (struct bdb_lvds_lfp_data_entry *) 229 const struct lvds_dvo_timing *dvo_timing;
188 ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i)); 230
189 dvo_timing = (struct lvds_dvo_timing *) 231 dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
190 ((unsigned char *)entry + dvo_timing_offset); 232 lvds_lfp_data_ptrs,
191 233 i);
192 fill_detail_timing_data(temp_mode, dvo_timing); 234 if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
193 235 dvo_timing->clock < downclock)
194 if (temp_mode->hdisplay == panel_fixed_mode->hdisplay && 236 downclock = dvo_timing->clock;
195 temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
196 temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
197 temp_mode->htotal == panel_fixed_mode->htotal &&
198 temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
199 temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
200 temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
201 temp_mode->vtotal == panel_fixed_mode->vtotal &&
202 temp_mode->clock < temp_downclock) {
203 /*
204 * downclock is already found. But we expect
205 * to find the lower downclock.
206 */
207 temp_downclock = temp_mode->clock;
208 }
209 /* clear it to zero */
210 memset(temp_mode, 0, sizeof(*temp_mode));
211 } 237 }
212 kfree(temp_mode); 238
213 if (temp_downclock < panel_fixed_mode->clock && 239 if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
214 i915_lvds_downclock) {
215 dev_priv->lvds_downclock_avail = 1; 240 dev_priv->lvds_downclock_avail = 1;
216 dev_priv->lvds_downclock = temp_downclock; 241 dev_priv->lvds_downclock = downclock * 10;
217 DRM_DEBUG_KMS("LVDS downclock is found in VBT. " 242 DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
218 "Normal Clock %dKHz, downclock %dKHz\n", 243 "Normal Clock %dKHz, downclock %dKHz\n",
219 temp_downclock, panel_fixed_mode->clock); 244 panel_fixed_mode->clock, 10*downclock);
220 } 245 }
221 return;
222} 246}
223 247
224/* Try to find sdvo panel data */ 248/* Try to find sdvo panel data */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0f1c799afea1..393a39922e53 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,6 +24,7 @@
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 */ 25 */
26 26
27#include <linux/cpufreq.h>
27#include <linux/module.h> 28#include <linux/module.h>
28#include <linux/input.h> 29#include <linux/input.h>
29#include <linux/i2c.h> 30#include <linux/i2c.h>
@@ -1157,12 +1158,15 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1157 1158
1158 reg = TRANSCONF(pipe); 1159 reg = TRANSCONF(pipe);
1159 val = I915_READ(reg); 1160 val = I915_READ(reg);
1160 /* 1161
1161 * make the BPC in transcoder be consistent with 1162 if (HAS_PCH_IBX(dev_priv->dev)) {
1162 * that in pipeconf reg. 1163 /*
1163 */ 1164 * make the BPC in transcoder be consistent with
1164 val &= ~PIPE_BPC_MASK; 1165 * that in pipeconf reg.
1165 val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; 1166 */
1167 val &= ~PIPE_BPC_MASK;
1168 val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
1169 }
1166 I915_WRITE(reg, val | TRANS_ENABLE); 1170 I915_WRITE(reg, val | TRANS_ENABLE);
1167 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 1171 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1168 DRM_ERROR("failed to enable transcoder %d\n", pipe); 1172 DRM_ERROR("failed to enable transcoder %d\n", pipe);
@@ -1380,6 +1384,28 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1380 disable_pch_hdmi(dev_priv, pipe, HDMID); 1384 disable_pch_hdmi(dev_priv, pipe, HDMID);
1381} 1385}
1382 1386
1387static void i8xx_disable_fbc(struct drm_device *dev)
1388{
1389 struct drm_i915_private *dev_priv = dev->dev_private;
1390 u32 fbc_ctl;
1391
1392 /* Disable compression */
1393 fbc_ctl = I915_READ(FBC_CONTROL);
1394 if ((fbc_ctl & FBC_CTL_EN) == 0)
1395 return;
1396
1397 fbc_ctl &= ~FBC_CTL_EN;
1398 I915_WRITE(FBC_CONTROL, fbc_ctl);
1399
1400 /* Wait for compressing bit to clear */
1401 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1402 DRM_DEBUG_KMS("FBC idle timed out\n");
1403 return;
1404 }
1405
1406 DRM_DEBUG_KMS("disabled FBC\n");
1407}
1408
1383static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1409static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1384{ 1410{
1385 struct drm_device *dev = crtc->dev; 1411 struct drm_device *dev = crtc->dev;
@@ -1388,36 +1414,25 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1388 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1414 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1389 struct drm_i915_gem_object *obj = intel_fb->obj; 1415 struct drm_i915_gem_object *obj = intel_fb->obj;
1390 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1416 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1417 int cfb_pitch;
1391 int plane, i; 1418 int plane, i;
1392 u32 fbc_ctl, fbc_ctl2; 1419 u32 fbc_ctl, fbc_ctl2;
1393 1420
1394 if (fb->pitch == dev_priv->cfb_pitch && 1421 cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1395 obj->fence_reg == dev_priv->cfb_fence && 1422 if (fb->pitch < cfb_pitch)
1396 intel_crtc->plane == dev_priv->cfb_plane && 1423 cfb_pitch = fb->pitch;
1397 I915_READ(FBC_CONTROL) & FBC_CTL_EN)
1398 return;
1399
1400 i8xx_disable_fbc(dev);
1401
1402 dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1403
1404 if (fb->pitch < dev_priv->cfb_pitch)
1405 dev_priv->cfb_pitch = fb->pitch;
1406 1424
1407 /* FBC_CTL wants 64B units */ 1425 /* FBC_CTL wants 64B units */
1408 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 1426 cfb_pitch = (cfb_pitch / 64) - 1;
1409 dev_priv->cfb_fence = obj->fence_reg; 1427 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1410 dev_priv->cfb_plane = intel_crtc->plane;
1411 plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1412 1428
1413 /* Clear old tags */ 1429 /* Clear old tags */
1414 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) 1430 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1415 I915_WRITE(FBC_TAG + (i * 4), 0); 1431 I915_WRITE(FBC_TAG + (i * 4), 0);
1416 1432
1417 /* Set it up... */ 1433 /* Set it up... */
1418 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; 1434 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
1419 if (obj->tiling_mode != I915_TILING_NONE) 1435 fbc_ctl2 |= plane;
1420 fbc_ctl2 |= FBC_CTL_CPU_FENCE;
1421 I915_WRITE(FBC_CONTROL2, fbc_ctl2); 1436 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1422 I915_WRITE(FBC_FENCE_OFF, crtc->y); 1437 I915_WRITE(FBC_FENCE_OFF, crtc->y);
1423 1438
@@ -1425,36 +1440,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1425 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; 1440 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1426 if (IS_I945GM(dev)) 1441 if (IS_I945GM(dev))
1427 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ 1442 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1428 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 1443 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1429 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; 1444 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1430 if (obj->tiling_mode != I915_TILING_NONE) 1445 fbc_ctl |= obj->fence_reg;
1431 fbc_ctl |= dev_priv->cfb_fence;
1432 I915_WRITE(FBC_CONTROL, fbc_ctl);
1433
1434 DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
1435 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
1436}
1437
1438void i8xx_disable_fbc(struct drm_device *dev)
1439{
1440 struct drm_i915_private *dev_priv = dev->dev_private;
1441 u32 fbc_ctl;
1442
1443 /* Disable compression */
1444 fbc_ctl = I915_READ(FBC_CONTROL);
1445 if ((fbc_ctl & FBC_CTL_EN) == 0)
1446 return;
1447
1448 fbc_ctl &= ~FBC_CTL_EN;
1449 I915_WRITE(FBC_CONTROL, fbc_ctl); 1446 I915_WRITE(FBC_CONTROL, fbc_ctl);
1450 1447
1451 /* Wait for compressing bit to clear */ 1448 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
1452 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { 1449 cfb_pitch, crtc->y, intel_crtc->plane);
1453 DRM_DEBUG_KMS("FBC idle timed out\n");
1454 return;
1455 }
1456
1457 DRM_DEBUG_KMS("disabled FBC\n");
1458} 1450}
1459 1451
1460static bool i8xx_fbc_enabled(struct drm_device *dev) 1452static bool i8xx_fbc_enabled(struct drm_device *dev)
@@ -1476,30 +1468,9 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1476 unsigned long stall_watermark = 200; 1468 unsigned long stall_watermark = 200;
1477 u32 dpfc_ctl; 1469 u32 dpfc_ctl;
1478 1470
1479 dpfc_ctl = I915_READ(DPFC_CONTROL);
1480 if (dpfc_ctl & DPFC_CTL_EN) {
1481 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
1482 dev_priv->cfb_fence == obj->fence_reg &&
1483 dev_priv->cfb_plane == intel_crtc->plane &&
1484 dev_priv->cfb_y == crtc->y)
1485 return;
1486
1487 I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
1488 intel_wait_for_vblank(dev, intel_crtc->pipe);
1489 }
1490
1491 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1492 dev_priv->cfb_fence = obj->fence_reg;
1493 dev_priv->cfb_plane = intel_crtc->plane;
1494 dev_priv->cfb_y = crtc->y;
1495
1496 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; 1471 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1497 if (obj->tiling_mode != I915_TILING_NONE) { 1472 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
1498 dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; 1473 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1499 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1500 } else {
1501 I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
1502 }
1503 1474
1504 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | 1475 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1505 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 1476 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
@@ -1512,7 +1483,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1512 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 1483 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1513} 1484}
1514 1485
1515void g4x_disable_fbc(struct drm_device *dev) 1486static void g4x_disable_fbc(struct drm_device *dev)
1516{ 1487{
1517 struct drm_i915_private *dev_priv = dev->dev_private; 1488 struct drm_i915_private *dev_priv = dev->dev_private;
1518 u32 dpfc_ctl; 1489 u32 dpfc_ctl;
@@ -1567,32 +1538,12 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1567 u32 dpfc_ctl; 1538 u32 dpfc_ctl;
1568 1539
1569 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 1540 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1570 if (dpfc_ctl & DPFC_CTL_EN) {
1571 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
1572 dev_priv->cfb_fence == obj->fence_reg &&
1573 dev_priv->cfb_plane == intel_crtc->plane &&
1574 dev_priv->cfb_offset == obj->gtt_offset &&
1575 dev_priv->cfb_y == crtc->y)
1576 return;
1577
1578 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
1579 intel_wait_for_vblank(dev, intel_crtc->pipe);
1580 }
1581
1582 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1583 dev_priv->cfb_fence = obj->fence_reg;
1584 dev_priv->cfb_plane = intel_crtc->plane;
1585 dev_priv->cfb_offset = obj->gtt_offset;
1586 dev_priv->cfb_y = crtc->y;
1587
1588 dpfc_ctl &= DPFC_RESERVED; 1541 dpfc_ctl &= DPFC_RESERVED;
1589 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); 1542 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1590 if (obj->tiling_mode != I915_TILING_NONE) { 1543 /* Set persistent mode for front-buffer rendering, ala X. */
1591 dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); 1544 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
1592 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); 1545 dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
1593 } else { 1546 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1594 I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
1595 }
1596 1547
1597 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | 1548 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1598 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 1549 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
@@ -1604,7 +1555,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1604 1555
1605 if (IS_GEN6(dev)) { 1556 if (IS_GEN6(dev)) {
1606 I915_WRITE(SNB_DPFC_CTL_SA, 1557 I915_WRITE(SNB_DPFC_CTL_SA,
1607 SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence); 1558 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
1608 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); 1559 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1609 sandybridge_blit_fbc_update(dev); 1560 sandybridge_blit_fbc_update(dev);
1610 } 1561 }
@@ -1612,7 +1563,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1612 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 1563 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1613} 1564}
1614 1565
1615void ironlake_disable_fbc(struct drm_device *dev) 1566static void ironlake_disable_fbc(struct drm_device *dev)
1616{ 1567{
1617 struct drm_i915_private *dev_priv = dev->dev_private; 1568 struct drm_i915_private *dev_priv = dev->dev_private;
1618 u32 dpfc_ctl; 1569 u32 dpfc_ctl;
@@ -1644,24 +1595,109 @@ bool intel_fbc_enabled(struct drm_device *dev)
1644 return dev_priv->display.fbc_enabled(dev); 1595 return dev_priv->display.fbc_enabled(dev);
1645} 1596}
1646 1597
1647void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1598static void intel_fbc_work_fn(struct work_struct *__work)
1648{ 1599{
1649 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 1600 struct intel_fbc_work *work =
1601 container_of(to_delayed_work(__work),
1602 struct intel_fbc_work, work);
1603 struct drm_device *dev = work->crtc->dev;
1604 struct drm_i915_private *dev_priv = dev->dev_private;
1605
1606 mutex_lock(&dev->struct_mutex);
1607 if (work == dev_priv->fbc_work) {
1608 /* Double check that we haven't switched fb without cancelling
1609 * the prior work.
1610 */
1611 if (work->crtc->fb == work->fb) {
1612 dev_priv->display.enable_fbc(work->crtc,
1613 work->interval);
1614
1615 dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
1616 dev_priv->cfb_fb = work->crtc->fb->base.id;
1617 dev_priv->cfb_y = work->crtc->y;
1618 }
1619
1620 dev_priv->fbc_work = NULL;
1621 }
1622 mutex_unlock(&dev->struct_mutex);
1623
1624 kfree(work);
1625}
1626
1627static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
1628{
1629 if (dev_priv->fbc_work == NULL)
1630 return;
1631
1632 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
1633
1634 /* Synchronisation is provided by struct_mutex and checking of
1635 * dev_priv->fbc_work, so we can perform the cancellation
1636 * entirely asynchronously.
1637 */
1638 if (cancel_delayed_work(&dev_priv->fbc_work->work))
1639 /* tasklet was killed before being run, clean up */
1640 kfree(dev_priv->fbc_work);
1641
1642 /* Mark the work as no longer wanted so that if it does
1643 * wake-up (because the work was already running and waiting
1644 * for our mutex), it will discover that is no longer
1645 * necessary to run.
1646 */
1647 dev_priv->fbc_work = NULL;
1648}
1649
1650static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1651{
1652 struct intel_fbc_work *work;
1653 struct drm_device *dev = crtc->dev;
1654 struct drm_i915_private *dev_priv = dev->dev_private;
1650 1655
1651 if (!dev_priv->display.enable_fbc) 1656 if (!dev_priv->display.enable_fbc)
1652 return; 1657 return;
1653 1658
1654 dev_priv->display.enable_fbc(crtc, interval); 1659 intel_cancel_fbc_work(dev_priv);
1660
1661 work = kzalloc(sizeof *work, GFP_KERNEL);
1662 if (work == NULL) {
1663 dev_priv->display.enable_fbc(crtc, interval);
1664 return;
1665 }
1666
1667 work->crtc = crtc;
1668 work->fb = crtc->fb;
1669 work->interval = interval;
1670 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
1671
1672 dev_priv->fbc_work = work;
1673
1674 DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1675
1676 /* Delay the actual enabling to let pageflipping cease and the
1677 * display to settle before starting the compression. Note that
1678 * this delay also serves a second purpose: it allows for a
1679 * vblank to pass after disabling the FBC before we attempt
1680 * to modify the control registers.
1681 *
1682 * A more complicated solution would involve tracking vblanks
1683 * following the termination of the page-flipping sequence
1684 * and indeed performing the enable as a co-routine and not
1685 * waiting synchronously upon the vblank.
1686 */
1687 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
1655} 1688}
1656 1689
1657void intel_disable_fbc(struct drm_device *dev) 1690void intel_disable_fbc(struct drm_device *dev)
1658{ 1691{
1659 struct drm_i915_private *dev_priv = dev->dev_private; 1692 struct drm_i915_private *dev_priv = dev->dev_private;
1660 1693
1694 intel_cancel_fbc_work(dev_priv);
1695
1661 if (!dev_priv->display.disable_fbc) 1696 if (!dev_priv->display.disable_fbc)
1662 return; 1697 return;
1663 1698
1664 dev_priv->display.disable_fbc(dev); 1699 dev_priv->display.disable_fbc(dev);
1700 dev_priv->cfb_plane = -1;
1665} 1701}
1666 1702
1667/** 1703/**
@@ -1760,8 +1796,13 @@ static void intel_update_fbc(struct drm_device *dev)
1760 dev_priv->no_fbc_reason = FBC_BAD_PLANE; 1796 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1761 goto out_disable; 1797 goto out_disable;
1762 } 1798 }
1763 if (obj->tiling_mode != I915_TILING_X) { 1799
1764 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); 1800 /* The use of a CPU fence is mandatory in order to detect writes
1801 * by the CPU to the scanout and trigger updates to the FBC.
1802 */
1803 if (obj->tiling_mode != I915_TILING_X ||
1804 obj->fence_reg == I915_FENCE_REG_NONE) {
1805 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1765 dev_priv->no_fbc_reason = FBC_NOT_TILED; 1806 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1766 goto out_disable; 1807 goto out_disable;
1767 } 1808 }
@@ -1770,6 +1811,44 @@ static void intel_update_fbc(struct drm_device *dev)
1770 if (in_dbg_master()) 1811 if (in_dbg_master())
1771 goto out_disable; 1812 goto out_disable;
1772 1813
1814 /* If the scanout has not changed, don't modify the FBC settings.
1815 * Note that we make the fundamental assumption that the fb->obj
1816 * cannot be unpinned (and have its GTT offset and fence revoked)
1817 * without first being decoupled from the scanout and FBC disabled.
1818 */
1819 if (dev_priv->cfb_plane == intel_crtc->plane &&
1820 dev_priv->cfb_fb == fb->base.id &&
1821 dev_priv->cfb_y == crtc->y)
1822 return;
1823
1824 if (intel_fbc_enabled(dev)) {
1825 /* We update FBC along two paths, after changing fb/crtc
1826 * configuration (modeswitching) and after page-flipping
1827 * finishes. For the latter, we know that not only did
1828 * we disable the FBC at the start of the page-flip
1829 * sequence, but also more than one vblank has passed.
1830 *
1831 * For the former case of modeswitching, it is possible
1832 * to switch between two FBC valid configurations
1833 * instantaneously so we do need to disable the FBC
1834 * before we can modify its control registers. We also
1835 * have to wait for the next vblank for that to take
1836 * effect. However, since we delay enabling FBC we can
1837 * assume that a vblank has passed since disabling and
1838 * that we can safely alter the registers in the deferred
1839 * callback.
1840 *
1841 * In the scenario that we go from a valid to invalid
1842 * and then back to valid FBC configuration we have
1843 * no strict enforcement that a vblank occurred since
1844 * disabling the FBC. However, along all current pipe
1845 * disabling paths we do need to wait for a vblank at
1846 * some point. And we wait before enabling FBC anyway.
1847 */
1848 DRM_DEBUG_KMS("disabling active FBC for update\n");
1849 intel_disable_fbc(dev);
1850 }
1851
1773 intel_enable_fbc(crtc, 500); 1852 intel_enable_fbc(crtc, 500);
1774 return; 1853 return;
1775 1854
@@ -1812,14 +1891,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1812 } 1891 }
1813 1892
1814 dev_priv->mm.interruptible = false; 1893 dev_priv->mm.interruptible = false;
1815 ret = i915_gem_object_pin(obj, alignment, true); 1894 ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
1816 if (ret) 1895 if (ret)
1817 goto err_interruptible; 1896 goto err_interruptible;
1818 1897
1819 ret = i915_gem_object_set_to_display_plane(obj, pipelined);
1820 if (ret)
1821 goto err_unpin;
1822
1823 /* Install a fence for tiled scan-out. Pre-i965 always needs a 1898 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1824 * fence, whereas 965+ only requires a fence if using 1899 * fence, whereas 965+ only requires a fence if using
1825 * framebuffer compression. For simplicity, we always install 1900 * framebuffer compression. For simplicity, we always install
@@ -1841,10 +1916,8 @@ err_interruptible:
1841 return ret; 1916 return ret;
1842} 1917}
1843 1918
1844/* Assume fb object is pinned & idle & fenced and just update base pointers */ 1919static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1845static int 1920 int x, int y)
1846intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1847 int x, int y, enum mode_set_atomic state)
1848{ 1921{
1849 struct drm_device *dev = crtc->dev; 1922 struct drm_device *dev = crtc->dev;
1850 struct drm_i915_private *dev_priv = dev->dev_private; 1923 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1887,7 +1960,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1887 dspcntr |= DISPPLANE_32BPP_NO_ALPHA; 1960 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1888 break; 1961 break;
1889 default: 1962 default:
1890 DRM_ERROR("Unknown color depth\n"); 1963 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
1891 return -EINVAL; 1964 return -EINVAL;
1892 } 1965 }
1893 if (INTEL_INFO(dev)->gen >= 4) { 1966 if (INTEL_INFO(dev)->gen >= 4) {
@@ -1897,10 +1970,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1897 dspcntr &= ~DISPPLANE_TILED; 1970 dspcntr &= ~DISPPLANE_TILED;
1898 } 1971 }
1899 1972
1900 if (HAS_PCH_SPLIT(dev))
1901 /* must disable */
1902 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1903
1904 I915_WRITE(reg, dspcntr); 1973 I915_WRITE(reg, dspcntr);
1905 1974
1906 Start = obj->gtt_offset; 1975 Start = obj->gtt_offset;
@@ -1917,6 +1986,99 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1917 I915_WRITE(DSPADDR(plane), Start + Offset); 1986 I915_WRITE(DSPADDR(plane), Start + Offset);
1918 POSTING_READ(reg); 1987 POSTING_READ(reg);
1919 1988
1989 return 0;
1990}
1991
1992static int ironlake_update_plane(struct drm_crtc *crtc,
1993 struct drm_framebuffer *fb, int x, int y)
1994{
1995 struct drm_device *dev = crtc->dev;
1996 struct drm_i915_private *dev_priv = dev->dev_private;
1997 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1998 struct intel_framebuffer *intel_fb;
1999 struct drm_i915_gem_object *obj;
2000 int plane = intel_crtc->plane;
2001 unsigned long Start, Offset;
2002 u32 dspcntr;
2003 u32 reg;
2004
2005 switch (plane) {
2006 case 0:
2007 case 1:
2008 break;
2009 default:
2010 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2011 return -EINVAL;
2012 }
2013
2014 intel_fb = to_intel_framebuffer(fb);
2015 obj = intel_fb->obj;
2016
2017 reg = DSPCNTR(plane);
2018 dspcntr = I915_READ(reg);
2019 /* Mask out pixel format bits in case we change it */
2020 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2021 switch (fb->bits_per_pixel) {
2022 case 8:
2023 dspcntr |= DISPPLANE_8BPP;
2024 break;
2025 case 16:
2026 if (fb->depth != 16)
2027 return -EINVAL;
2028
2029 dspcntr |= DISPPLANE_16BPP;
2030 break;
2031 case 24:
2032 case 32:
2033 if (fb->depth == 24)
2034 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2035 else if (fb->depth == 30)
2036 dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2037 else
2038 return -EINVAL;
2039 break;
2040 default:
2041 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2042 return -EINVAL;
2043 }
2044
2045 if (obj->tiling_mode != I915_TILING_NONE)
2046 dspcntr |= DISPPLANE_TILED;
2047 else
2048 dspcntr &= ~DISPPLANE_TILED;
2049
2050 /* must disable */
2051 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2052
2053 I915_WRITE(reg, dspcntr);
2054
2055 Start = obj->gtt_offset;
2056 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
2057
2058 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2059 Start, Offset, x, y, fb->pitch);
2060 I915_WRITE(DSPSTRIDE(plane), fb->pitch);
2061 I915_WRITE(DSPSURF(plane), Start);
2062 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2063 I915_WRITE(DSPADDR(plane), Offset);
2064 POSTING_READ(reg);
2065
2066 return 0;
2067}
2068
2069/* Assume fb object is pinned & idle & fenced and just update base pointers */
2070static int
2071intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2072 int x, int y, enum mode_set_atomic state)
2073{
2074 struct drm_device *dev = crtc->dev;
2075 struct drm_i915_private *dev_priv = dev->dev_private;
2076 int ret;
2077
2078 ret = dev_priv->display.update_plane(crtc, fb, x, y);
2079 if (ret)
2080 return ret;
2081
1920 intel_update_fbc(dev); 2082 intel_update_fbc(dev);
1921 intel_increase_pllclock(crtc); 2083 intel_increase_pllclock(crtc);
1922 2084
@@ -1971,7 +2133,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1971 * This should only fail upon a hung GPU, in which case we 2133 * This should only fail upon a hung GPU, in which case we
1972 * can safely continue. 2134 * can safely continue.
1973 */ 2135 */
1974 ret = i915_gem_object_flush_gpu(obj); 2136 ret = i915_gem_object_finish_gpu(obj);
1975 (void) ret; 2137 (void) ret;
1976 } 2138 }
1977 2139
@@ -2622,6 +2784,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2622 /* For PCH DP, enable TRANS_DP_CTL */ 2784 /* For PCH DP, enable TRANS_DP_CTL */
2623 if (HAS_PCH_CPT(dev) && 2785 if (HAS_PCH_CPT(dev) &&
2624 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 2786 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
2787 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
2625 reg = TRANS_DP_CTL(pipe); 2788 reg = TRANS_DP_CTL(pipe);
2626 temp = I915_READ(reg); 2789 temp = I915_READ(reg);
2627 temp &= ~(TRANS_DP_PORT_SEL_MASK | 2790 temp &= ~(TRANS_DP_PORT_SEL_MASK |
@@ -2629,7 +2792,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2629 TRANS_DP_BPC_MASK); 2792 TRANS_DP_BPC_MASK);
2630 temp |= (TRANS_DP_OUTPUT_ENABLE | 2793 temp |= (TRANS_DP_OUTPUT_ENABLE |
2631 TRANS_DP_ENH_FRAMING); 2794 TRANS_DP_ENH_FRAMING);
2632 temp |= TRANS_DP_8BPC; 2795 temp |= bpc << 9; /* same format but at 11:9 */
2633 2796
2634 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 2797 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2635 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 2798 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -2732,9 +2895,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
2732 2895
2733 intel_disable_plane(dev_priv, plane, pipe); 2896 intel_disable_plane(dev_priv, plane, pipe);
2734 2897
2735 if (dev_priv->cfb_plane == plane && 2898 if (dev_priv->cfb_plane == plane)
2736 dev_priv->display.disable_fbc) 2899 intel_disable_fbc(dev);
2737 dev_priv->display.disable_fbc(dev);
2738 2900
2739 intel_disable_pipe(dev_priv, pipe); 2901 intel_disable_pipe(dev_priv, pipe);
2740 2902
@@ -2898,9 +3060,8 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
2898 intel_crtc_dpms_overlay(intel_crtc, false); 3060 intel_crtc_dpms_overlay(intel_crtc, false);
2899 intel_crtc_update_cursor(crtc, false); 3061 intel_crtc_update_cursor(crtc, false);
2900 3062
2901 if (dev_priv->cfb_plane == plane && 3063 if (dev_priv->cfb_plane == plane)
2902 dev_priv->display.disable_fbc) 3064 intel_disable_fbc(dev);
2903 dev_priv->display.disable_fbc(dev);
2904 3065
2905 intel_disable_plane(dev_priv, plane, pipe); 3066 intel_disable_plane(dev_priv, plane, pipe);
2906 intel_disable_pipe(dev_priv, pipe); 3067 intel_disable_pipe(dev_priv, pipe);
@@ -4309,6 +4470,133 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4309 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 4470 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4310} 4471}
4311 4472
4473/**
4474 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4475 * @crtc: CRTC structure
4476 *
4477 * A pipe may be connected to one or more outputs. Based on the depth of the
4478 * attached framebuffer, choose a good color depth to use on the pipe.
4479 *
4480 * If possible, match the pipe depth to the fb depth. In some cases, this
4481 * isn't ideal, because the connected output supports a lesser or restricted
4482 * set of depths. Resolve that here:
4483 * LVDS typically supports only 6bpc, so clamp down in that case
4484 * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4485 * Displays may support a restricted set as well, check EDID and clamp as
4486 * appropriate.
4487 *
4488 * RETURNS:
4489 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4490 * true if they don't match).
4491 */
4492static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4493 unsigned int *pipe_bpp)
4494{
4495 struct drm_device *dev = crtc->dev;
4496 struct drm_i915_private *dev_priv = dev->dev_private;
4497 struct drm_encoder *encoder;
4498 struct drm_connector *connector;
4499 unsigned int display_bpc = UINT_MAX, bpc;
4500
4501 /* Walk the encoders & connectors on this crtc, get min bpc */
4502 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4503 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4504
4505 if (encoder->crtc != crtc)
4506 continue;
4507
4508 if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
4509 unsigned int lvds_bpc;
4510
4511 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
4512 LVDS_A3_POWER_UP)
4513 lvds_bpc = 8;
4514 else
4515 lvds_bpc = 6;
4516
4517 if (lvds_bpc < display_bpc) {
4518 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
4519 display_bpc = lvds_bpc;
4520 }
4521 continue;
4522 }
4523
4524 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
4525 /* Use VBT settings if we have an eDP panel */
4526 unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4527
4528 if (edp_bpc < display_bpc) {
4529 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
4530 display_bpc = edp_bpc;
4531 }
4532 continue;
4533 }
4534
4535 /* Not one of the known troublemakers, check the EDID */
4536 list_for_each_entry(connector, &dev->mode_config.connector_list,
4537 head) {
4538 if (connector->encoder != encoder)
4539 continue;
4540
4541 if (connector->display_info.bpc < display_bpc) {
4542 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
4543 display_bpc = connector->display_info.bpc;
4544 }
4545 }
4546
4547 /*
4548 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
4549 * through, clamp it down. (Note: >12bpc will be caught below.)
4550 */
4551 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
4552 if (display_bpc > 8 && display_bpc < 12) {
4553 DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
4554 display_bpc = 12;
4555 } else {
4556 DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
4557 display_bpc = 8;
4558 }
4559 }
4560 }
4561
4562 /*
4563 * We could just drive the pipe at the highest bpc all the time and
4564 * enable dithering as needed, but that costs bandwidth. So choose
4565 * the minimum value that expresses the full color range of the fb but
4566 * also stays within the max display bpc discovered above.
4567 */
4568
4569 switch (crtc->fb->depth) {
4570 case 8:
4571 bpc = 8; /* since we go through a colormap */
4572 break;
4573 case 15:
4574 case 16:
4575 bpc = 6; /* min is 18bpp */
4576 break;
4577 case 24:
4578 bpc = min((unsigned int)8, display_bpc);
4579 break;
4580 case 30:
4581 bpc = min((unsigned int)10, display_bpc);
4582 break;
4583 case 48:
4584 bpc = min((unsigned int)12, display_bpc);
4585 break;
4586 default:
4587 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
4588 bpc = min((unsigned int)8, display_bpc);
4589 break;
4590 }
4591
4592 DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
4593 bpc, display_bpc);
4594
4595 *pipe_bpp = bpc * 3;
4596
4597 return display_bpc != bpc;
4598}
4599
4312static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 4600static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4313 struct drm_display_mode *mode, 4601 struct drm_display_mode *mode,
4314 struct drm_display_mode *adjusted_mode, 4602 struct drm_display_mode *adjusted_mode,
@@ -4721,7 +5009,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4721 struct fdi_m_n m_n = {0}; 5009 struct fdi_m_n m_n = {0};
4722 u32 temp; 5010 u32 temp;
4723 u32 lvds_sync = 0; 5011 u32 lvds_sync = 0;
4724 int target_clock, pixel_multiplier, lane, link_bw, bpp, factor; 5012 int target_clock, pixel_multiplier, lane, link_bw, factor;
5013 unsigned int pipe_bpp;
5014 bool dither;
4725 5015
4726 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 5016 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4727 if (encoder->base.crtc != crtc) 5017 if (encoder->base.crtc != crtc)
@@ -4848,56 +5138,37 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4848 /* determine panel color depth */ 5138 /* determine panel color depth */
4849 temp = I915_READ(PIPECONF(pipe)); 5139 temp = I915_READ(PIPECONF(pipe));
4850 temp &= ~PIPE_BPC_MASK; 5140 temp &= ~PIPE_BPC_MASK;
4851 if (is_lvds) { 5141 dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
4852 /* the BPC will be 6 if it is 18-bit LVDS panel */ 5142 switch (pipe_bpp) {
4853 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) 5143 case 18:
4854 temp |= PIPE_8BPC; 5144 temp |= PIPE_6BPC;
4855 else
4856 temp |= PIPE_6BPC;
4857 } else if (has_edp_encoder) {
4858 switch (dev_priv->edp.bpp/3) {
4859 case 8:
4860 temp |= PIPE_8BPC;
4861 break;
4862 case 10:
4863 temp |= PIPE_10BPC;
4864 break;
4865 case 6:
4866 temp |= PIPE_6BPC;
4867 break;
4868 case 12:
4869 temp |= PIPE_12BPC;
4870 break;
4871 }
4872 } else
4873 temp |= PIPE_8BPC;
4874 I915_WRITE(PIPECONF(pipe), temp);
4875
4876 switch (temp & PIPE_BPC_MASK) {
4877 case PIPE_8BPC:
4878 bpp = 24;
4879 break; 5145 break;
4880 case PIPE_10BPC: 5146 case 24:
4881 bpp = 30; 5147 temp |= PIPE_8BPC;
4882 break; 5148 break;
4883 case PIPE_6BPC: 5149 case 30:
4884 bpp = 18; 5150 temp |= PIPE_10BPC;
4885 break; 5151 break;
4886 case PIPE_12BPC: 5152 case 36:
4887 bpp = 36; 5153 temp |= PIPE_12BPC;
4888 break; 5154 break;
4889 default: 5155 default:
4890 DRM_ERROR("unknown pipe bpc value\n"); 5156 WARN(1, "intel_choose_pipe_bpp returned invalid value\n");
4891 bpp = 24; 5157 temp |= PIPE_8BPC;
5158 pipe_bpp = 24;
5159 break;
4892 } 5160 }
4893 5161
5162 intel_crtc->bpp = pipe_bpp;
5163 I915_WRITE(PIPECONF(pipe), temp);
5164
4894 if (!lane) { 5165 if (!lane) {
4895 /* 5166 /*
4896 * Account for spread spectrum to avoid 5167 * Account for spread spectrum to avoid
4897 * oversubscribing the link. Max center spread 5168 * oversubscribing the link. Max center spread
4898 * is 2.5%; use 5% for safety's sake. 5169 * is 2.5%; use 5% for safety's sake.
4899 */ 5170 */
4900 u32 bps = target_clock * bpp * 21 / 20; 5171 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
4901 lane = bps / (link_bw * 8) + 1; 5172 lane = bps / (link_bw * 8) + 1;
4902 } 5173 }
4903 5174
@@ -4905,7 +5176,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4905 5176
4906 if (pixel_multiplier > 1) 5177 if (pixel_multiplier > 1)
4907 link_bw *= pixel_multiplier; 5178 link_bw *= pixel_multiplier;
4908 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); 5179 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5180 &m_n);
4909 5181
4910 /* Ironlake: try to setup display ref clock before DPLL 5182 /* Ironlake: try to setup display ref clock before DPLL
4911 * enabling. This is only under driver's control after 5183 * enabling. This is only under driver's control after
@@ -5108,14 +5380,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5108 I915_WRITE(PCH_LVDS, temp); 5380 I915_WRITE(PCH_LVDS, temp);
5109 } 5381 }
5110 5382
5111 /* set the dithering flag and clear for anything other than a panel. */
5112 pipeconf &= ~PIPECONF_DITHER_EN; 5383 pipeconf &= ~PIPECONF_DITHER_EN;
5113 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; 5384 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5114 if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) { 5385 if ((is_lvds && dev_priv->lvds_dither) || dither) {
5115 pipeconf |= PIPECONF_DITHER_EN; 5386 pipeconf |= PIPECONF_DITHER_EN;
5116 pipeconf |= PIPECONF_DITHER_TYPE_ST1; 5387 pipeconf |= PIPECONF_DITHER_TYPE_ST1;
5117 } 5388 }
5118
5119 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5389 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5120 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5390 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5121 } else { 5391 } else {
@@ -5435,21 +5705,15 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
5435 goto fail_locked; 5705 goto fail_locked;
5436 } 5706 }
5437 5707
5438 ret = i915_gem_object_pin(obj, PAGE_SIZE, true); 5708 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
5439 if (ret) {
5440 DRM_ERROR("failed to pin cursor bo\n");
5441 goto fail_locked;
5442 }
5443
5444 ret = i915_gem_object_set_to_gtt_domain(obj, 0);
5445 if (ret) { 5709 if (ret) {
5446 DRM_ERROR("failed to move cursor bo into the GTT\n"); 5710 DRM_ERROR("failed to move cursor bo into the GTT\n");
5447 goto fail_unpin; 5711 goto fail_locked;
5448 } 5712 }
5449 5713
5450 ret = i915_gem_object_put_fence(obj); 5714 ret = i915_gem_object_put_fence(obj);
5451 if (ret) { 5715 if (ret) {
5452 DRM_ERROR("failed to move cursor bo into the GTT\n"); 5716 DRM_ERROR("failed to release fence for cursor");
5453 goto fail_unpin; 5717 goto fail_unpin;
5454 } 5718 }
5455 5719
@@ -6152,6 +6416,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
6152 drm_gem_object_unreference(&work->pending_flip_obj->base); 6416 drm_gem_object_unreference(&work->pending_flip_obj->base);
6153 drm_gem_object_unreference(&work->old_fb_obj->base); 6417 drm_gem_object_unreference(&work->old_fb_obj->base);
6154 6418
6419 intel_update_fbc(work->dev);
6155 mutex_unlock(&work->dev->struct_mutex); 6420 mutex_unlock(&work->dev->struct_mutex);
6156 kfree(work); 6421 kfree(work);
6157} 6422}
@@ -6516,6 +6781,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6516 if (ret) 6781 if (ret)
6517 goto cleanup_pending; 6782 goto cleanup_pending;
6518 6783
6784 intel_disable_fbc(dev);
6519 mutex_unlock(&dev->struct_mutex); 6785 mutex_unlock(&dev->struct_mutex);
6520 6786
6521 trace_i915_flip_request(intel_crtc->plane, obj); 6787 trace_i915_flip_request(intel_crtc->plane, obj);
@@ -6644,6 +6910,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
6644 6910
6645 intel_crtc_reset(&intel_crtc->base); 6911 intel_crtc_reset(&intel_crtc->base);
6646 intel_crtc->active = true; /* force the pipe off on setup_init_config */ 6912 intel_crtc->active = true; /* force the pipe off on setup_init_config */
6913 intel_crtc->bpp = 24; /* default for pre-Ironlake */
6647 6914
6648 if (HAS_PCH_SPLIT(dev)) { 6915 if (HAS_PCH_SPLIT(dev)) {
6649 intel_helper_funcs.prepare = ironlake_crtc_prepare; 6916 intel_helper_funcs.prepare = ironlake_crtc_prepare;
@@ -6870,6 +7137,11 @@ int intel_framebuffer_init(struct drm_device *dev,
6870 switch (mode_cmd->bpp) { 7137 switch (mode_cmd->bpp) {
6871 case 8: 7138 case 8:
6872 case 16: 7139 case 16:
7140 /* Only pre-ILK can handle 5:5:5 */
7141 if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
7142 return -EINVAL;
7143 break;
7144
6873 case 24: 7145 case 24:
6874 case 32: 7146 case 32:
6875 break; 7147 break;
@@ -7284,6 +7556,59 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
7284 mutex_unlock(&dev_priv->dev->struct_mutex); 7556 mutex_unlock(&dev_priv->dev->struct_mutex);
7285} 7557}
7286 7558
7559void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
7560{
7561 int min_freq = 15;
7562 int gpu_freq, ia_freq, max_ia_freq;
7563 int scaling_factor = 180;
7564
7565 max_ia_freq = cpufreq_quick_get_max(0);
7566 /*
7567 * Default to measured freq if none found, PCU will ensure we don't go
7568 * over
7569 */
7570 if (!max_ia_freq)
7571 max_ia_freq = tsc_khz;
7572
7573 /* Convert from kHz to MHz */
7574 max_ia_freq /= 1000;
7575
7576 mutex_lock(&dev_priv->dev->struct_mutex);
7577
7578 /*
7579 * For each potential GPU frequency, load a ring frequency we'd like
7580 * to use for memory access. We do this by specifying the IA frequency
7581 * the PCU should use as a reference to determine the ring frequency.
7582 */
7583 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
7584 gpu_freq--) {
7585 int diff = dev_priv->max_delay - gpu_freq;
7586
7587 /*
7588 * For GPU frequencies less than 750MHz, just use the lowest
7589 * ring freq.
7590 */
7591 if (gpu_freq < min_freq)
7592 ia_freq = 800;
7593 else
7594 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
7595 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
7596
7597 I915_WRITE(GEN6_PCODE_DATA,
7598 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
7599 gpu_freq);
7600 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
7601 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
7602 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
7603 GEN6_PCODE_READY) == 0, 10)) {
7604 DRM_ERROR("pcode write of freq table timed out\n");
7605 continue;
7606 }
7607 }
7608
7609 mutex_unlock(&dev_priv->dev->struct_mutex);
7610}
7611
7287static void ironlake_init_clock_gating(struct drm_device *dev) 7612static void ironlake_init_clock_gating(struct drm_device *dev)
7288{ 7613{
7289 struct drm_i915_private *dev_priv = dev->dev_private; 7614 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7640,9 +7965,11 @@ static void intel_init_display(struct drm_device *dev)
7640 if (HAS_PCH_SPLIT(dev)) { 7965 if (HAS_PCH_SPLIT(dev)) {
7641 dev_priv->display.dpms = ironlake_crtc_dpms; 7966 dev_priv->display.dpms = ironlake_crtc_dpms;
7642 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 7967 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
7968 dev_priv->display.update_plane = ironlake_update_plane;
7643 } else { 7969 } else {
7644 dev_priv->display.dpms = i9xx_crtc_dpms; 7970 dev_priv->display.dpms = i9xx_crtc_dpms;
7645 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 7971 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
7972 dev_priv->display.update_plane = i9xx_update_plane;
7646 } 7973 }
7647 7974
7648 if (I915_HAS_FBC(dev)) { 7975 if (I915_HAS_FBC(dev)) {
@@ -7939,8 +8266,10 @@ void intel_modeset_init(struct drm_device *dev)
7939 intel_init_emon(dev); 8266 intel_init_emon(dev);
7940 } 8267 }
7941 8268
7942 if (IS_GEN6(dev)) 8269 if (IS_GEN6(dev) || IS_GEN7(dev)) {
7943 gen6_enable_rps(dev_priv); 8270 gen6_enable_rps(dev_priv);
8271 gen6_update_ring_freq(dev_priv);
8272 }
7944 8273
7945 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 8274 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
7946 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 8275 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
@@ -7976,12 +8305,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
7976 intel_increase_pllclock(crtc); 8305 intel_increase_pllclock(crtc);
7977 } 8306 }
7978 8307
7979 if (dev_priv->display.disable_fbc) 8308 intel_disable_fbc(dev);
7980 dev_priv->display.disable_fbc(dev);
7981 8309
7982 if (IS_IRONLAKE_M(dev)) 8310 if (IS_IRONLAKE_M(dev))
7983 ironlake_disable_drps(dev); 8311 ironlake_disable_drps(dev);
7984 if (IS_GEN6(dev)) 8312 if (IS_GEN6(dev) || IS_GEN7(dev))
7985 gen6_disable_rps(dev); 8313 gen6_disable_rps(dev);
7986 8314
7987 if (IS_IRONLAKE_M(dev)) 8315 if (IS_IRONLAKE_M(dev))
@@ -7994,6 +8322,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
7994 drm_irq_uninstall(dev); 8322 drm_irq_uninstall(dev);
7995 cancel_work_sync(&dev_priv->hotplug_work); 8323 cancel_work_sync(&dev_priv->hotplug_work);
7996 8324
8325 /* flush any delayed tasks or pending work */
8326 flush_scheduled_work();
8327
7997 /* Shut off idle work before the crtcs get freed. */ 8328 /* Shut off idle work before the crtcs get freed. */
7998 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 8329 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7999 intel_crtc = to_intel_crtc(crtc); 8330 intel_crtc = to_intel_crtc(crtc);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index e2aced6eec4c..f797fb58ba9c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -178,12 +178,14 @@ intel_dp_link_clock(uint8_t link_bw)
178static int 178static int
179intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock) 179intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
180{ 180{
181 struct drm_i915_private *dev_priv = dev->dev_private; 181 struct drm_crtc *crtc = intel_dp->base.base.crtc;
182 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
183 int bpp = 24;
182 184
183 if (is_edp(intel_dp)) 185 if (intel_crtc)
184 return (pixel_clock * dev_priv->edp.bpp + 7) / 8; 186 bpp = intel_crtc->bpp;
185 else 187
186 return pixel_clock * 3; 188 return (pixel_clock * bpp + 7) / 8;
187} 189}
188 190
189static int 191static int
@@ -681,7 +683,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
681 struct drm_encoder *encoder; 683 struct drm_encoder *encoder;
682 struct drm_i915_private *dev_priv = dev->dev_private; 684 struct drm_i915_private *dev_priv = dev->dev_private;
683 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 685 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
684 int lane_count = 4, bpp = 24; 686 int lane_count = 4;
685 struct intel_dp_m_n m_n; 687 struct intel_dp_m_n m_n;
686 int pipe = intel_crtc->pipe; 688 int pipe = intel_crtc->pipe;
687 689
@@ -700,7 +702,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
700 break; 702 break;
701 } else if (is_edp(intel_dp)) { 703 } else if (is_edp(intel_dp)) {
702 lane_count = dev_priv->edp.lanes; 704 lane_count = dev_priv->edp.lanes;
703 bpp = dev_priv->edp.bpp;
704 break; 705 break;
705 } 706 }
706 } 707 }
@@ -710,7 +711,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
710 * the number of bytes_per_pixel post-LUT, which we always 711 * the number of bytes_per_pixel post-LUT, which we always
711 * set up for 8-bits of R/G/B, or 3 bytes total. 712 * set up for 8-bits of R/G/B, or 3 bytes total.
712 */ 713 */
713 intel_dp_compute_m_n(bpp, lane_count, 714 intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
714 mode->clock, adjusted_mode->clock, &m_n); 715 mode->clock, adjusted_mode->clock, &m_n);
715 716
716 if (HAS_PCH_SPLIT(dev)) { 717 if (HAS_PCH_SPLIT(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9ffa61eb4d7e..6e990f9760ef 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -170,6 +170,7 @@ struct intel_crtc {
170 int16_t cursor_x, cursor_y; 170 int16_t cursor_x, cursor_y;
171 int16_t cursor_width, cursor_height; 171 int16_t cursor_width, cursor_height;
172 bool cursor_visible; 172 bool cursor_visible;
173 unsigned int bpp;
173}; 174};
174 175
175#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 176#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -233,6 +234,13 @@ struct intel_unpin_work {
233 bool enable_stall_check; 234 bool enable_stall_check;
234}; 235};
235 236
237struct intel_fbc_work {
238 struct delayed_work work;
239 struct drm_crtc *crtc;
240 struct drm_framebuffer *fb;
241 int interval;
242};
243
236int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); 244int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
237extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); 245extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
238 246
@@ -317,6 +325,7 @@ extern void intel_enable_clock_gating(struct drm_device *dev);
317extern void ironlake_enable_drps(struct drm_device *dev); 325extern void ironlake_enable_drps(struct drm_device *dev);
318extern void ironlake_disable_drps(struct drm_device *dev); 326extern void ironlake_disable_drps(struct drm_device *dev);
319extern void gen6_enable_rps(struct drm_i915_private *dev_priv); 327extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
328extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
320extern void gen6_disable_rps(struct drm_device *dev); 329extern void gen6_disable_rps(struct drm_device *dev);
321extern void intel_init_emon(struct drm_device *dev); 330extern void intel_init_emon(struct drm_device *dev);
322 331
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index aa0a8e83142e..1ed8e6903915 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -124,12 +124,18 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
124 u32 sdvox; 124 u32 sdvox;
125 125
126 sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; 126 sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
127 sdvox |= intel_hdmi->color_range; 127 if (!HAS_PCH_SPLIT(dev))
128 sdvox |= intel_hdmi->color_range;
128 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 129 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
129 sdvox |= SDVO_VSYNC_ACTIVE_HIGH; 130 sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
130 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 131 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
131 sdvox |= SDVO_HSYNC_ACTIVE_HIGH; 132 sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
132 133
134 if (intel_crtc->bpp > 24)
135 sdvox |= COLOR_FORMAT_12bpc;
136 else
137 sdvox |= COLOR_FORMAT_8bpc;
138
133 /* Required on CPT */ 139 /* Required on CPT */
134 if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) 140 if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
135 sdvox |= HDMI_MODE_SELECT; 141 sdvox |= HDMI_MODE_SELECT;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index d2c710422908..b7c5ddb564d1 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -297,19 +297,26 @@ static int intel_opregion_video_event(struct notifier_block *nb,
297 /* The only video events relevant to opregion are 0x80. These indicate 297 /* The only video events relevant to opregion are 0x80. These indicate
298 either a docking event, lid switch or display switch request. In 298 either a docking event, lid switch or display switch request. In
299 Linux, these are handled by the dock, button and video drivers. 299 Linux, these are handled by the dock, button and video drivers.
300 We might want to fix the video driver to be opregion-aware in 300 */
301 future, but right now we just indicate to the firmware that the
302 request has been handled */
303 301
304 struct opregion_acpi *acpi; 302 struct opregion_acpi *acpi;
303 struct acpi_bus_event *event = data;
304 int ret = NOTIFY_OK;
305
306 if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
307 return NOTIFY_DONE;
305 308
306 if (!system_opregion) 309 if (!system_opregion)
307 return NOTIFY_DONE; 310 return NOTIFY_DONE;
308 311
309 acpi = system_opregion->acpi; 312 acpi = system_opregion->acpi;
313
314 if (event->type == 0x80 && !(acpi->cevt & 0x1))
315 ret = NOTIFY_BAD;
316
310 acpi->csts = 0; 317 acpi->csts = 0;
311 318
312 return NOTIFY_OK; 319 return ret;
313} 320}
314 321
315static struct notifier_block intel_opregion_notifier = { 322static struct notifier_block intel_opregion_notifier = {
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 9e2959bc91cd..d36038086826 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -773,14 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
773 if (ret != 0) 773 if (ret != 0)
774 return ret; 774 return ret;
775 775
776 ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true); 776 ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
777 if (ret != 0) 777 if (ret != 0)
778 return ret; 778 return ret;
779 779
780 ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
781 if (ret != 0)
782 goto out_unpin;
783
784 ret = i915_gem_object_put_fence(new_bo); 780 ret = i915_gem_object_put_fence(new_bo);
785 if (ret) 781 if (ret)
786 goto out_unpin; 782 goto out_unpin;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 95c4b1429935..e9615685a39c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -236,7 +236,8 @@ init_pipe_control(struct intel_ring_buffer *ring)
236 ret = -ENOMEM; 236 ret = -ENOMEM;
237 goto err; 237 goto err;
238 } 238 }
239 obj->cache_level = I915_CACHE_LLC; 239
240 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
240 241
241 ret = i915_gem_object_pin(obj, 4096, true); 242 ret = i915_gem_object_pin(obj, 4096, true);
242 if (ret) 243 if (ret)
@@ -776,7 +777,8 @@ static int init_status_page(struct intel_ring_buffer *ring)
776 ret = -ENOMEM; 777 ret = -ENOMEM;
777 goto err; 778 goto err;
778 } 779 }
779 obj->cache_level = I915_CACHE_LLC; 780
781 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
780 782
781 ret = i915_gem_object_pin(obj, 4096, true); 783 ret = i915_gem_object_pin(obj, 4096, true);
782 if (ret != 0) { 784 if (ret != 0) {
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 113e4e7264cd..210d570fd516 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1236,6 +1236,8 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
1236 struct drm_connector *connector) 1236 struct drm_connector *connector)
1237{ 1237{
1238 struct drm_encoder *encoder = &intel_tv->base.base; 1238 struct drm_encoder *encoder = &intel_tv->base.base;
1239 struct drm_crtc *crtc = encoder->crtc;
1240 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1239 struct drm_device *dev = encoder->dev; 1241 struct drm_device *dev = encoder->dev;
1240 struct drm_i915_private *dev_priv = dev->dev_private; 1242 struct drm_i915_private *dev_priv = dev->dev_private;
1241 unsigned long irqflags; 1243 unsigned long irqflags;
@@ -1258,6 +1260,10 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
1258 /* Poll for TV detection */ 1260 /* Poll for TV detection */
1259 tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK); 1261 tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
1260 tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; 1262 tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
1263 if (intel_crtc->pipe == 1)
1264 tv_ctl |= TV_ENC_PIPEB_SELECT;
1265 else
1266 tv_ctl &= ~TV_ENC_PIPEB_SELECT;
1261 1267
1262 tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK); 1268 tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
1263 tv_dac |= (TVDAC_STATE_CHG_EN | 1269 tv_dac |= (TVDAC_STATE_CHG_EN |
@@ -1277,26 +1283,26 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
1277 to_intel_crtc(intel_tv->base.base.crtc)->pipe); 1283 to_intel_crtc(intel_tv->base.base.crtc)->pipe);
1278 1284
1279 type = -1; 1285 type = -1;
1280 if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) { 1286 tv_dac = I915_READ(TV_DAC);
1281 DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac); 1287 DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
1282 /* 1288 /*
1283 * A B C 1289 * A B C
1284 * 0 1 1 Composite 1290 * 0 1 1 Composite
1285 * 1 0 X svideo 1291 * 1 0 X svideo
1286 * 0 0 0 Component 1292 * 0 0 0 Component
1287 */ 1293 */
1288 if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { 1294 if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
1289 DRM_DEBUG_KMS("Detected Composite TV connection\n"); 1295 DRM_DEBUG_KMS("Detected Composite TV connection\n");
1290 type = DRM_MODE_CONNECTOR_Composite; 1296 type = DRM_MODE_CONNECTOR_Composite;
1291 } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { 1297 } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
1292 DRM_DEBUG_KMS("Detected S-Video TV connection\n"); 1298 DRM_DEBUG_KMS("Detected S-Video TV connection\n");
1293 type = DRM_MODE_CONNECTOR_SVIDEO; 1299 type = DRM_MODE_CONNECTOR_SVIDEO;
1294 } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { 1300 } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
1295 DRM_DEBUG_KMS("Detected Component TV connection\n"); 1301 DRM_DEBUG_KMS("Detected Component TV connection\n");
1296 type = DRM_MODE_CONNECTOR_Component; 1302 type = DRM_MODE_CONNECTOR_Component;
1297 } else { 1303 } else {
1298 DRM_DEBUG_KMS("Unrecognised TV connection\n"); 1304 DRM_DEBUG_KMS("Unrecognised TV connection\n");
1299 } 1305 type = -1;
1300 } 1306 }
1301 1307
1302 I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); 1308 I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 729d5fd7c88d..b311faba34f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -135,13 +135,14 @@ static void load_vbios_pramin(struct drm_device *dev, uint8_t *data)
135 int i; 135 int i;
136 136
137 if (dev_priv->card_type >= NV_50) { 137 if (dev_priv->card_type >= NV_50) {
138 uint32_t vbios_vram = (nv_rd32(dev, 0x619f04) & ~0xff) << 8; 138 u64 addr = (u64)(nv_rd32(dev, 0x619f04) & 0xffffff00) << 8;
139 139 if (!addr) {
140 if (!vbios_vram) 140 addr = (u64)nv_rd32(dev, 0x1700) << 16;
141 vbios_vram = (nv_rd32(dev, 0x1700) << 16) + 0xf0000; 141 addr += 0xf0000;
142 }
142 143
143 old_bar0_pramin = nv_rd32(dev, 0x1700); 144 old_bar0_pramin = nv_rd32(dev, 0x1700);
144 nv_wr32(dev, 0x1700, vbios_vram >> 16); 145 nv_wr32(dev, 0x1700, addr >> 16);
145 } 146 }
146 147
147 /* bail if no rom signature */ 148 /* bail if no rom signature */
@@ -5186,7 +5187,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
5186 load_table_ptr = ROM16(bios->data[bitentry->offset]); 5187 load_table_ptr = ROM16(bios->data[bitentry->offset]);
5187 5188
5188 if (load_table_ptr == 0x0) { 5189 if (load_table_ptr == 0x0) {
5189 NV_ERROR(dev, "Pointer to BIT loadval table invalid\n"); 5190 NV_DEBUG(dev, "Pointer to BIT loadval table invalid\n");
5190 return -EINVAL; 5191 return -EINVAL;
5191 } 5192 }
5192 5193
@@ -5965,6 +5966,12 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
5965 if (cte->type == DCB_CONNECTOR_HDMI_1) 5966 if (cte->type == DCB_CONNECTOR_HDMI_1)
5966 cte->type = DCB_CONNECTOR_DVI_I; 5967 cte->type = DCB_CONNECTOR_DVI_I;
5967 } 5968 }
5969
5970 /* Gigabyte GV-NX86T512H */
5971 if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
5972 if (cte->type == DCB_CONNECTOR_HDMI_1)
5973 cte->type = DCB_CONNECTOR_DVI_I;
5974 }
5968} 5975}
5969 5976
5970static const u8 hpd_gpio[16] = { 5977static const u8 hpd_gpio[16] = {
@@ -6377,6 +6384,37 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
6377 } 6384 }
6378 } 6385 }
6379 6386
6387 /* Some other twisted XFX board (rhbz#694914)
6388 *
6389 * The DVI/VGA encoder combo that's supposed to represent the
6390 * DVI-I connector actually point at two different ones, and
6391 * the HDMI connector ends up paired with the VGA instead.
6392 *
6393 * Connector table is missing anything for VGA at all, pointing it
6394 * an invalid conntab entry 2 so we figure it out ourself.
6395 */
6396 if (nv_match_device(dev, 0x0615, 0x1682, 0x2605)) {
6397 if (idx == 0) {
6398 *conn = 0x02002300; /* VGA, connector 2 */
6399 *conf = 0x00000028;
6400 } else
6401 if (idx == 1) {
6402 *conn = 0x01010312; /* DVI, connector 0 */
6403 *conf = 0x00020030;
6404 } else
6405 if (idx == 2) {
6406 *conn = 0x04020310; /* VGA, connector 0 */
6407 *conf = 0x00000028;
6408 } else
6409 if (idx == 3) {
6410 *conn = 0x02021322; /* HDMI, connector 1 */
6411 *conf = 0x00020010;
6412 } else {
6413 *conn = 0x0000000e; /* EOL */
6414 *conf = 0x00000000;
6415 }
6416 }
6417
6380 return true; 6418 return true;
6381} 6419}
6382 6420
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2ad49cbf7c8b..890d50e4d682 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,16 +49,12 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
49 DRM_ERROR("bo %p still attached to GEM object\n", bo); 49 DRM_ERROR("bo %p still attached to GEM object\n", bo);
50 50
51 nv10_mem_put_tile_region(dev, nvbo->tile, NULL); 51 nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
52 if (nvbo->vma.node) {
53 nouveau_vm_unmap(&nvbo->vma);
54 nouveau_vm_put(&nvbo->vma);
55 }
56 kfree(nvbo); 52 kfree(nvbo);
57} 53}
58 54
59static void 55static void
60nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, 56nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
61 int *align, int *size, int *page_shift) 57 int *align, int *size)
62{ 58{
63 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 59 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
64 60
@@ -82,67 +78,51 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
82 } 78 }
83 } 79 }
84 } else { 80 } else {
85 if (likely(dev_priv->chan_vm)) { 81 *size = roundup(*size, (1 << nvbo->page_shift));
86 if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024) 82 *align = max((1 << nvbo->page_shift), *align);
87 *page_shift = dev_priv->chan_vm->lpg_shift;
88 else
89 *page_shift = dev_priv->chan_vm->spg_shift;
90 } else {
91 *page_shift = 12;
92 }
93
94 *size = roundup(*size, (1 << *page_shift));
95 *align = max((1 << *page_shift), *align);
96 } 83 }
97 84
98 *size = roundup(*size, PAGE_SIZE); 85 *size = roundup(*size, PAGE_SIZE);
99} 86}
100 87
101int 88int
102nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, 89nouveau_bo_new(struct drm_device *dev, int size, int align,
103 int size, int align, uint32_t flags, uint32_t tile_mode, 90 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
104 uint32_t tile_flags, struct nouveau_bo **pnvbo) 91 struct nouveau_bo **pnvbo)
105{ 92{
106 struct drm_nouveau_private *dev_priv = dev->dev_private; 93 struct drm_nouveau_private *dev_priv = dev->dev_private;
107 struct nouveau_bo *nvbo; 94 struct nouveau_bo *nvbo;
108 int ret = 0, page_shift = 0; 95 int ret;
109 96
110 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); 97 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
111 if (!nvbo) 98 if (!nvbo)
112 return -ENOMEM; 99 return -ENOMEM;
113 INIT_LIST_HEAD(&nvbo->head); 100 INIT_LIST_HEAD(&nvbo->head);
114 INIT_LIST_HEAD(&nvbo->entry); 101 INIT_LIST_HEAD(&nvbo->entry);
102 INIT_LIST_HEAD(&nvbo->vma_list);
115 nvbo->tile_mode = tile_mode; 103 nvbo->tile_mode = tile_mode;
116 nvbo->tile_flags = tile_flags; 104 nvbo->tile_flags = tile_flags;
117 nvbo->bo.bdev = &dev_priv->ttm.bdev; 105 nvbo->bo.bdev = &dev_priv->ttm.bdev;
118 106
119 nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift); 107 nvbo->page_shift = 12;
120 align >>= PAGE_SHIFT; 108 if (dev_priv->bar1_vm) {
121 109 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
122 if (dev_priv->chan_vm) { 110 nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
123 ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
124 NV_MEM_ACCESS_RW, &nvbo->vma);
125 if (ret) {
126 kfree(nvbo);
127 return ret;
128 }
129 } 111 }
130 112
113 nouveau_bo_fixup_align(nvbo, flags, &align, &size);
131 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; 114 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
132 nouveau_bo_placement_set(nvbo, flags, 0); 115 nouveau_bo_placement_set(nvbo, flags, 0);
133 116
134 nvbo->channel = chan;
135 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, 117 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
136 ttm_bo_type_device, &nvbo->placement, align, 0, 118 ttm_bo_type_device, &nvbo->placement,
137 false, NULL, size, nouveau_bo_del_ttm); 119 align >> PAGE_SHIFT, 0, false, NULL, size,
120 nouveau_bo_del_ttm);
138 if (ret) { 121 if (ret) {
139 /* ttm will call nouveau_bo_del_ttm if it fails.. */ 122 /* ttm will call nouveau_bo_del_ttm if it fails.. */
140 return ret; 123 return ret;
141 } 124 }
142 nvbo->channel = NULL;
143 125
144 if (nvbo->vma.node)
145 nvbo->bo.offset = nvbo->vma.offset;
146 *pnvbo = nvbo; 126 *pnvbo = nvbo;
147 return 0; 127 return 0;
148} 128}
@@ -312,8 +292,6 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
312 if (ret) 292 if (ret)
313 return ret; 293 return ret;
314 294
315 if (nvbo->vma.node)
316 nvbo->bo.offset = nvbo->vma.offset;
317 return 0; 295 return 0;
318} 296}
319 297
@@ -440,7 +418,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
440 TTM_MEMTYPE_FLAG_CMA; 418 TTM_MEMTYPE_FLAG_CMA;
441 man->available_caching = TTM_PL_MASK_CACHING; 419 man->available_caching = TTM_PL_MASK_CACHING;
442 man->default_caching = TTM_PL_FLAG_CACHED; 420 man->default_caching = TTM_PL_FLAG_CACHED;
443 man->gpu_offset = dev_priv->gart_info.aper_base;
444 break; 421 break;
445 default: 422 default:
446 NV_ERROR(dev, "Unknown GART type: %d\n", 423 NV_ERROR(dev, "Unknown GART type: %d\n",
@@ -501,19 +478,12 @@ static int
501nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 478nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
502 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 479 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
503{ 480{
504 struct nouveau_mem *old_node = old_mem->mm_node; 481 struct nouveau_mem *node = old_mem->mm_node;
505 struct nouveau_mem *new_node = new_mem->mm_node; 482 u64 src_offset = node->vma[0].offset;
506 struct nouveau_bo *nvbo = nouveau_bo(bo); 483 u64 dst_offset = node->vma[1].offset;
507 u32 page_count = new_mem->num_pages; 484 u32 page_count = new_mem->num_pages;
508 u64 src_offset, dst_offset;
509 int ret; 485 int ret;
510 486
511 src_offset = old_node->tmp_vma.offset;
512 if (new_node->tmp_vma.node)
513 dst_offset = new_node->tmp_vma.offset;
514 else
515 dst_offset = nvbo->vma.offset;
516
517 page_count = new_mem->num_pages; 487 page_count = new_mem->num_pages;
518 while (page_count) { 488 while (page_count) {
519 int line_count = (page_count > 2047) ? 2047 : page_count; 489 int line_count = (page_count > 2047) ? 2047 : page_count;
@@ -547,19 +517,13 @@ static int
547nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 517nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
548 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 518 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
549{ 519{
550 struct nouveau_mem *old_node = old_mem->mm_node; 520 struct nouveau_mem *node = old_mem->mm_node;
551 struct nouveau_mem *new_node = new_mem->mm_node;
552 struct nouveau_bo *nvbo = nouveau_bo(bo); 521 struct nouveau_bo *nvbo = nouveau_bo(bo);
553 u64 length = (new_mem->num_pages << PAGE_SHIFT); 522 u64 length = (new_mem->num_pages << PAGE_SHIFT);
554 u64 src_offset, dst_offset; 523 u64 src_offset = node->vma[0].offset;
524 u64 dst_offset = node->vma[1].offset;
555 int ret; 525 int ret;
556 526
557 src_offset = old_node->tmp_vma.offset;
558 if (new_node->tmp_vma.node)
559 dst_offset = new_node->tmp_vma.offset;
560 else
561 dst_offset = nvbo->vma.offset;
562
563 while (length) { 527 while (length) {
564 u32 amount, stride, height; 528 u32 amount, stride, height;
565 529
@@ -695,6 +659,27 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
695} 659}
696 660
697static int 661static int
662nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
663 struct ttm_mem_reg *mem, struct nouveau_vma *vma)
664{
665 struct nouveau_mem *node = mem->mm_node;
666 int ret;
667
668 ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
669 node->page_shift, NV_MEM_ACCESS_RO, vma);
670 if (ret)
671 return ret;
672
673 if (mem->mem_type == TTM_PL_VRAM)
674 nouveau_vm_map(vma, node);
675 else
676 nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
677 node, node->pages);
678
679 return 0;
680}
681
682static int
698nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, 683nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
699 bool no_wait_reserve, bool no_wait_gpu, 684 bool no_wait_reserve, bool no_wait_gpu,
700 struct ttm_mem_reg *new_mem) 685 struct ttm_mem_reg *new_mem)
@@ -711,31 +696,20 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
711 mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX); 696 mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
712 } 697 }
713 698
714 /* create temporary vma for old memory, this will get cleaned 699 /* create temporary vmas for the transfer and attach them to the
715 * up after ttm destroys the ttm_mem_reg 700 * old nouveau_mem node, these will get cleaned up after ttm has
701 * destroyed the ttm_mem_reg
716 */ 702 */
717 if (dev_priv->card_type >= NV_50) { 703 if (dev_priv->card_type >= NV_50) {
718 struct nouveau_mem *node = old_mem->mm_node; 704 struct nouveau_mem *node = old_mem->mm_node;
719 if (!node->tmp_vma.node) {
720 u32 page_shift = nvbo->vma.node->type;
721 if (old_mem->mem_type == TTM_PL_TT)
722 page_shift = nvbo->vma.vm->spg_shift;
723
724 ret = nouveau_vm_get(chan->vm,
725 old_mem->num_pages << PAGE_SHIFT,
726 page_shift, NV_MEM_ACCESS_RO,
727 &node->tmp_vma);
728 if (ret)
729 goto out;
730 }
731 705
732 if (old_mem->mem_type == TTM_PL_VRAM) 706 ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
733 nouveau_vm_map(&node->tmp_vma, node); 707 if (ret)
734 else { 708 goto out;
735 nouveau_vm_map_sg(&node->tmp_vma, 0, 709
736 old_mem->num_pages << PAGE_SHIFT, 710 ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
737 node, node->pages); 711 if (ret)
738 } 712 goto out;
739 } 713 }
740 714
741 if (dev_priv->card_type < NV_50) 715 if (dev_priv->card_type < NV_50)
@@ -762,7 +736,6 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
762 bool no_wait_reserve, bool no_wait_gpu, 736 bool no_wait_reserve, bool no_wait_gpu,
763 struct ttm_mem_reg *new_mem) 737 struct ttm_mem_reg *new_mem)
764{ 738{
765 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
766 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 739 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
767 struct ttm_placement placement; 740 struct ttm_placement placement;
768 struct ttm_mem_reg tmp_mem; 741 struct ttm_mem_reg tmp_mem;
@@ -782,23 +755,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
782 if (ret) 755 if (ret)
783 goto out; 756 goto out;
784 757
785 if (dev_priv->card_type >= NV_50) {
786 struct nouveau_bo *nvbo = nouveau_bo(bo);
787 struct nouveau_mem *node = tmp_mem.mm_node;
788 struct nouveau_vma *vma = &nvbo->vma;
789 if (vma->node->type != vma->vm->spg_shift)
790 vma = &node->tmp_vma;
791 nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
792 node, node->pages);
793 }
794
795 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem); 758 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
796
797 if (dev_priv->card_type >= NV_50) {
798 struct nouveau_bo *nvbo = nouveau_bo(bo);
799 nouveau_vm_unmap(&nvbo->vma);
800 }
801
802 if (ret) 759 if (ret)
803 goto out; 760 goto out;
804 761
@@ -844,30 +801,22 @@ out:
844static void 801static void
845nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) 802nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
846{ 803{
847 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
848 struct nouveau_mem *node = new_mem->mm_node; 804 struct nouveau_mem *node = new_mem->mm_node;
849 struct nouveau_bo *nvbo = nouveau_bo(bo); 805 struct nouveau_bo *nvbo = nouveau_bo(bo);
850 struct nouveau_vma *vma = &nvbo->vma; 806 struct nouveau_vma *vma;
851 struct nouveau_vm *vm = vma->vm; 807
852 808 list_for_each_entry(vma, &nvbo->vma_list, head) {
853 if (dev_priv->card_type < NV_50) 809 if (new_mem->mem_type == TTM_PL_VRAM) {
854 return; 810 nouveau_vm_map(vma, new_mem->mm_node);
855 811 } else
856 switch (new_mem->mem_type) { 812 if (new_mem->mem_type == TTM_PL_TT &&
857 case TTM_PL_VRAM: 813 nvbo->page_shift == vma->vm->spg_shift) {
858 nouveau_vm_map(vma, node); 814 nouveau_vm_map_sg(vma, 0, new_mem->
859 break; 815 num_pages << PAGE_SHIFT,
860 case TTM_PL_TT: 816 node, node->pages);
861 if (vma->node->type != vm->spg_shift) { 817 } else {
862 nouveau_vm_unmap(vma); 818 nouveau_vm_unmap(vma);
863 vma = &node->tmp_vma;
864 } 819 }
865 nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
866 node, node->pages);
867 break;
868 default:
869 nouveau_vm_unmap(&nvbo->vma);
870 break;
871 } 820 }
872} 821}
873 822
@@ -1113,3 +1062,54 @@ struct ttm_bo_driver nouveau_bo_driver = {
1113 .io_mem_free = &nouveau_ttm_io_mem_free, 1062 .io_mem_free = &nouveau_ttm_io_mem_free,
1114}; 1063};
1115 1064
1065struct nouveau_vma *
1066nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
1067{
1068 struct nouveau_vma *vma;
1069 list_for_each_entry(vma, &nvbo->vma_list, head) {
1070 if (vma->vm == vm)
1071 return vma;
1072 }
1073
1074 return NULL;
1075}
1076
1077int
1078nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
1079 struct nouveau_vma *vma)
1080{
1081 const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
1082 struct nouveau_mem *node = nvbo->bo.mem.mm_node;
1083 int ret;
1084
1085 ret = nouveau_vm_get(vm, size, nvbo->page_shift,
1086 NV_MEM_ACCESS_RW, vma);
1087 if (ret)
1088 return ret;
1089
1090 if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
1091 nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
1092 else
1093 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
1094 nouveau_vm_map_sg(vma, 0, size, node, node->pages);
1095
1096 list_add_tail(&vma->head, &nvbo->vma_list);
1097 vma->refcount = 1;
1098 return 0;
1099}
1100
1101void
1102nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
1103{
1104 if (vma->node) {
1105 if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
1106 spin_lock(&nvbo->bo.bdev->fence_lock);
1107 ttm_bo_wait(&nvbo->bo, false, false, false);
1108 spin_unlock(&nvbo->bo.bdev->fence_lock);
1109 nouveau_vm_unmap(vma);
1110 }
1111
1112 nouveau_vm_put(vma);
1113 list_del(&vma->head);
1114 }
1115}
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index a7583a8ddb01..b0d753f45bbd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -27,40 +27,63 @@
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_drm.h" 28#include "nouveau_drm.h"
29#include "nouveau_dma.h" 29#include "nouveau_dma.h"
30#include "nouveau_ramht.h"
30 31
31static int 32static int
32nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) 33nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
33{ 34{
35 u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
34 struct drm_device *dev = chan->dev; 36 struct drm_device *dev = chan->dev;
35 struct drm_nouveau_private *dev_priv = dev->dev_private; 37 struct drm_nouveau_private *dev_priv = dev->dev_private;
36 struct nouveau_bo *pb = chan->pushbuf_bo; 38 int ret;
37 struct nouveau_gpuobj *pushbuf = NULL; 39
38 int ret = 0; 40 /* allocate buffer object */
41 ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo);
42 if (ret)
43 goto out;
44
45 ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
46 if (ret)
47 goto out;
48
49 ret = nouveau_bo_map(chan->pushbuf_bo);
50 if (ret)
51 goto out;
39 52
53 /* create DMA object covering the entire memtype where the push
54 * buffer resides, userspace can submit its own push buffers from
55 * anywhere within the same memtype.
56 */
57 chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
40 if (dev_priv->card_type >= NV_50) { 58 if (dev_priv->card_type >= NV_50) {
59 ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
60 &chan->pushbuf_vma);
61 if (ret)
62 goto out;
63
41 if (dev_priv->card_type < NV_C0) { 64 if (dev_priv->card_type < NV_C0) {
42 ret = nouveau_gpuobj_dma_new(chan, 65 ret = nouveau_gpuobj_dma_new(chan,
43 NV_CLASS_DMA_IN_MEMORY, 0, 66 NV_CLASS_DMA_IN_MEMORY, 0,
44 (1ULL << 40), 67 (1ULL << 40),
45 NV_MEM_ACCESS_RO, 68 NV_MEM_ACCESS_RO,
46 NV_MEM_TARGET_VM, 69 NV_MEM_TARGET_VM,
47 &pushbuf); 70 &chan->pushbuf);
48 } 71 }
49 chan->pushbuf_base = pb->bo.offset; 72 chan->pushbuf_base = chan->pushbuf_vma.offset;
50 } else 73 } else
51 if (pb->bo.mem.mem_type == TTM_PL_TT) { 74 if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
52 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, 75 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
53 dev_priv->gart_info.aper_size, 76 dev_priv->gart_info.aper_size,
54 NV_MEM_ACCESS_RO, 77 NV_MEM_ACCESS_RO,
55 NV_MEM_TARGET_GART, &pushbuf); 78 NV_MEM_TARGET_GART,
56 chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; 79 &chan->pushbuf);
57 } else 80 } else
58 if (dev_priv->card_type != NV_04) { 81 if (dev_priv->card_type != NV_04) {
59 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, 82 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
60 dev_priv->fb_available_size, 83 dev_priv->fb_available_size,
61 NV_MEM_ACCESS_RO, 84 NV_MEM_ACCESS_RO,
62 NV_MEM_TARGET_VRAM, &pushbuf); 85 NV_MEM_TARGET_VRAM,
63 chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; 86 &chan->pushbuf);
64 } else { 87 } else {
65 /* NV04 cmdbuf hack, from original ddx.. not sure of it's 88 /* NV04 cmdbuf hack, from original ddx.. not sure of it's
66 * exact reason for existing :) PCI access to cmdbuf in 89 * exact reason for existing :) PCI access to cmdbuf in
@@ -70,47 +93,22 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
70 pci_resource_start(dev->pdev, 1), 93 pci_resource_start(dev->pdev, 1),
71 dev_priv->fb_available_size, 94 dev_priv->fb_available_size,
72 NV_MEM_ACCESS_RO, 95 NV_MEM_ACCESS_RO,
73 NV_MEM_TARGET_PCI, &pushbuf); 96 NV_MEM_TARGET_PCI,
74 chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; 97 &chan->pushbuf);
75 } 98 }
76 99
77 nouveau_gpuobj_ref(pushbuf, &chan->pushbuf); 100out:
78 nouveau_gpuobj_ref(NULL, &pushbuf);
79 return ret;
80}
81
82static struct nouveau_bo *
83nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
84{
85 struct nouveau_bo *pushbuf = NULL;
86 int location, ret;
87
88 if (nouveau_vram_pushbuf)
89 location = TTM_PL_FLAG_VRAM;
90 else
91 location = TTM_PL_FLAG_TT;
92
93 ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
94 if (ret) {
95 NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
96 return NULL;
97 }
98
99 ret = nouveau_bo_pin(pushbuf, location);
100 if (ret) {
101 NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
102 nouveau_bo_ref(NULL, &pushbuf);
103 return NULL;
104 }
105
106 ret = nouveau_bo_map(pushbuf);
107 if (ret) { 101 if (ret) {
108 nouveau_bo_unpin(pushbuf); 102 NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
109 nouveau_bo_ref(NULL, &pushbuf); 103 nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
110 return NULL; 104 nouveau_gpuobj_ref(NULL, &chan->pushbuf);
105 if (chan->pushbuf_bo) {
106 nouveau_bo_unmap(chan->pushbuf_bo);
107 nouveau_bo_ref(NULL, &chan->pushbuf_bo);
108 }
111 } 109 }
112 110
113 return pushbuf; 111 return 0;
114} 112}
115 113
116/* allocates and initializes a fifo for user space consumption */ 114/* allocates and initializes a fifo for user space consumption */
@@ -121,6 +119,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
121{ 119{
122 struct drm_nouveau_private *dev_priv = dev->dev_private; 120 struct drm_nouveau_private *dev_priv = dev->dev_private;
123 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 121 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
122 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
124 struct nouveau_channel *chan; 123 struct nouveau_channel *chan;
125 unsigned long flags; 124 unsigned long flags;
126 int ret; 125 int ret;
@@ -160,19 +159,14 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
160 INIT_LIST_HEAD(&chan->nvsw.flip); 159 INIT_LIST_HEAD(&chan->nvsw.flip);
161 INIT_LIST_HEAD(&chan->fence.pending); 160 INIT_LIST_HEAD(&chan->fence.pending);
162 161
163 /* Allocate DMA push buffer */ 162 /* setup channel's memory and vm */
164 chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev); 163 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
165 if (!chan->pushbuf_bo) { 164 if (ret) {
166 ret = -ENOMEM; 165 NV_ERROR(dev, "gpuobj %d\n", ret);
167 NV_ERROR(dev, "pushbuf %d\n", ret);
168 nouveau_channel_put(&chan); 166 nouveau_channel_put(&chan);
169 return ret; 167 return ret;
170 } 168 }
171 169
172 nouveau_dma_pre_init(chan);
173 chan->user_put = 0x40;
174 chan->user_get = 0x44;
175
176 /* Allocate space for per-channel fixed notifier memory */ 170 /* Allocate space for per-channel fixed notifier memory */
177 ret = nouveau_notifier_init_channel(chan); 171 ret = nouveau_notifier_init_channel(chan);
178 if (ret) { 172 if (ret) {
@@ -181,21 +175,17 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
181 return ret; 175 return ret;
182 } 176 }
183 177
184 /* Setup channel's default objects */ 178 /* Allocate DMA push buffer */
185 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); 179 ret = nouveau_channel_pushbuf_init(chan);
186 if (ret) { 180 if (ret) {
187 NV_ERROR(dev, "gpuobj %d\n", ret); 181 NV_ERROR(dev, "pushbuf %d\n", ret);
188 nouveau_channel_put(&chan); 182 nouveau_channel_put(&chan);
189 return ret; 183 return ret;
190 } 184 }
191 185
192 /* Create a dma object for the push buffer */ 186 nouveau_dma_pre_init(chan);
193 ret = nouveau_channel_pushbuf_ctxdma_init(chan); 187 chan->user_put = 0x40;
194 if (ret) { 188 chan->user_get = 0x44;
195 NV_ERROR(dev, "pbctxdma %d\n", ret);
196 nouveau_channel_put(&chan);
197 return ret;
198 }
199 189
200 /* disable the fifo caches */ 190 /* disable the fifo caches */
201 pfifo->reassign(dev, false); 191 pfifo->reassign(dev, false);
@@ -220,6 +210,11 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
220 nouveau_debugfs_channel_init(chan); 210 nouveau_debugfs_channel_init(chan);
221 211
222 NV_DEBUG(dev, "channel %d initialised\n", chan->id); 212 NV_DEBUG(dev, "channel %d initialised\n", chan->id);
213 if (fpriv) {
214 spin_lock(&fpriv->lock);
215 list_add(&chan->list, &fpriv->channels);
216 spin_unlock(&fpriv->lock);
217 }
223 *chan_ret = chan; 218 *chan_ret = chan;
224 return 0; 219 return 0;
225} 220}
@@ -236,29 +231,23 @@ nouveau_channel_get_unlocked(struct nouveau_channel *ref)
236} 231}
237 232
238struct nouveau_channel * 233struct nouveau_channel *
239nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id) 234nouveau_channel_get(struct drm_file *file_priv, int id)
240{ 235{
241 struct drm_nouveau_private *dev_priv = dev->dev_private; 236 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
242 struct nouveau_channel *chan; 237 struct nouveau_channel *chan;
243 unsigned long flags;
244
245 if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
246 return ERR_PTR(-EINVAL);
247
248 spin_lock_irqsave(&dev_priv->channels.lock, flags);
249 chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
250 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
251
252 if (unlikely(!chan))
253 return ERR_PTR(-EINVAL);
254 238
255 if (unlikely(file_priv && chan->file_priv != file_priv)) { 239 spin_lock(&fpriv->lock);
256 nouveau_channel_put_unlocked(&chan); 240 list_for_each_entry(chan, &fpriv->channels, list) {
257 return ERR_PTR(-EINVAL); 241 if (chan->id == id) {
242 chan = nouveau_channel_get_unlocked(chan);
243 spin_unlock(&fpriv->lock);
244 mutex_lock(&chan->mutex);
245 return chan;
246 }
258 } 247 }
248 spin_unlock(&fpriv->lock);
259 249
260 mutex_lock(&chan->mutex); 250 return ERR_PTR(-EINVAL);
261 return chan;
262} 251}
263 252
264void 253void
@@ -312,12 +301,14 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
312 /* destroy any resources the channel owned */ 301 /* destroy any resources the channel owned */
313 nouveau_gpuobj_ref(NULL, &chan->pushbuf); 302 nouveau_gpuobj_ref(NULL, &chan->pushbuf);
314 if (chan->pushbuf_bo) { 303 if (chan->pushbuf_bo) {
304 nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
315 nouveau_bo_unmap(chan->pushbuf_bo); 305 nouveau_bo_unmap(chan->pushbuf_bo);
316 nouveau_bo_unpin(chan->pushbuf_bo); 306 nouveau_bo_unpin(chan->pushbuf_bo);
317 nouveau_bo_ref(NULL, &chan->pushbuf_bo); 307 nouveau_bo_ref(NULL, &chan->pushbuf_bo);
318 } 308 }
319 nouveau_gpuobj_channel_takedown(chan); 309 nouveau_ramht_ref(NULL, &chan->ramht, chan);
320 nouveau_notifier_takedown_channel(chan); 310 nouveau_notifier_takedown_channel(chan);
311 nouveau_gpuobj_channel_takedown(chan);
321 312
322 nouveau_channel_ref(NULL, pchan); 313 nouveau_channel_ref(NULL, pchan);
323} 314}
@@ -383,10 +374,11 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
383 374
384 NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); 375 NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
385 for (i = 0; i < engine->fifo.channels; i++) { 376 for (i = 0; i < engine->fifo.channels; i++) {
386 chan = nouveau_channel_get(dev, file_priv, i); 377 chan = nouveau_channel_get(file_priv, i);
387 if (IS_ERR(chan)) 378 if (IS_ERR(chan))
388 continue; 379 continue;
389 380
381 list_del(&chan->list);
390 atomic_dec(&chan->users); 382 atomic_dec(&chan->users);
391 nouveau_channel_put(&chan); 383 nouveau_channel_put(&chan);
392 } 384 }
@@ -459,10 +451,11 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
459 struct drm_nouveau_channel_free *req = data; 451 struct drm_nouveau_channel_free *req = data;
460 struct nouveau_channel *chan; 452 struct nouveau_channel *chan;
461 453
462 chan = nouveau_channel_get(dev, file_priv, req->channel); 454 chan = nouveau_channel_get(file_priv, req->channel);
463 if (IS_ERR(chan)) 455 if (IS_ERR(chan))
464 return PTR_ERR(chan); 456 return PTR_ERR(chan);
465 457
458 list_del(&chan->list);
466 atomic_dec(&chan->users); 459 atomic_dec(&chan->users);
467 nouveau_channel_put(&chan); 460 nouveau_channel_put(&chan);
468 return 0; 461 return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1595d0b6e815..939d4df07777 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -40,7 +40,7 @@
40static void nouveau_connector_hotplug(void *, int); 40static void nouveau_connector_hotplug(void *, int);
41 41
42static struct nouveau_encoder * 42static struct nouveau_encoder *
43find_encoder_by_type(struct drm_connector *connector, int type) 43find_encoder(struct drm_connector *connector, int type)
44{ 44{
45 struct drm_device *dev = connector->dev; 45 struct drm_device *dev = connector->dev;
46 struct nouveau_encoder *nv_encoder; 46 struct nouveau_encoder *nv_encoder;
@@ -170,8 +170,8 @@ nouveau_connector_of_detect(struct drm_connector *connector)
170 struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev); 170 struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
171 171
172 if (!dn || 172 if (!dn ||
173 !((nv_encoder = find_encoder_by_type(connector, OUTPUT_TMDS)) || 173 !((nv_encoder = find_encoder(connector, OUTPUT_TMDS)) ||
174 (nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG)))) 174 (nv_encoder = find_encoder(connector, OUTPUT_ANALOG))))
175 return NULL; 175 return NULL;
176 176
177 for_each_child_of_node(dn, cn) { 177 for_each_child_of_node(dn, cn) {
@@ -233,6 +233,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
233 struct drm_device *dev = connector->dev; 233 struct drm_device *dev = connector->dev;
234 struct nouveau_connector *nv_connector = nouveau_connector(connector); 234 struct nouveau_connector *nv_connector = nouveau_connector(connector);
235 struct nouveau_encoder *nv_encoder = NULL; 235 struct nouveau_encoder *nv_encoder = NULL;
236 struct nouveau_encoder *nv_partner;
236 struct nouveau_i2c_chan *i2c; 237 struct nouveau_i2c_chan *i2c;
237 int type; 238 int type;
238 239
@@ -266,19 +267,22 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
266 * same i2c channel so the value returned from ddc_detect 267 * same i2c channel so the value returned from ddc_detect
267 * isn't necessarily correct. 268 * isn't necessarily correct.
268 */ 269 */
269 if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) { 270 nv_partner = NULL;
271 if (nv_encoder->dcb->type == OUTPUT_TMDS)
272 nv_partner = find_encoder(connector, OUTPUT_ANALOG);
273 if (nv_encoder->dcb->type == OUTPUT_ANALOG)
274 nv_partner = find_encoder(connector, OUTPUT_TMDS);
275
276 if (nv_partner && ((nv_encoder->dcb->type == OUTPUT_ANALOG &&
277 nv_partner->dcb->type == OUTPUT_TMDS) ||
278 (nv_encoder->dcb->type == OUTPUT_TMDS &&
279 nv_partner->dcb->type == OUTPUT_ANALOG))) {
270 if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL) 280 if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
271 type = OUTPUT_TMDS; 281 type = OUTPUT_TMDS;
272 else 282 else
273 type = OUTPUT_ANALOG; 283 type = OUTPUT_ANALOG;
274 284
275 nv_encoder = find_encoder_by_type(connector, type); 285 nv_encoder = find_encoder(connector, type);
276 if (!nv_encoder) {
277 NV_ERROR(dev, "Detected %d encoder on %s, "
278 "but no object!\n", type,
279 drm_get_connector_name(connector));
280 return connector_status_disconnected;
281 }
282 } 286 }
283 287
284 nouveau_connector_set_encoder(connector, nv_encoder); 288 nouveau_connector_set_encoder(connector, nv_encoder);
@@ -292,9 +296,9 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
292 } 296 }
293 297
294detect_analog: 298detect_analog:
295 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); 299 nv_encoder = find_encoder(connector, OUTPUT_ANALOG);
296 if (!nv_encoder && !nouveau_tv_disable) 300 if (!nv_encoder && !nouveau_tv_disable)
297 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); 301 nv_encoder = find_encoder(connector, OUTPUT_TV);
298 if (nv_encoder && force) { 302 if (nv_encoder && force) {
299 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); 303 struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
300 struct drm_encoder_helper_funcs *helper = 304 struct drm_encoder_helper_funcs *helper =
@@ -327,7 +331,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
327 nv_connector->edid = NULL; 331 nv_connector->edid = NULL;
328 } 332 }
329 333
330 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); 334 nv_encoder = find_encoder(connector, OUTPUT_LVDS);
331 if (!nv_encoder) 335 if (!nv_encoder)
332 return connector_status_disconnected; 336 return connector_status_disconnected;
333 337
@@ -405,7 +409,7 @@ nouveau_connector_force(struct drm_connector *connector)
405 } else 409 } else
406 type = OUTPUT_ANY; 410 type = OUTPUT_ANY;
407 411
408 nv_encoder = find_encoder_by_type(connector, type); 412 nv_encoder = find_encoder(connector, type);
409 if (!nv_encoder) { 413 if (!nv_encoder) {
410 NV_ERROR(connector->dev, "can't find encoder to force %s on!\n", 414 NV_ERROR(connector->dev, "can't find encoder to force %s on!\n",
411 drm_get_connector_name(connector)); 415 drm_get_connector_name(connector));
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 568caedd7216..00bc6eaad558 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -167,8 +167,13 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
167 int delta, int length) 167 int delta, int length)
168{ 168{
169 struct nouveau_bo *pb = chan->pushbuf_bo; 169 struct nouveau_bo *pb = chan->pushbuf_bo;
170 uint64_t offset = bo->bo.offset + delta; 170 struct nouveau_vma *vma;
171 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; 171 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
172 u64 offset;
173
174 vma = nouveau_bo_vma_find(bo, chan->vm);
175 BUG_ON(!vma);
176 offset = vma->offset + delta;
172 177
173 BUG_ON(chan->dma.ib_free < 1); 178 BUG_ON(chan->dma.ib_free < 1);
174 nouveau_bo_wr32(pb, ip++, lower_32_bits(offset)); 179 nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 02c6f37d8bd7..b30ddd8d2e2a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -73,7 +73,7 @@ int nouveau_ignorelid = 0;
73module_param_named(ignorelid, nouveau_ignorelid, int, 0400); 73module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
74 74
75MODULE_PARM_DESC(noaccel, "Disable all acceleration"); 75MODULE_PARM_DESC(noaccel, "Disable all acceleration");
76int nouveau_noaccel = 0; 76int nouveau_noaccel = -1;
77module_param_named(noaccel, nouveau_noaccel, int, 0400); 77module_param_named(noaccel, nouveau_noaccel, int, 0400);
78 78
79MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); 79MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
@@ -119,6 +119,10 @@ MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
119int nouveau_msi; 119int nouveau_msi;
120module_param_named(msi, nouveau_msi, int, 0400); 120module_param_named(msi, nouveau_msi, int, 0400);
121 121
122MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n");
123int nouveau_ctxfw;
124module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
125
122int nouveau_fbpercrtc; 126int nouveau_fbpercrtc;
123#if 0 127#if 0
124module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); 128module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -210,10 +214,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
210 pfifo->unload_context(dev); 214 pfifo->unload_context(dev);
211 215
212 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { 216 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
213 if (dev_priv->eng[e]) { 217 if (!dev_priv->eng[e])
214 ret = dev_priv->eng[e]->fini(dev, e); 218 continue;
215 if (ret) 219
216 goto out_abort; 220 ret = dev_priv->eng[e]->fini(dev, e, true);
221 if (ret) {
222 NV_ERROR(dev, "... engine %d failed: %d\n", i, ret);
223 goto out_abort;
217 } 224 }
218 } 225 }
219 226
@@ -354,7 +361,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
354 361
355 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 362 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
356 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 363 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
357 u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT; 364 u32 offset = nv_crtc->cursor.nvbo->bo.offset;
358 365
359 nv_crtc->cursor.set_offset(nv_crtc, offset); 366 nv_crtc->cursor.set_offset(nv_crtc, offset);
360 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, 367 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
@@ -389,7 +396,9 @@ static struct drm_driver driver = {
389 .firstopen = nouveau_firstopen, 396 .firstopen = nouveau_firstopen,
390 .lastclose = nouveau_lastclose, 397 .lastclose = nouveau_lastclose,
391 .unload = nouveau_unload, 398 .unload = nouveau_unload,
399 .open = nouveau_open,
392 .preclose = nouveau_preclose, 400 .preclose = nouveau_preclose,
401 .postclose = nouveau_postclose,
393#if defined(CONFIG_DRM_NOUVEAU_DEBUG) 402#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
394 .debugfs_init = nouveau_debugfs_init, 403 .debugfs_init = nouveau_debugfs_init,
395 .debugfs_cleanup = nouveau_debugfs_takedown, 404 .debugfs_cleanup = nouveau_debugfs_takedown,
@@ -420,6 +429,8 @@ static struct drm_driver driver = {
420 429
421 .gem_init_object = nouveau_gem_object_new, 430 .gem_init_object = nouveau_gem_object_new,
422 .gem_free_object = nouveau_gem_object_del, 431 .gem_free_object = nouveau_gem_object_del,
432 .gem_open_object = nouveau_gem_object_open,
433 .gem_close_object = nouveau_gem_object_close,
423 434
424 .name = DRIVER_NAME, 435 .name = DRIVER_NAME,
425 .desc = DRIVER_DESC, 436 .desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9c56331941e2..d7d51deb34b6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -46,9 +46,17 @@
46#include "ttm/ttm_module.h" 46#include "ttm/ttm_module.h"
47 47
48struct nouveau_fpriv { 48struct nouveau_fpriv {
49 struct ttm_object_file *tfile; 49 spinlock_t lock;
50 struct list_head channels;
51 struct nouveau_vm *vm;
50}; 52};
51 53
54static inline struct nouveau_fpriv *
55nouveau_fpriv(struct drm_file *file_priv)
56{
57 return file_priv ? file_priv->driver_priv : NULL;
58}
59
52#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) 60#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
53 61
54#include "nouveau_drm.h" 62#include "nouveau_drm.h"
@@ -69,7 +77,7 @@ struct nouveau_mem {
69 struct drm_device *dev; 77 struct drm_device *dev;
70 78
71 struct nouveau_vma bar_vma; 79 struct nouveau_vma bar_vma;
72 struct nouveau_vma tmp_vma; 80 struct nouveau_vma vma[2];
73 u8 page_shift; 81 u8 page_shift;
74 82
75 struct drm_mm_node *tag; 83 struct drm_mm_node *tag;
@@ -107,7 +115,8 @@ struct nouveau_bo {
107 115
108 struct nouveau_channel *channel; 116 struct nouveau_channel *channel;
109 117
110 struct nouveau_vma vma; 118 struct list_head vma_list;
119 unsigned page_shift;
111 120
112 uint32_t tile_mode; 121 uint32_t tile_mode;
113 uint32_t tile_flags; 122 uint32_t tile_flags;
@@ -176,9 +185,10 @@ struct nouveau_gpuobj {
176 uint32_t flags; 185 uint32_t flags;
177 186
178 u32 size; 187 u32 size;
179 u32 pinst; 188 u32 pinst; /* PRAMIN BAR offset */
180 u32 cinst; 189 u32 cinst; /* Channel offset */
181 u64 vinst; 190 u64 vinst; /* VRAM address */
191 u64 linst; /* VM address */
182 192
183 uint32_t engine; 193 uint32_t engine;
184 uint32_t class; 194 uint32_t class;
@@ -201,6 +211,7 @@ enum nouveau_channel_mutex_class {
201 211
202struct nouveau_channel { 212struct nouveau_channel {
203 struct drm_device *dev; 213 struct drm_device *dev;
214 struct list_head list;
204 int id; 215 int id;
205 216
206 /* references to the channel data structure */ 217 /* references to the channel data structure */
@@ -228,15 +239,18 @@ struct nouveau_channel {
228 uint32_t sequence; 239 uint32_t sequence;
229 uint32_t sequence_ack; 240 uint32_t sequence_ack;
230 atomic_t last_sequence_irq; 241 atomic_t last_sequence_irq;
242 struct nouveau_vma vma;
231 } fence; 243 } fence;
232 244
233 /* DMA push buffer */ 245 /* DMA push buffer */
234 struct nouveau_gpuobj *pushbuf; 246 struct nouveau_gpuobj *pushbuf;
235 struct nouveau_bo *pushbuf_bo; 247 struct nouveau_bo *pushbuf_bo;
248 struct nouveau_vma pushbuf_vma;
236 uint32_t pushbuf_base; 249 uint32_t pushbuf_base;
237 250
238 /* Notifier memory */ 251 /* Notifier memory */
239 struct nouveau_bo *notifier_bo; 252 struct nouveau_bo *notifier_bo;
253 struct nouveau_vma notifier_vma;
240 struct drm_mm notifier_heap; 254 struct drm_mm notifier_heap;
241 255
242 /* PFIFO context */ 256 /* PFIFO context */
@@ -278,6 +292,7 @@ struct nouveau_channel {
278 292
279 uint32_t sw_subchannel[8]; 293 uint32_t sw_subchannel[8];
280 294
295 struct nouveau_vma dispc_vma[2];
281 struct { 296 struct {
282 struct nouveau_gpuobj *vblsem; 297 struct nouveau_gpuobj *vblsem;
283 uint32_t vblsem_head; 298 uint32_t vblsem_head;
@@ -297,7 +312,7 @@ struct nouveau_channel {
297struct nouveau_exec_engine { 312struct nouveau_exec_engine {
298 void (*destroy)(struct drm_device *, int engine); 313 void (*destroy)(struct drm_device *, int engine);
299 int (*init)(struct drm_device *, int engine); 314 int (*init)(struct drm_device *, int engine);
300 int (*fini)(struct drm_device *, int engine); 315 int (*fini)(struct drm_device *, int engine, bool suspend);
301 int (*context_new)(struct nouveau_channel *, int engine); 316 int (*context_new)(struct nouveau_channel *, int engine);
302 void (*context_del)(struct nouveau_channel *, int engine); 317 void (*context_del)(struct nouveau_channel *, int engine);
303 int (*object_new)(struct nouveau_channel *, int engine, 318 int (*object_new)(struct nouveau_channel *, int engine,
@@ -314,7 +329,8 @@ struct nouveau_instmem_engine {
314 int (*suspend)(struct drm_device *dev); 329 int (*suspend)(struct drm_device *dev);
315 void (*resume)(struct drm_device *dev); 330 void (*resume)(struct drm_device *dev);
316 331
317 int (*get)(struct nouveau_gpuobj *, u32 size, u32 align); 332 int (*get)(struct nouveau_gpuobj *, struct nouveau_channel *,
333 u32 size, u32 align);
318 void (*put)(struct nouveau_gpuobj *); 334 void (*put)(struct nouveau_gpuobj *);
319 int (*map)(struct nouveau_gpuobj *); 335 int (*map)(struct nouveau_gpuobj *);
320 void (*unmap)(struct nouveau_gpuobj *); 336 void (*unmap)(struct nouveau_gpuobj *);
@@ -445,9 +461,9 @@ struct nouveau_pm_level {
445struct nouveau_pm_temp_sensor_constants { 461struct nouveau_pm_temp_sensor_constants {
446 u16 offset_constant; 462 u16 offset_constant;
447 s16 offset_mult; 463 s16 offset_mult;
448 u16 offset_div; 464 s16 offset_div;
449 u16 slope_mult; 465 s16 slope_mult;
450 u16 slope_div; 466 s16 slope_div;
451}; 467};
452 468
453struct nouveau_pm_threshold_temp { 469struct nouveau_pm_threshold_temp {
@@ -488,7 +504,10 @@ struct nouveau_pm_engine {
488}; 504};
489 505
490struct nouveau_vram_engine { 506struct nouveau_vram_engine {
507 struct nouveau_mm *mm;
508
491 int (*init)(struct drm_device *); 509 int (*init)(struct drm_device *);
510 void (*takedown)(struct drm_device *dev);
492 int (*get)(struct drm_device *, u64, u32 align, u32 size_nc, 511 int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
493 u32 type, struct nouveau_mem **); 512 u32 type, struct nouveau_mem **);
494 void (*put)(struct drm_device *, struct nouveau_mem **); 513 void (*put)(struct drm_device *, struct nouveau_mem **);
@@ -608,6 +627,7 @@ enum nouveau_card_type {
608 627
609struct drm_nouveau_private { 628struct drm_nouveau_private {
610 struct drm_device *dev; 629 struct drm_device *dev;
630 bool noaccel;
611 631
612 /* the card type, takes NV_* as values */ 632 /* the card type, takes NV_* as values */
613 enum nouveau_card_type card_type; 633 enum nouveau_card_type card_type;
@@ -700,7 +720,6 @@ struct drm_nouveau_private {
700 /* VRAM/fb configuration */ 720 /* VRAM/fb configuration */
701 uint64_t vram_size; 721 uint64_t vram_size;
702 uint64_t vram_sys_base; 722 uint64_t vram_sys_base;
703 u32 vram_rblock_size;
704 723
705 uint64_t fb_phys; 724 uint64_t fb_phys;
706 uint64_t fb_available_size; 725 uint64_t fb_available_size;
@@ -784,12 +803,15 @@ extern int nouveau_override_conntype;
784extern char *nouveau_perflvl; 803extern char *nouveau_perflvl;
785extern int nouveau_perflvl_wr; 804extern int nouveau_perflvl_wr;
786extern int nouveau_msi; 805extern int nouveau_msi;
806extern int nouveau_ctxfw;
787 807
788extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); 808extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
789extern int nouveau_pci_resume(struct pci_dev *pdev); 809extern int nouveau_pci_resume(struct pci_dev *pdev);
790 810
791/* nouveau_state.c */ 811/* nouveau_state.c */
812extern int nouveau_open(struct drm_device *, struct drm_file *);
792extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); 813extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
814extern void nouveau_postclose(struct drm_device *, struct drm_file *);
793extern int nouveau_load(struct drm_device *, unsigned long flags); 815extern int nouveau_load(struct drm_device *, unsigned long flags);
794extern int nouveau_firstopen(struct drm_device *); 816extern int nouveau_firstopen(struct drm_device *);
795extern void nouveau_lastclose(struct drm_device *); 817extern void nouveau_lastclose(struct drm_device *);
@@ -847,7 +869,7 @@ extern int nouveau_channel_alloc(struct drm_device *dev,
847extern struct nouveau_channel * 869extern struct nouveau_channel *
848nouveau_channel_get_unlocked(struct nouveau_channel *); 870nouveau_channel_get_unlocked(struct nouveau_channel *);
849extern struct nouveau_channel * 871extern struct nouveau_channel *
850nouveau_channel_get(struct drm_device *, struct drm_file *, int id); 872nouveau_channel_get(struct drm_file *, int id);
851extern void nouveau_channel_put_unlocked(struct nouveau_channel **); 873extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
852extern void nouveau_channel_put(struct nouveau_channel **); 874extern void nouveau_channel_put(struct nouveau_channel **);
853extern void nouveau_channel_ref(struct nouveau_channel *chan, 875extern void nouveau_channel_ref(struct nouveau_channel *chan,
@@ -1120,7 +1142,6 @@ extern int nvc0_fifo_unload_context(struct drm_device *);
1120 1142
1121/* nv04_graph.c */ 1143/* nv04_graph.c */
1122extern int nv04_graph_create(struct drm_device *); 1144extern int nv04_graph_create(struct drm_device *);
1123extern void nv04_graph_fifo_access(struct drm_device *, bool);
1124extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16); 1145extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
1125extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan, 1146extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
1126 u32 class, u32 mthd, u32 data); 1147 u32 class, u32 mthd, u32 data);
@@ -1169,7 +1190,8 @@ extern int nv04_instmem_init(struct drm_device *);
1169extern void nv04_instmem_takedown(struct drm_device *); 1190extern void nv04_instmem_takedown(struct drm_device *);
1170extern int nv04_instmem_suspend(struct drm_device *); 1191extern int nv04_instmem_suspend(struct drm_device *);
1171extern void nv04_instmem_resume(struct drm_device *); 1192extern void nv04_instmem_resume(struct drm_device *);
1172extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); 1193extern int nv04_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
1194 u32 size, u32 align);
1173extern void nv04_instmem_put(struct nouveau_gpuobj *); 1195extern void nv04_instmem_put(struct nouveau_gpuobj *);
1174extern int nv04_instmem_map(struct nouveau_gpuobj *); 1196extern int nv04_instmem_map(struct nouveau_gpuobj *);
1175extern void nv04_instmem_unmap(struct nouveau_gpuobj *); 1197extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
@@ -1180,7 +1202,8 @@ extern int nv50_instmem_init(struct drm_device *);
1180extern void nv50_instmem_takedown(struct drm_device *); 1202extern void nv50_instmem_takedown(struct drm_device *);
1181extern int nv50_instmem_suspend(struct drm_device *); 1203extern int nv50_instmem_suspend(struct drm_device *);
1182extern void nv50_instmem_resume(struct drm_device *); 1204extern void nv50_instmem_resume(struct drm_device *);
1183extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); 1205extern int nv50_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
1206 u32 size, u32 align);
1184extern void nv50_instmem_put(struct nouveau_gpuobj *); 1207extern void nv50_instmem_put(struct nouveau_gpuobj *);
1185extern int nv50_instmem_map(struct nouveau_gpuobj *); 1208extern int nv50_instmem_map(struct nouveau_gpuobj *);
1186extern void nv50_instmem_unmap(struct nouveau_gpuobj *); 1209extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
@@ -1247,10 +1270,9 @@ extern int nv04_crtc_create(struct drm_device *, int index);
1247 1270
1248/* nouveau_bo.c */ 1271/* nouveau_bo.c */
1249extern struct ttm_bo_driver nouveau_bo_driver; 1272extern struct ttm_bo_driver nouveau_bo_driver;
1250extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *, 1273extern int nouveau_bo_new(struct drm_device *, int size, int align,
1251 int size, int align, uint32_t flags, 1274 uint32_t flags, uint32_t tile_mode,
1252 uint32_t tile_mode, uint32_t tile_flags, 1275 uint32_t tile_flags, struct nouveau_bo **);
1253 struct nouveau_bo **);
1254extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags); 1276extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
1255extern int nouveau_bo_unpin(struct nouveau_bo *); 1277extern int nouveau_bo_unpin(struct nouveau_bo *);
1256extern int nouveau_bo_map(struct nouveau_bo *); 1278extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1265,6 +1287,12 @@ extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
1265extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, 1287extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
1266 bool no_wait_reserve, bool no_wait_gpu); 1288 bool no_wait_reserve, bool no_wait_gpu);
1267 1289
1290extern struct nouveau_vma *
1291nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
1292extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
1293 struct nouveau_vma *);
1294extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
1295
1268/* nouveau_fence.c */ 1296/* nouveau_fence.c */
1269struct nouveau_fence; 1297struct nouveau_fence;
1270extern int nouveau_fence_init(struct drm_device *); 1298extern int nouveau_fence_init(struct drm_device *);
@@ -1310,12 +1338,14 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
1310} 1338}
1311 1339
1312/* nouveau_gem.c */ 1340/* nouveau_gem.c */
1313extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, 1341extern int nouveau_gem_new(struct drm_device *, int size, int align,
1314 int size, int align, uint32_t domain, 1342 uint32_t domain, uint32_t tile_mode,
1315 uint32_t tile_mode, uint32_t tile_flags, 1343 uint32_t tile_flags, struct nouveau_bo **);
1316 struct nouveau_bo **);
1317extern int nouveau_gem_object_new(struct drm_gem_object *); 1344extern int nouveau_gem_object_new(struct drm_gem_object *);
1318extern void nouveau_gem_object_del(struct drm_gem_object *); 1345extern void nouveau_gem_object_del(struct drm_gem_object *);
1346extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
1347extern void nouveau_gem_object_close(struct drm_gem_object *,
1348 struct drm_file *);
1319extern int nouveau_gem_ioctl_new(struct drm_device *, void *, 1349extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
1320 struct drm_file *); 1350 struct drm_file *);
1321extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *, 1351extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index a3a88ad00f86..95c843e684bb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -30,6 +30,7 @@
30struct nouveau_framebuffer { 30struct nouveau_framebuffer {
31 struct drm_framebuffer base; 31 struct drm_framebuffer base;
32 struct nouveau_bo *nvbo; 32 struct nouveau_bo *nvbo;
33 struct nouveau_vma vma;
33 u32 r_dma; 34 u32 r_dma;
34 u32 r_format; 35 u32 r_format;
35 u32 r_pitch; 36 u32 r_pitch;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 39aee6d4daf8..14a8627efe4d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -279,6 +279,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
279 struct fb_info *info; 279 struct fb_info *info;
280 struct drm_framebuffer *fb; 280 struct drm_framebuffer *fb;
281 struct nouveau_framebuffer *nouveau_fb; 281 struct nouveau_framebuffer *nouveau_fb;
282 struct nouveau_channel *chan;
282 struct nouveau_bo *nvbo; 283 struct nouveau_bo *nvbo;
283 struct drm_mode_fb_cmd mode_cmd; 284 struct drm_mode_fb_cmd mode_cmd;
284 struct pci_dev *pdev = dev->pdev; 285 struct pci_dev *pdev = dev->pdev;
@@ -296,8 +297,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
296 size = mode_cmd.pitch * mode_cmd.height; 297 size = mode_cmd.pitch * mode_cmd.height;
297 size = roundup(size, PAGE_SIZE); 298 size = roundup(size, PAGE_SIZE);
298 299
299 ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, 300 ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
300 NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo); 301 0, 0x0000, &nvbo);
301 if (ret) { 302 if (ret) {
302 NV_ERROR(dev, "failed to allocate framebuffer\n"); 303 NV_ERROR(dev, "failed to allocate framebuffer\n");
303 goto out; 304 goto out;
@@ -318,6 +319,15 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
318 goto out; 319 goto out;
319 } 320 }
320 321
322 chan = nouveau_nofbaccel ? NULL : dev_priv->channel;
323 if (chan && dev_priv->card_type >= NV_50) {
324 ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma);
325 if (ret) {
326 NV_ERROR(dev, "failed to map fb into chan: %d\n", ret);
327 chan = NULL;
328 }
329 }
330
321 mutex_lock(&dev->struct_mutex); 331 mutex_lock(&dev->struct_mutex);
322 332
323 info = framebuffer_alloc(0, device); 333 info = framebuffer_alloc(0, device);
@@ -448,6 +458,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
448 458
449 if (nouveau_fb->nvbo) { 459 if (nouveau_fb->nvbo) {
450 nouveau_bo_unmap(nouveau_fb->nvbo); 460 nouveau_bo_unmap(nouveau_fb->nvbo);
461 nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
451 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); 462 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
452 nouveau_fb->nvbo = NULL; 463 nouveau_fb->nvbo = NULL;
453 } 464 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 7347075ca5b8..8d02d875376d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -336,6 +336,7 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
336{ 336{
337 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 337 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
338 struct nouveau_fence *fence = NULL; 338 struct nouveau_fence *fence = NULL;
339 u64 offset = chan->fence.vma.offset + sema->mem->start;
339 int ret; 340 int ret;
340 341
341 if (dev_priv->chipset < 0x84) { 342 if (dev_priv->chipset < 0x84) {
@@ -345,13 +346,10 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
345 346
346 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3); 347 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
347 OUT_RING (chan, NvSema); 348 OUT_RING (chan, NvSema);
348 OUT_RING (chan, sema->mem->start); 349 OUT_RING (chan, offset);
349 OUT_RING (chan, 1); 350 OUT_RING (chan, 1);
350 } else 351 } else
351 if (dev_priv->chipset < 0xc0) { 352 if (dev_priv->chipset < 0xc0) {
352 struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
353 u64 offset = vma->offset + sema->mem->start;
354
355 ret = RING_SPACE(chan, 7); 353 ret = RING_SPACE(chan, 7);
356 if (ret) 354 if (ret)
357 return ret; 355 return ret;
@@ -364,9 +362,6 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
364 OUT_RING (chan, 1); 362 OUT_RING (chan, 1);
365 OUT_RING (chan, 1); /* ACQUIRE_EQ */ 363 OUT_RING (chan, 1); /* ACQUIRE_EQ */
366 } else { 364 } else {
367 struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
368 u64 offset = vma->offset + sema->mem->start;
369
370 ret = RING_SPACE(chan, 5); 365 ret = RING_SPACE(chan, 5);
371 if (ret) 366 if (ret)
372 return ret; 367 return ret;
@@ -394,6 +389,7 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
394{ 389{
395 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 390 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
396 struct nouveau_fence *fence = NULL; 391 struct nouveau_fence *fence = NULL;
392 u64 offset = chan->fence.vma.offset + sema->mem->start;
397 int ret; 393 int ret;
398 394
399 if (dev_priv->chipset < 0x84) { 395 if (dev_priv->chipset < 0x84) {
@@ -403,14 +399,11 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
403 399
404 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2); 400 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
405 OUT_RING (chan, NvSema); 401 OUT_RING (chan, NvSema);
406 OUT_RING (chan, sema->mem->start); 402 OUT_RING (chan, offset);
407 BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1); 403 BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
408 OUT_RING (chan, 1); 404 OUT_RING (chan, 1);
409 } else 405 } else
410 if (dev_priv->chipset < 0xc0) { 406 if (dev_priv->chipset < 0xc0) {
411 struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
412 u64 offset = vma->offset + sema->mem->start;
413
414 ret = RING_SPACE(chan, 7); 407 ret = RING_SPACE(chan, 7);
415 if (ret) 408 if (ret)
416 return ret; 409 return ret;
@@ -423,9 +416,6 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
423 OUT_RING (chan, 1); 416 OUT_RING (chan, 1);
424 OUT_RING (chan, 2); /* RELEASE */ 417 OUT_RING (chan, 2); /* RELEASE */
425 } else { 418 } else {
426 struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
427 u64 offset = vma->offset + sema->mem->start;
428
429 ret = RING_SPACE(chan, 5); 419 ret = RING_SPACE(chan, 5);
430 if (ret) 420 if (ret)
431 return ret; 421 return ret;
@@ -540,6 +530,12 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
540 nouveau_gpuobj_ref(NULL, &obj); 530 nouveau_gpuobj_ref(NULL, &obj);
541 if (ret) 531 if (ret)
542 return ret; 532 return ret;
533 } else {
534 /* map fence bo into channel's vm */
535 ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
536 &chan->fence.vma);
537 if (ret)
538 return ret;
543 } 539 }
544 540
545 INIT_LIST_HEAD(&chan->fence.pending); 541 INIT_LIST_HEAD(&chan->fence.pending);
@@ -551,10 +547,10 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
551void 547void
552nouveau_fence_channel_fini(struct nouveau_channel *chan) 548nouveau_fence_channel_fini(struct nouveau_channel *chan)
553{ 549{
550 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
554 struct nouveau_fence *tmp, *fence; 551 struct nouveau_fence *tmp, *fence;
555 552
556 spin_lock(&chan->fence.lock); 553 spin_lock(&chan->fence.lock);
557
558 list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { 554 list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
559 fence->signalled = true; 555 fence->signalled = true;
560 list_del(&fence->entry); 556 list_del(&fence->entry);
@@ -564,8 +560,9 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
564 560
565 kref_put(&fence->refcount, nouveau_fence_del); 561 kref_put(&fence->refcount, nouveau_fence_del);
566 } 562 }
567
568 spin_unlock(&chan->fence.lock); 563 spin_unlock(&chan->fence.lock);
564
565 nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
569} 566}
570 567
571int 568int
@@ -577,7 +574,7 @@ nouveau_fence_init(struct drm_device *dev)
577 574
578 /* Create a shared VRAM heap for cross-channel sync. */ 575 /* Create a shared VRAM heap for cross-channel sync. */
579 if (USE_SEMA(dev)) { 576 if (USE_SEMA(dev)) {
580 ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM, 577 ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
581 0, 0, &dev_priv->fence.bo); 578 0, 0, &dev_priv->fence.bo);
582 if (ret) 579 if (ret)
583 return ret; 580 return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index b52e46018245..5f0bc57fdaab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -60,9 +60,71 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
60} 60}
61 61
62int 62int
63nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan, 63nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
64 int size, int align, uint32_t domain, uint32_t tile_mode, 64{
65 uint32_t tile_flags, struct nouveau_bo **pnvbo) 65 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
66 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
67 struct nouveau_vma *vma;
68 int ret;
69
70 if (!fpriv->vm)
71 return 0;
72
73 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
74 if (ret)
75 return ret;
76
77 vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
78 if (!vma) {
79 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
80 if (!vma) {
81 ret = -ENOMEM;
82 goto out;
83 }
84
85 ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
86 if (ret) {
87 kfree(vma);
88 goto out;
89 }
90 } else {
91 vma->refcount++;
92 }
93
94out:
95 ttm_bo_unreserve(&nvbo->bo);
96 return ret;
97}
98
99void
100nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
101{
102 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
103 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
104 struct nouveau_vma *vma;
105 int ret;
106
107 if (!fpriv->vm)
108 return;
109
110 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
111 if (ret)
112 return;
113
114 vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
115 if (vma) {
116 if (--vma->refcount == 0) {
117 nouveau_bo_vma_del(nvbo, vma);
118 kfree(vma);
119 }
120 }
121 ttm_bo_unreserve(&nvbo->bo);
122}
123
124int
125nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
126 uint32_t tile_mode, uint32_t tile_flags,
127 struct nouveau_bo **pnvbo)
66{ 128{
67 struct drm_nouveau_private *dev_priv = dev->dev_private; 129 struct drm_nouveau_private *dev_priv = dev->dev_private;
68 struct nouveau_bo *nvbo; 130 struct nouveau_bo *nvbo;
@@ -76,7 +138,7 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
76 if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) 138 if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
77 flags |= TTM_PL_FLAG_SYSTEM; 139 flags |= TTM_PL_FLAG_SYSTEM;
78 140
79 ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode, 141 ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
80 tile_flags, pnvbo); 142 tile_flags, pnvbo);
81 if (ret) 143 if (ret)
82 return ret; 144 return ret;
@@ -103,17 +165,28 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
103} 165}
104 166
105static int 167static int
106nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep) 168nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
169 struct drm_nouveau_gem_info *rep)
107{ 170{
171 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
108 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 172 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
173 struct nouveau_vma *vma;
109 174
110 if (nvbo->bo.mem.mem_type == TTM_PL_TT) 175 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
111 rep->domain = NOUVEAU_GEM_DOMAIN_GART; 176 rep->domain = NOUVEAU_GEM_DOMAIN_GART;
112 else 177 else
113 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; 178 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
114 179
115 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
116 rep->offset = nvbo->bo.offset; 180 rep->offset = nvbo->bo.offset;
181 if (fpriv->vm) {
182 vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
183 if (!vma)
184 return -EINVAL;
185
186 rep->offset = vma->offset;
187 }
188
189 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
117 rep->map_handle = nvbo->bo.addr_space_offset; 190 rep->map_handle = nvbo->bo.addr_space_offset;
118 rep->tile_mode = nvbo->tile_mode; 191 rep->tile_mode = nvbo->tile_mode;
119 rep->tile_flags = nvbo->tile_flags; 192 rep->tile_flags = nvbo->tile_flags;
@@ -127,7 +200,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
127 struct drm_nouveau_private *dev_priv = dev->dev_private; 200 struct drm_nouveau_private *dev_priv = dev->dev_private;
128 struct drm_nouveau_gem_new *req = data; 201 struct drm_nouveau_gem_new *req = data;
129 struct nouveau_bo *nvbo = NULL; 202 struct nouveau_bo *nvbo = NULL;
130 struct nouveau_channel *chan = NULL;
131 int ret = 0; 203 int ret = 0;
132 204
133 if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) 205 if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
@@ -138,28 +210,21 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
138 return -EINVAL; 210 return -EINVAL;
139 } 211 }
140 212
141 if (req->channel_hint) { 213 ret = nouveau_gem_new(dev, req->info.size, req->align,
142 chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
143 if (IS_ERR(chan))
144 return PTR_ERR(chan);
145 }
146
147 ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
148 req->info.domain, req->info.tile_mode, 214 req->info.domain, req->info.tile_mode,
149 req->info.tile_flags, &nvbo); 215 req->info.tile_flags, &nvbo);
150 if (chan)
151 nouveau_channel_put(&chan);
152 if (ret) 216 if (ret)
153 return ret; 217 return ret;
154 218
155 ret = nouveau_gem_info(nvbo->gem, &req->info);
156 if (ret)
157 goto out;
158
159 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); 219 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
220 if (ret == 0) {
221 ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
222 if (ret)
223 drm_gem_handle_delete(file_priv, req->info.handle);
224 }
225
160 /* drop reference from allocate - handle holds it now */ 226 /* drop reference from allocate - handle holds it now */
161 drm_gem_object_unreference_unlocked(nvbo->gem); 227 drm_gem_object_unreference_unlocked(nvbo->gem);
162out:
163 return ret; 228 return ret;
164} 229}
165 230
@@ -318,6 +383,7 @@ static int
318validate_list(struct nouveau_channel *chan, struct list_head *list, 383validate_list(struct nouveau_channel *chan, struct list_head *list,
319 struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr) 384 struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
320{ 385{
386 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
321 struct drm_nouveau_gem_pushbuf_bo __user *upbbo = 387 struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
322 (void __force __user *)(uintptr_t)user_pbbo_ptr; 388 (void __force __user *)(uintptr_t)user_pbbo_ptr;
323 struct drm_device *dev = chan->dev; 389 struct drm_device *dev = chan->dev;
@@ -356,24 +422,26 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
356 return ret; 422 return ret;
357 } 423 }
358 424
359 if (nvbo->bo.offset == b->presumed.offset && 425 if (dev_priv->card_type < NV_50) {
360 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && 426 if (nvbo->bo.offset == b->presumed.offset &&
361 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || 427 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
362 (nvbo->bo.mem.mem_type == TTM_PL_TT && 428 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
363 b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART))) 429 (nvbo->bo.mem.mem_type == TTM_PL_TT &&
364 continue; 430 b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
431 continue;
365 432
366 if (nvbo->bo.mem.mem_type == TTM_PL_TT) 433 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
367 b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART; 434 b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
368 else 435 else
369 b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM; 436 b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
370 b->presumed.offset = nvbo->bo.offset; 437 b->presumed.offset = nvbo->bo.offset;
371 b->presumed.valid = 0; 438 b->presumed.valid = 0;
372 relocs++; 439 relocs++;
373 440
374 if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed, 441 if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
375 &b->presumed, sizeof(b->presumed))) 442 &b->presumed, sizeof(b->presumed)))
376 return -EFAULT; 443 return -EFAULT;
444 }
377 } 445 }
378 446
379 return relocs; 447 return relocs;
@@ -548,7 +616,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
548 struct nouveau_fence *fence = NULL; 616 struct nouveau_fence *fence = NULL;
549 int i, j, ret = 0, do_reloc = 0; 617 int i, j, ret = 0, do_reloc = 0;
550 618
551 chan = nouveau_channel_get(dev, file_priv, req->channel); 619 chan = nouveau_channel_get(file_priv, req->channel);
552 if (IS_ERR(chan)) 620 if (IS_ERR(chan))
553 return PTR_ERR(chan); 621 return PTR_ERR(chan);
554 622
@@ -782,7 +850,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
782 if (!gem) 850 if (!gem)
783 return -ENOENT; 851 return -ENOENT;
784 852
785 ret = nouveau_gem_info(gem, req); 853 ret = nouveau_gem_info(file_priv, gem, req);
786 drm_gem_object_unreference_unlocked(gem); 854 drm_gem_object_unreference_unlocked(gem);
787 return ret; 855 return ret;
788} 856}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 2ba7265bc967..868c7fd74854 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -79,7 +79,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
79 int i; 79 int i;
80 80
81 stat = nv_rd32(dev, NV03_PMC_INTR_0); 81 stat = nv_rd32(dev, NV03_PMC_INTR_0);
82 if (!stat) 82 if (stat == 0 || stat == ~0)
83 return IRQ_NONE; 83 return IRQ_NONE;
84 84
85 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 85 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 5ee14d216ce8..f9ae2fc3d6f1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -397,7 +397,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
397 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) 397 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
398 dma_bits = 40; 398 dma_bits = 40;
399 } else 399 } else
400 if (0 && drm_pci_device_is_pcie(dev) && 400 if (0 && pci_is_pcie(dev->pdev) &&
401 dev_priv->chipset > 0x40 && 401 dev_priv->chipset > 0x40 &&
402 dev_priv->chipset != 0x45) { 402 dev_priv->chipset != 0x45) {
403 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39))) 403 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
@@ -423,38 +423,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
423 return ret; 423 return ret;
424 } 424 }
425 425
426 /* reserve space at end of VRAM for PRAMIN */
427 if (dev_priv->card_type >= NV_50) {
428 dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
429 } else
430 if (dev_priv->card_type >= NV_40) {
431 u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
432 u32 rsvd;
433
434 /* estimate grctx size, the magics come from nv40_grctx.c */
435 if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
436 else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
437 else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
438 else rsvd = 0x4a40 * vs;
439 rsvd += 16 * 1024;
440 rsvd *= dev_priv->engine.fifo.channels;
441
442 /* pciegart table */
443 if (drm_pci_device_is_pcie(dev))
444 rsvd += 512 * 1024;
445
446 /* object storage */
447 rsvd += 512 * 1024;
448
449 dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
450 } else {
451 dev_priv->ramin_rsvd_vram = 512 * 1024;
452 }
453
454 ret = dev_priv->engine.vram.init(dev);
455 if (ret)
456 return ret;
457
458 NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); 426 NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
459 if (dev_priv->vram_sys_base) { 427 if (dev_priv->vram_sys_base) {
460 NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", 428 NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
@@ -479,7 +447,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
479 } 447 }
480 448
481 if (dev_priv->card_type < NV_50) { 449 if (dev_priv->card_type < NV_50) {
482 ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM, 450 ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
483 0, 0, &dev_priv->vga_ram); 451 0, 0, &dev_priv->vga_ram);
484 if (ret == 0) 452 if (ret == 0)
485 ret = nouveau_bo_pin(dev_priv->vga_ram, 453 ret = nouveau_bo_pin(dev_priv->vga_ram,
@@ -729,37 +697,31 @@ nouveau_mem_timing_fini(struct drm_device *dev)
729} 697}
730 698
731static int 699static int
732nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size) 700nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
733{ 701{
734 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); 702 /* nothing to do */
735 struct nouveau_mm *mm;
736 u64 size, block, rsvd;
737 int ret;
738
739 rsvd = (256 * 1024); /* vga memory */
740 size = (p_size << PAGE_SHIFT) - rsvd;
741 block = dev_priv->vram_rblock_size;
742
743 ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12);
744 if (ret)
745 return ret;
746
747 man->priv = mm;
748 return 0; 703 return 0;
749} 704}
750 705
751static int 706static int
752nouveau_vram_manager_fini(struct ttm_mem_type_manager *man) 707nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
753{ 708{
754 struct nouveau_mm *mm = man->priv; 709 /* nothing to do */
755 int ret; 710 return 0;
711}
756 712
757 ret = nouveau_mm_fini(&mm); 713static inline void
758 if (ret) 714nouveau_mem_node_cleanup(struct nouveau_mem *node)
759 return ret; 715{
716 if (node->vma[0].node) {
717 nouveau_vm_unmap(&node->vma[0]);
718 nouveau_vm_put(&node->vma[0]);
719 }
760 720
761 man->priv = NULL; 721 if (node->vma[1].node) {
762 return 0; 722 nouveau_vm_unmap(&node->vma[1]);
723 nouveau_vm_put(&node->vma[1]);
724 }
763} 725}
764 726
765static void 727static void
@@ -768,14 +730,9 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
768{ 730{
769 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); 731 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
770 struct nouveau_vram_engine *vram = &dev_priv->engine.vram; 732 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
771 struct nouveau_mem *node = mem->mm_node;
772 struct drm_device *dev = dev_priv->dev; 733 struct drm_device *dev = dev_priv->dev;
773 734
774 if (node->tmp_vma.node) { 735 nouveau_mem_node_cleanup(mem->mm_node);
775 nouveau_vm_unmap(&node->tmp_vma);
776 nouveau_vm_put(&node->tmp_vma);
777 }
778
779 vram->put(dev, (struct nouveau_mem **)&mem->mm_node); 736 vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
780} 737}
781 738
@@ -794,7 +751,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
794 int ret; 751 int ret;
795 752
796 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) 753 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
797 size_nc = 1 << nvbo->vma.node->type; 754 size_nc = 1 << nvbo->page_shift;
798 755
799 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, 756 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
800 mem->page_alignment << PAGE_SHIFT, size_nc, 757 mem->page_alignment << PAGE_SHIFT, size_nc,
@@ -804,9 +761,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
804 return (ret == -ENOSPC) ? 0 : ret; 761 return (ret == -ENOSPC) ? 0 : ret;
805 } 762 }
806 763
807 node->page_shift = 12; 764 node->page_shift = nvbo->page_shift;
808 if (nvbo->vma.node)
809 node->page_shift = nvbo->vma.node->type;
810 765
811 mem->mm_node = node; 766 mem->mm_node = node;
812 mem->start = node->offset >> PAGE_SHIFT; 767 mem->start = node->offset >> PAGE_SHIFT;
@@ -862,15 +817,9 @@ static void
862nouveau_gart_manager_del(struct ttm_mem_type_manager *man, 817nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
863 struct ttm_mem_reg *mem) 818 struct ttm_mem_reg *mem)
864{ 819{
865 struct nouveau_mem *node = mem->mm_node; 820 nouveau_mem_node_cleanup(mem->mm_node);
866 821 kfree(mem->mm_node);
867 if (node->tmp_vma.node) {
868 nouveau_vm_unmap(&node->tmp_vma);
869 nouveau_vm_put(&node->tmp_vma);
870 }
871
872 mem->mm_node = NULL; 822 mem->mm_node = NULL;
873 kfree(node);
874} 823}
875 824
876static int 825static int
@@ -880,11 +829,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
880 struct ttm_mem_reg *mem) 829 struct ttm_mem_reg *mem)
881{ 830{
882 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 831 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
883 struct nouveau_bo *nvbo = nouveau_bo(bo);
884 struct nouveau_vma *vma = &nvbo->vma;
885 struct nouveau_vm *vm = vma->vm;
886 struct nouveau_mem *node; 832 struct nouveau_mem *node;
887 int ret;
888 833
889 if (unlikely((mem->num_pages << PAGE_SHIFT) >= 834 if (unlikely((mem->num_pages << PAGE_SHIFT) >=
890 dev_priv->gart_info.aper_size)) 835 dev_priv->gart_info.aper_size))
@@ -893,24 +838,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
893 node = kzalloc(sizeof(*node), GFP_KERNEL); 838 node = kzalloc(sizeof(*node), GFP_KERNEL);
894 if (!node) 839 if (!node)
895 return -ENOMEM; 840 return -ENOMEM;
841 node->page_shift = 12;
896 842
897 /* This node must be for evicting large-paged VRAM
898 * to system memory. Due to a nv50 limitation of
899 * not being able to mix large/small pages within
900 * the same PDE, we need to create a temporary
901 * small-paged VMA for the eviction.
902 */
903 if (vma->node->type != vm->spg_shift) {
904 ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
905 vm->spg_shift, NV_MEM_ACCESS_RW,
906 &node->tmp_vma);
907 if (ret) {
908 kfree(node);
909 return ret;
910 }
911 }
912
913 node->page_shift = nvbo->vma.node->type;
914 mem->mm_node = node; 843 mem->mm_node = node;
915 mem->start = 0; 844 mem->start = 0;
916 return 0; 845 return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 7609756b6faf..1640dec3b823 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -158,11 +158,18 @@ int
158nouveau_mm_fini(struct nouveau_mm **prmm) 158nouveau_mm_fini(struct nouveau_mm **prmm)
159{ 159{
160 struct nouveau_mm *rmm = *prmm; 160 struct nouveau_mm *rmm = *prmm;
161 struct nouveau_mm_node *heap = 161 struct nouveau_mm_node *node, *heap =
162 list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry); 162 list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
163 163
164 if (!list_is_singular(&rmm->nodes)) 164 if (!list_is_singular(&rmm->nodes)) {
165 printk(KERN_ERR "nouveau_mm not empty at destroy time!\n");
166 list_for_each_entry(node, &rmm->nodes, nl_entry) {
167 printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
168 node->type, node->offset, node->length);
169 }
170 WARN_ON(1);
165 return -EBUSY; 171 return -EBUSY;
172 }
166 173
167 kfree(heap); 174 kfree(heap);
168 kfree(rmm); 175 kfree(rmm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index 1f7483aae9a4..b9c016d21553 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -52,6 +52,7 @@ int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
52void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *); 52void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
53 53
54int nv50_vram_init(struct drm_device *); 54int nv50_vram_init(struct drm_device *);
55void nv50_vram_fini(struct drm_device *);
55int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc, 56int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
56 u32 memtype, struct nouveau_mem **); 57 u32 memtype, struct nouveau_mem **);
57void nv50_vram_del(struct drm_device *, struct nouveau_mem **); 58void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 5b39718ae1f8..6abdbe6530a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,6 +34,7 @@ int
34nouveau_notifier_init_channel(struct nouveau_channel *chan) 34nouveau_notifier_init_channel(struct nouveau_channel *chan)
35{ 35{
36 struct drm_device *dev = chan->dev; 36 struct drm_device *dev = chan->dev;
37 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_bo *ntfy = NULL; 38 struct nouveau_bo *ntfy = NULL;
38 uint32_t flags, ttmpl; 39 uint32_t flags, ttmpl;
39 int ret; 40 int ret;
@@ -46,7 +47,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
46 ttmpl = TTM_PL_FLAG_TT; 47 ttmpl = TTM_PL_FLAG_TT;
47 } 48 }
48 49
49 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy); 50 ret = nouveau_gem_new(dev, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
50 if (ret) 51 if (ret)
51 return ret; 52 return ret;
52 53
@@ -58,14 +59,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
58 if (ret) 59 if (ret)
59 goto out_err; 60 goto out_err;
60 61
62 if (dev_priv->card_type >= NV_50) {
63 ret = nouveau_bo_vma_add(ntfy, chan->vm, &chan->notifier_vma);
64 if (ret)
65 goto out_err;
66 }
67
61 ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size); 68 ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
62 if (ret) 69 if (ret)
63 goto out_err; 70 goto out_err;
64 71
65 chan->notifier_bo = ntfy; 72 chan->notifier_bo = ntfy;
66out_err: 73out_err:
67 if (ret) 74 if (ret) {
75 nouveau_bo_vma_del(ntfy, &chan->notifier_vma);
68 drm_gem_object_unreference_unlocked(ntfy->gem); 76 drm_gem_object_unreference_unlocked(ntfy->gem);
77 }
69 78
70 return ret; 79 return ret;
71} 80}
@@ -78,6 +87,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
78 if (!chan->notifier_bo) 87 if (!chan->notifier_bo)
79 return; 88 return;
80 89
90 nouveau_bo_vma_del(chan->notifier_bo, &chan->notifier_vma);
81 nouveau_bo_unmap(chan->notifier_bo); 91 nouveau_bo_unmap(chan->notifier_bo);
82 mutex_lock(&dev->struct_mutex); 92 mutex_lock(&dev->struct_mutex);
83 nouveau_bo_unpin(chan->notifier_bo); 93 nouveau_bo_unpin(chan->notifier_bo);
@@ -122,10 +132,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
122 target = NV_MEM_TARGET_VRAM; 132 target = NV_MEM_TARGET_VRAM;
123 else 133 else
124 target = NV_MEM_TARGET_GART; 134 target = NV_MEM_TARGET_GART;
125 offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT; 135 offset = chan->notifier_bo->bo.offset;
126 } else { 136 } else {
127 target = NV_MEM_TARGET_VM; 137 target = NV_MEM_TARGET_VM;
128 offset = chan->notifier_bo->vma.offset; 138 offset = chan->notifier_vma.offset;
129 } 139 }
130 offset += mem->start; 140 offset += mem->start;
131 141
@@ -183,7 +193,7 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
183 if (unlikely(dev_priv->card_type >= NV_C0)) 193 if (unlikely(dev_priv->card_type >= NV_C0))
184 return -EINVAL; 194 return -EINVAL;
185 195
186 chan = nouveau_channel_get(dev, file_priv, na->channel); 196 chan = nouveau_channel_get(file_priv, na->channel);
187 if (IS_ERR(chan)) 197 if (IS_ERR(chan))
188 return PTR_ERR(chan); 198 return PTR_ERR(chan);
189 199
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 8f97016f5b26..159b7c437d3f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -125,7 +125,7 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
125 int ret = -EINVAL; 125 int ret = -EINVAL;
126 126
127 spin_lock_irqsave(&dev_priv->channels.lock, flags); 127 spin_lock_irqsave(&dev_priv->channels.lock, flags);
128 if (chid > 0 && chid < dev_priv->engine.fifo.channels) 128 if (chid >= 0 && chid < dev_priv->engine.fifo.channels)
129 chan = dev_priv->channels.ptr[chid]; 129 chan = dev_priv->channels.ptr[chid];
130 if (chan) 130 if (chan)
131 ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data); 131 ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
@@ -191,7 +191,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
191 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); 191 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
192 spin_unlock(&dev_priv->ramin_lock); 192 spin_unlock(&dev_priv->ramin_lock);
193 193
194 if (chan) { 194 if (!(flags & NVOBJ_FLAG_VM) && chan) {
195 ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); 195 ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
196 if (ramin) 196 if (ramin)
197 ramin = drm_mm_get_block(ramin, size, align); 197 ramin = drm_mm_get_block(ramin, size, align);
@@ -208,7 +208,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
208 gpuobj->vinst = ramin->start + chan->ramin->vinst; 208 gpuobj->vinst = ramin->start + chan->ramin->vinst;
209 gpuobj->node = ramin; 209 gpuobj->node = ramin;
210 } else { 210 } else {
211 ret = instmem->get(gpuobj, size, align); 211 ret = instmem->get(gpuobj, chan, size, align);
212 if (ret) { 212 if (ret) {
213 nouveau_gpuobj_ref(NULL, &gpuobj); 213 nouveau_gpuobj_ref(NULL, &gpuobj);
214 return ret; 214 return ret;
@@ -690,35 +690,64 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
690 return 0; 690 return 0;
691} 691}
692 692
693static int
694nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
695{
696 struct drm_device *dev = chan->dev;
697 struct nouveau_gpuobj *pgd = NULL;
698 struct nouveau_vm_pgd *vpgd;
699 int ret, i;
700
701 ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
702 if (ret)
703 return ret;
704
705 /* create page directory for this vm if none currently exists,
706 * will be destroyed automagically when last reference to the
707 * vm is removed
708 */
709 if (list_empty(&vm->pgd_list)) {
710 ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
711 if (ret)
712 return ret;
713 }
714 nouveau_vm_ref(vm, &chan->vm, pgd);
715 nouveau_gpuobj_ref(NULL, &pgd);
716
717 /* point channel at vm's page directory */
718 vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
719 nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
720 nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
721 nv_wo32(chan->ramin, 0x0208, 0xffffffff);
722 nv_wo32(chan->ramin, 0x020c, 0x000000ff);
723
724 /* map display semaphore buffers into channel's vm */
725 for (i = 0; i < 2; i++) {
726 struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];
727
728 ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm,
729 &chan->dispc_vma[i]);
730 if (ret)
731 return ret;
732 }
733
734 return 0;
735}
736
693int 737int
694nouveau_gpuobj_channel_init(struct nouveau_channel *chan, 738nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
695 uint32_t vram_h, uint32_t tt_h) 739 uint32_t vram_h, uint32_t tt_h)
696{ 740{
697 struct drm_device *dev = chan->dev; 741 struct drm_device *dev = chan->dev;
698 struct drm_nouveau_private *dev_priv = dev->dev_private; 742 struct drm_nouveau_private *dev_priv = dev->dev_private;
743 struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
744 struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
699 struct nouveau_gpuobj *vram = NULL, *tt = NULL; 745 struct nouveau_gpuobj *vram = NULL, *tt = NULL;
700 int ret, i; 746 int ret, i;
701 747
702 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); 748 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
703 749 if (dev_priv->card_type == NV_C0)
704 if (dev_priv->card_type == NV_C0) { 750 return nvc0_gpuobj_channel_init(chan, vm);
705 struct nouveau_vm *vm = dev_priv->chan_vm;
706 struct nouveau_vm_pgd *vpgd;
707
708 ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
709 &chan->ramin);
710 if (ret)
711 return ret;
712
713 nouveau_vm_ref(vm, &chan->vm, NULL);
714
715 vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
716 nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
717 nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
718 nv_wo32(chan->ramin, 0x0208, 0xffffffff);
719 nv_wo32(chan->ramin, 0x020c, 0x000000ff);
720 return 0;
721 }
722 751
723 /* Allocate a chunk of memory for per-channel object storage */ 752 /* Allocate a chunk of memory for per-channel object storage */
724 ret = nouveau_gpuobj_channel_init_pramin(chan); 753 ret = nouveau_gpuobj_channel_init_pramin(chan);
@@ -731,7 +760,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
731 * - Allocate per-channel page-directory 760 * - Allocate per-channel page-directory
732 * - Link with shared channel VM 761 * - Link with shared channel VM
733 */ 762 */
734 if (dev_priv->chan_vm) { 763 if (vm) {
735 u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; 764 u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
736 u64 vm_vinst = chan->ramin->vinst + pgd_offs; 765 u64 vm_vinst = chan->ramin->vinst + pgd_offs;
737 u32 vm_pinst = chan->ramin->pinst; 766 u32 vm_pinst = chan->ramin->pinst;
@@ -744,7 +773,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
744 if (ret) 773 if (ret)
745 return ret; 774 return ret;
746 775
747 nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd); 776 nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
748 } 777 }
749 778
750 /* RAMHT */ 779 /* RAMHT */
@@ -768,7 +797,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
768 struct nouveau_gpuobj *sem = NULL; 797 struct nouveau_gpuobj *sem = NULL;
769 struct nv50_display_crtc *dispc = 798 struct nv50_display_crtc *dispc =
770 &nv50_display(dev)->crtc[i]; 799 &nv50_display(dev)->crtc[i];
771 u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT; 800 u64 offset = dispc->sem.bo->bo.offset;
772 801
773 ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff, 802 ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
774 NV_MEM_ACCESS_RW, 803 NV_MEM_ACCESS_RW,
@@ -841,13 +870,22 @@ void
841nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) 870nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
842{ 871{
843 struct drm_device *dev = chan->dev; 872 struct drm_device *dev = chan->dev;
873 struct drm_nouveau_private *dev_priv = dev->dev_private;
874 int i;
844 875
845 NV_DEBUG(dev, "ch%d\n", chan->id); 876 NV_DEBUG(dev, "ch%d\n", chan->id);
846 877
847 nouveau_ramht_ref(NULL, &chan->ramht, chan); 878 if (dev_priv->card_type >= NV_50) {
879 struct nv50_display *disp = nv50_display(dev);
880
881 for (i = 0; i < 2; i++) {
882 struct nv50_display_crtc *dispc = &disp->crtc[i];
883 nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
884 }
848 885
849 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); 886 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
850 nouveau_gpuobj_ref(NULL, &chan->vm_pd); 887 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
888 }
851 889
852 if (drm_mm_initialized(&chan->ramin_heap)) 890 if (drm_mm_initialized(&chan->ramin_heap))
853 drm_mm_takedown(&chan->ramin_heap); 891 drm_mm_takedown(&chan->ramin_heap);
@@ -909,7 +947,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
909 if (init->handle == ~0) 947 if (init->handle == ~0)
910 return -EINVAL; 948 return -EINVAL;
911 949
912 chan = nouveau_channel_get(dev, file_priv, init->channel); 950 chan = nouveau_channel_get(file_priv, init->channel);
913 if (IS_ERR(chan)) 951 if (IS_ERR(chan))
914 return PTR_ERR(chan); 952 return PTR_ERR(chan);
915 953
@@ -936,7 +974,7 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
936 struct nouveau_channel *chan; 974 struct nouveau_channel *chan;
937 int ret; 975 int ret;
938 976
939 chan = nouveau_channel_get(dev, file_priv, objfree->channel); 977 chan = nouveau_channel_get(file_priv, objfree->channel);
940 if (IS_ERR(chan)) 978 if (IS_ERR(chan))
941 return PTR_ERR(chan); 979 return PTR_ERR(chan);
942 980
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 82fad914e648..c444cadbf849 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -429,7 +429,7 @@ nouveau_sgdma_init(struct drm_device *dev)
429 u32 aper_size, align; 429 u32 aper_size, align;
430 int ret; 430 int ret;
431 431
432 if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev)) 432 if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
433 aper_size = 512 * 1024 * 1024; 433 aper_size = 512 * 1024 * 1024;
434 else 434 else
435 aper_size = 64 * 1024 * 1024; 435 aper_size = 64 * 1024 * 1024;
@@ -458,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev)
458 dev_priv->gart_info.type = NOUVEAU_GART_HW; 458 dev_priv->gart_info.type = NOUVEAU_GART_HW;
459 dev_priv->gart_info.func = &nv50_sgdma_backend; 459 dev_priv->gart_info.func = &nv50_sgdma_backend;
460 } else 460 } else
461 if (0 && drm_pci_device_is_pcie(dev) && 461 if (0 && pci_is_pcie(dev->pdev) &&
462 dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) { 462 dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
463 if (nv44_graph_class(dev)) { 463 if (nv44_graph_class(dev)) {
464 dev_priv->gart_info.func = &nv44_sgdma_backend; 464 dev_priv->gart_info.func = &nv44_sgdma_backend;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 731acea865b5..10656e430b44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -91,6 +91,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
91 engine->pm.clock_pre = nv04_pm_clock_pre; 91 engine->pm.clock_pre = nv04_pm_clock_pre;
92 engine->pm.clock_set = nv04_pm_clock_set; 92 engine->pm.clock_set = nv04_pm_clock_set;
93 engine->vram.init = nouveau_mem_detect; 93 engine->vram.init = nouveau_mem_detect;
94 engine->vram.takedown = nouveau_stub_takedown;
94 engine->vram.flags_valid = nouveau_mem_flags_valid; 95 engine->vram.flags_valid = nouveau_mem_flags_valid;
95 break; 96 break;
96 case 0x10: 97 case 0x10:
@@ -139,6 +140,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
139 engine->pm.clock_pre = nv04_pm_clock_pre; 140 engine->pm.clock_pre = nv04_pm_clock_pre;
140 engine->pm.clock_set = nv04_pm_clock_set; 141 engine->pm.clock_set = nv04_pm_clock_set;
141 engine->vram.init = nouveau_mem_detect; 142 engine->vram.init = nouveau_mem_detect;
143 engine->vram.takedown = nouveau_stub_takedown;
142 engine->vram.flags_valid = nouveau_mem_flags_valid; 144 engine->vram.flags_valid = nouveau_mem_flags_valid;
143 break; 145 break;
144 case 0x20: 146 case 0x20:
@@ -187,6 +189,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
187 engine->pm.clock_pre = nv04_pm_clock_pre; 189 engine->pm.clock_pre = nv04_pm_clock_pre;
188 engine->pm.clock_set = nv04_pm_clock_set; 190 engine->pm.clock_set = nv04_pm_clock_set;
189 engine->vram.init = nouveau_mem_detect; 191 engine->vram.init = nouveau_mem_detect;
192 engine->vram.takedown = nouveau_stub_takedown;
190 engine->vram.flags_valid = nouveau_mem_flags_valid; 193 engine->vram.flags_valid = nouveau_mem_flags_valid;
191 break; 194 break;
192 case 0x30: 195 case 0x30:
@@ -237,6 +240,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
237 engine->pm.voltage_get = nouveau_voltage_gpio_get; 240 engine->pm.voltage_get = nouveau_voltage_gpio_get;
238 engine->pm.voltage_set = nouveau_voltage_gpio_set; 241 engine->pm.voltage_set = nouveau_voltage_gpio_set;
239 engine->vram.init = nouveau_mem_detect; 242 engine->vram.init = nouveau_mem_detect;
243 engine->vram.takedown = nouveau_stub_takedown;
240 engine->vram.flags_valid = nouveau_mem_flags_valid; 244 engine->vram.flags_valid = nouveau_mem_flags_valid;
241 break; 245 break;
242 case 0x40: 246 case 0x40:
@@ -289,6 +293,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
289 engine->pm.voltage_set = nouveau_voltage_gpio_set; 293 engine->pm.voltage_set = nouveau_voltage_gpio_set;
290 engine->pm.temp_get = nv40_temp_get; 294 engine->pm.temp_get = nv40_temp_get;
291 engine->vram.init = nouveau_mem_detect; 295 engine->vram.init = nouveau_mem_detect;
296 engine->vram.takedown = nouveau_stub_takedown;
292 engine->vram.flags_valid = nouveau_mem_flags_valid; 297 engine->vram.flags_valid = nouveau_mem_flags_valid;
293 break; 298 break;
294 case 0x50: 299 case 0x50:
@@ -366,6 +371,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
366 else 371 else
367 engine->pm.temp_get = nv40_temp_get; 372 engine->pm.temp_get = nv40_temp_get;
368 engine->vram.init = nv50_vram_init; 373 engine->vram.init = nv50_vram_init;
374 engine->vram.takedown = nv50_vram_fini;
369 engine->vram.get = nv50_vram_new; 375 engine->vram.get = nv50_vram_new;
370 engine->vram.put = nv50_vram_del; 376 engine->vram.put = nv50_vram_del;
371 engine->vram.flags_valid = nv50_vram_flags_valid; 377 engine->vram.flags_valid = nv50_vram_flags_valid;
@@ -411,9 +417,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
411 engine->gpio.irq_unregister = nv50_gpio_irq_unregister; 417 engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
412 engine->gpio.irq_enable = nv50_gpio_irq_enable; 418 engine->gpio.irq_enable = nv50_gpio_irq_enable;
413 engine->vram.init = nvc0_vram_init; 419 engine->vram.init = nvc0_vram_init;
420 engine->vram.takedown = nv50_vram_fini;
414 engine->vram.get = nvc0_vram_new; 421 engine->vram.get = nvc0_vram_new;
415 engine->vram.put = nv50_vram_del; 422 engine->vram.put = nv50_vram_del;
416 engine->vram.flags_valid = nvc0_vram_flags_valid; 423 engine->vram.flags_valid = nvc0_vram_flags_valid;
424 engine->pm.temp_get = nv84_temp_get;
417 break; 425 break;
418 default: 426 default:
419 NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); 427 NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -447,8 +455,8 @@ nouveau_card_init_channel(struct drm_device *dev)
447 struct drm_nouveau_private *dev_priv = dev->dev_private; 455 struct drm_nouveau_private *dev_priv = dev->dev_private;
448 int ret; 456 int ret;
449 457
450 ret = nouveau_channel_alloc(dev, &dev_priv->channel, 458 ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL,
451 (struct drm_file *)-2, NvDmaFB, NvDmaTT); 459 NvDmaFB, NvDmaTT);
452 if (ret) 460 if (ret)
453 return ret; 461 return ret;
454 462
@@ -527,7 +535,7 @@ nouveau_card_init(struct drm_device *dev)
527 535
528 nouveau_pm_init(dev); 536 nouveau_pm_init(dev);
529 537
530 ret = nouveau_mem_vram_init(dev); 538 ret = engine->vram.init(dev);
531 if (ret) 539 if (ret)
532 goto out_bios; 540 goto out_bios;
533 541
@@ -539,10 +547,14 @@ nouveau_card_init(struct drm_device *dev)
539 if (ret) 547 if (ret)
540 goto out_gpuobj; 548 goto out_gpuobj;
541 549
542 ret = nouveau_mem_gart_init(dev); 550 ret = nouveau_mem_vram_init(dev);
543 if (ret) 551 if (ret)
544 goto out_instmem; 552 goto out_instmem;
545 553
554 ret = nouveau_mem_gart_init(dev);
555 if (ret)
556 goto out_ttmvram;
557
546 /* PMC */ 558 /* PMC */
547 ret = engine->mc.init(dev); 559 ret = engine->mc.init(dev);
548 if (ret) 560 if (ret)
@@ -563,7 +575,7 @@ nouveau_card_init(struct drm_device *dev)
563 if (ret) 575 if (ret)
564 goto out_timer; 576 goto out_timer;
565 577
566 if (!nouveau_noaccel) { 578 if (!dev_priv->noaccel) {
567 switch (dev_priv->card_type) { 579 switch (dev_priv->card_type) {
568 case NV_04: 580 case NV_04:
569 nv04_graph_create(dev); 581 nv04_graph_create(dev);
@@ -675,14 +687,14 @@ out_vblank:
675 drm_vblank_cleanup(dev); 687 drm_vblank_cleanup(dev);
676 engine->display.destroy(dev); 688 engine->display.destroy(dev);
677out_fifo: 689out_fifo:
678 if (!nouveau_noaccel) 690 if (!dev_priv->noaccel)
679 engine->fifo.takedown(dev); 691 engine->fifo.takedown(dev);
680out_engine: 692out_engine:
681 if (!nouveau_noaccel) { 693 if (!dev_priv->noaccel) {
682 for (e = e - 1; e >= 0; e--) { 694 for (e = e - 1; e >= 0; e--) {
683 if (!dev_priv->eng[e]) 695 if (!dev_priv->eng[e])
684 continue; 696 continue;
685 dev_priv->eng[e]->fini(dev, e); 697 dev_priv->eng[e]->fini(dev, e, false);
686 dev_priv->eng[e]->destroy(dev,e ); 698 dev_priv->eng[e]->destroy(dev,e );
687 } 699 }
688 } 700 }
@@ -696,12 +708,14 @@ out_mc:
696 engine->mc.takedown(dev); 708 engine->mc.takedown(dev);
697out_gart: 709out_gart:
698 nouveau_mem_gart_fini(dev); 710 nouveau_mem_gart_fini(dev);
711out_ttmvram:
712 nouveau_mem_vram_fini(dev);
699out_instmem: 713out_instmem:
700 engine->instmem.takedown(dev); 714 engine->instmem.takedown(dev);
701out_gpuobj: 715out_gpuobj:
702 nouveau_gpuobj_takedown(dev); 716 nouveau_gpuobj_takedown(dev);
703out_vram: 717out_vram:
704 nouveau_mem_vram_fini(dev); 718 engine->vram.takedown(dev);
705out_bios: 719out_bios:
706 nouveau_pm_fini(dev); 720 nouveau_pm_fini(dev);
707 nouveau_bios_takedown(dev); 721 nouveau_bios_takedown(dev);
@@ -718,16 +732,21 @@ static void nouveau_card_takedown(struct drm_device *dev)
718 struct nouveau_engine *engine = &dev_priv->engine; 732 struct nouveau_engine *engine = &dev_priv->engine;
719 int e; 733 int e;
720 734
735 drm_kms_helper_poll_fini(dev);
736 nouveau_fbcon_fini(dev);
737
721 if (dev_priv->channel) { 738 if (dev_priv->channel) {
722 nouveau_fence_fini(dev);
723 nouveau_channel_put_unlocked(&dev_priv->channel); 739 nouveau_channel_put_unlocked(&dev_priv->channel);
740 nouveau_fence_fini(dev);
724 } 741 }
725 742
726 if (!nouveau_noaccel) { 743 engine->display.destroy(dev);
744
745 if (!dev_priv->noaccel) {
727 engine->fifo.takedown(dev); 746 engine->fifo.takedown(dev);
728 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { 747 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
729 if (dev_priv->eng[e]) { 748 if (dev_priv->eng[e]) {
730 dev_priv->eng[e]->fini(dev, e); 749 dev_priv->eng[e]->fini(dev, e, false);
731 dev_priv->eng[e]->destroy(dev,e ); 750 dev_priv->eng[e]->destroy(dev,e );
732 } 751 }
733 } 752 }
@@ -748,10 +767,11 @@ static void nouveau_card_takedown(struct drm_device *dev)
748 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); 767 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
749 mutex_unlock(&dev->struct_mutex); 768 mutex_unlock(&dev->struct_mutex);
750 nouveau_mem_gart_fini(dev); 769 nouveau_mem_gart_fini(dev);
770 nouveau_mem_vram_fini(dev);
751 771
752 engine->instmem.takedown(dev); 772 engine->instmem.takedown(dev);
753 nouveau_gpuobj_takedown(dev); 773 nouveau_gpuobj_takedown(dev);
754 nouveau_mem_vram_fini(dev); 774 engine->vram.takedown(dev);
755 775
756 nouveau_irq_fini(dev); 776 nouveau_irq_fini(dev);
757 drm_vblank_cleanup(dev); 777 drm_vblank_cleanup(dev);
@@ -762,6 +782,41 @@ static void nouveau_card_takedown(struct drm_device *dev)
762 vga_client_register(dev->pdev, NULL, NULL, NULL); 782 vga_client_register(dev->pdev, NULL, NULL, NULL);
763} 783}
764 784
785int
786nouveau_open(struct drm_device *dev, struct drm_file *file_priv)
787{
788 struct drm_nouveau_private *dev_priv = dev->dev_private;
789 struct nouveau_fpriv *fpriv;
790 int ret;
791
792 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
793 if (unlikely(!fpriv))
794 return -ENOMEM;
795
796 spin_lock_init(&fpriv->lock);
797 INIT_LIST_HEAD(&fpriv->channels);
798
799 if (dev_priv->card_type == NV_50) {
800 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
801 &fpriv->vm);
802 if (ret) {
803 kfree(fpriv);
804 return ret;
805 }
806 } else
807 if (dev_priv->card_type >= NV_C0) {
808 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
809 &fpriv->vm);
810 if (ret) {
811 kfree(fpriv);
812 return ret;
813 }
814 }
815
816 file_priv->driver_priv = fpriv;
817 return 0;
818}
819
765/* here a client dies, release the stuff that was allocated for its 820/* here a client dies, release the stuff that was allocated for its
766 * file_priv */ 821 * file_priv */
767void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv) 822void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
@@ -769,6 +824,14 @@ void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
769 nouveau_channel_cleanup(dev, file_priv); 824 nouveau_channel_cleanup(dev, file_priv);
770} 825}
771 826
827void
828nouveau_postclose(struct drm_device *dev, struct drm_file *file_priv)
829{
830 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
831 nouveau_vm_ref(NULL, &fpriv->vm, NULL);
832 kfree(fpriv);
833}
834
772/* first module load, setup the mmio/fb mapping */ 835/* first module load, setup the mmio/fb mapping */
773/* KMS: we need mmio at load time, not when the first drm client opens. */ 836/* KMS: we need mmio at load time, not when the first drm client opens. */
774int nouveau_firstopen(struct drm_device *dev) 837int nouveau_firstopen(struct drm_device *dev)
@@ -933,6 +996,25 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
933 NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", 996 NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
934 dev_priv->card_type, reg0); 997 dev_priv->card_type, reg0);
935 998
999 /* Determine whether we'll attempt acceleration or not, some
1000 * cards are disabled by default here due to them being known
1001 * non-functional, or never been tested due to lack of hw.
1002 */
1003 dev_priv->noaccel = !!nouveau_noaccel;
1004 if (nouveau_noaccel == -1) {
1005 switch (dev_priv->chipset) {
1006 case 0xc1: /* known broken */
1007 case 0xc8: /* never tested */
1008 NV_INFO(dev, "acceleration disabled by default, pass "
1009 "noaccel=0 to force enable\n");
1010 dev_priv->noaccel = true;
1011 break;
1012 default:
1013 dev_priv->noaccel = false;
1014 break;
1015 }
1016 }
1017
936 ret = nouveau_remove_conflicting_drivers(dev); 1018 ret = nouveau_remove_conflicting_drivers(dev);
937 if (ret) 1019 if (ret)
938 goto err_mmio; 1020 goto err_mmio;
@@ -997,11 +1079,7 @@ void nouveau_lastclose(struct drm_device *dev)
997int nouveau_unload(struct drm_device *dev) 1079int nouveau_unload(struct drm_device *dev)
998{ 1080{
999 struct drm_nouveau_private *dev_priv = dev->dev_private; 1081 struct drm_nouveau_private *dev_priv = dev->dev_private;
1000 struct nouveau_engine *engine = &dev_priv->engine;
1001 1082
1002 drm_kms_helper_poll_fini(dev);
1003 nouveau_fbcon_fini(dev);
1004 engine->display.destroy(dev);
1005 nouveau_card_takedown(dev); 1083 nouveau_card_takedown(dev);
1006 1084
1007 iounmap(dev_priv->mmio); 1085 iounmap(dev_priv->mmio);
@@ -1031,7 +1109,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
1031 case NOUVEAU_GETPARAM_BUS_TYPE: 1109 case NOUVEAU_GETPARAM_BUS_TYPE:
1032 if (drm_pci_device_is_agp(dev)) 1110 if (drm_pci_device_is_agp(dev))
1033 getparam->value = NV_AGP; 1111 getparam->value = NV_AGP;
1034 else if (drm_pci_device_is_pcie(dev)) 1112 else if (pci_is_pcie(dev->pdev))
1035 getparam->value = NV_PCIE; 1113 getparam->value = NV_PCIE;
1036 else 1114 else
1037 getparam->value = NV_PCI; 1115 getparam->value = NV_PCI;
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 649b0413b09f..081ca7b03e8a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -43,7 +43,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
43 43
44 /* Set the default sensor's contants */ 44 /* Set the default sensor's contants */
45 sensor->offset_constant = 0; 45 sensor->offset_constant = 0;
46 sensor->offset_mult = 1; 46 sensor->offset_mult = 0;
47 sensor->offset_div = 1; 47 sensor->offset_div = 1;
48 sensor->slope_mult = 1; 48 sensor->slope_mult = 1;
49 sensor->slope_div = 1; 49 sensor->slope_div = 1;
@@ -99,6 +99,13 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
99 sensor->slope_mult = 431; 99 sensor->slope_mult = 431;
100 sensor->slope_div = 10000; 100 sensor->slope_div = 10000;
101 break; 101 break;
102
103 case 0x67:
104 sensor->offset_mult = -26149;
105 sensor->offset_div = 100;
106 sensor->slope_mult = 484;
107 sensor->slope_div = 10000;
108 break;
102 } 109 }
103 } 110 }
104 111
@@ -109,7 +116,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
109 116
110 /* Read the entries from the table */ 117 /* Read the entries from the table */
111 for (i = 0; i < entries; i++) { 118 for (i = 0; i < entries; i++) {
112 u16 value = ROM16(temp[1]); 119 s16 value = ROM16(temp[1]);
113 120
114 switch (temp[0]) { 121 switch (temp[0]) {
115 case 0x01: 122 case 0x01:
@@ -160,8 +167,8 @@ nv40_sensor_setup(struct drm_device *dev)
160 struct drm_nouveau_private *dev_priv = dev->dev_private; 167 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 168 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
162 struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants; 169 struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
163 u32 offset = sensor->offset_mult / sensor->offset_div; 170 s32 offset = sensor->offset_mult / sensor->offset_div;
164 u32 sensor_calibration; 171 s32 sensor_calibration;
165 172
166 /* set up the sensors */ 173 /* set up the sensors */
167 sensor_calibration = 120 - offset - sensor->offset_constant; 174 sensor_calibration = 120 - offset - sensor->offset_constant;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 519a6b4bba46..244fd38fdb84 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -369,23 +369,26 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
369} 369}
370 370
371static void 371static void
372nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) 372nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
373{ 373{
374 struct nouveau_vm_pgd *vpgd, *tmp; 374 struct nouveau_vm_pgd *vpgd, *tmp;
375 struct nouveau_gpuobj *pgd = NULL;
375 376
376 if (!pgd) 377 if (!mpgd)
377 return; 378 return;
378 379
379 mutex_lock(&vm->mm->mutex); 380 mutex_lock(&vm->mm->mutex);
380 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { 381 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
381 if (vpgd->obj != pgd) 382 if (vpgd->obj == mpgd) {
382 continue; 383 pgd = vpgd->obj;
383 384 list_del(&vpgd->head);
384 list_del(&vpgd->head); 385 kfree(vpgd);
385 nouveau_gpuobj_ref(NULL, &vpgd->obj); 386 break;
386 kfree(vpgd); 387 }
387 } 388 }
388 mutex_unlock(&vm->mm->mutex); 389 mutex_unlock(&vm->mm->mutex);
390
391 nouveau_gpuobj_ref(NULL, &pgd);
389} 392}
390 393
391static void 394static void
@@ -396,8 +399,8 @@ nouveau_vm_del(struct nouveau_vm *vm)
396 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { 399 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
397 nouveau_vm_unlink(vm, vpgd->obj); 400 nouveau_vm_unlink(vm, vpgd->obj);
398 } 401 }
399 WARN_ON(nouveau_mm_fini(&vm->mm) != 0);
400 402
403 nouveau_mm_fini(&vm->mm);
401 kfree(vm->pgt); 404 kfree(vm->pgt);
402 kfree(vm); 405 kfree(vm);
403} 406}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index c48a9fc2b47b..579ca8cc223c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -41,6 +41,8 @@ struct nouveau_vm_pgd {
41}; 41};
42 42
43struct nouveau_vma { 43struct nouveau_vma {
44 struct list_head head;
45 int refcount;
44 struct nouveau_vm *vm; 46 struct nouveau_vm *vm;
45 struct nouveau_mm_node *node; 47 struct nouveau_mm_node *node;
46 u64 offset; 48 u64 offset;
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index f1a3ae491995..118261d4927a 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -1035,7 +1035,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
1035 drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs); 1035 drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
1036 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); 1036 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
1037 1037
1038 ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, 1038 ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
1039 0, 0x0000, &nv_crtc->cursor.nvbo); 1039 0, 0x0000, &nv_crtc->cursor.nvbo);
1040 if (!ret) { 1040 if (!ret) {
1041 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); 1041 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index 3626ee7db3ba..dbdea8ed3925 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -450,13 +450,13 @@ nv04_graph_context_del(struct nouveau_channel *chan, int engine)
450 unsigned long flags; 450 unsigned long flags;
451 451
452 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 452 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
453 nv04_graph_fifo_access(dev, false); 453 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
454 454
455 /* Unload the context if it's the currently active one */ 455 /* Unload the context if it's the currently active one */
456 if (nv04_graph_channel(dev) == chan) 456 if (nv04_graph_channel(dev) == chan)
457 nv04_graph_unload_context(dev); 457 nv04_graph_unload_context(dev);
458 458
459 nv04_graph_fifo_access(dev, true); 459 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
460 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 460 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
461 461
462 /* Free the context resources */ 462 /* Free the context resources */
@@ -538,24 +538,18 @@ nv04_graph_init(struct drm_device *dev, int engine)
538} 538}
539 539
540static int 540static int
541nv04_graph_fini(struct drm_device *dev, int engine) 541nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
542{ 542{
543 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
544 if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
545 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
546 return -EBUSY;
547 }
543 nv04_graph_unload_context(dev); 548 nv04_graph_unload_context(dev);
544 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); 549 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
545 return 0; 550 return 0;
546} 551}
547 552
548void
549nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
550{
551 if (enabled)
552 nv_wr32(dev, NV04_PGRAPH_FIFO,
553 nv_rd32(dev, NV04_PGRAPH_FIFO) | 1);
554 else
555 nv_wr32(dev, NV04_PGRAPH_FIFO,
556 nv_rd32(dev, NV04_PGRAPH_FIFO) & ~1);
557}
558
559static int 553static int
560nv04_graph_mthd_set_ref(struct nouveau_channel *chan, 554nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
561 u32 class, u32 mthd, u32 data) 555 u32 class, u32 mthd, u32 data)
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index b8611b955313..c1248e0740a3 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -28,6 +28,31 @@ int nv04_instmem_init(struct drm_device *dev)
28 /* RAMIN always available */ 28 /* RAMIN always available */
29 dev_priv->ramin_available = true; 29 dev_priv->ramin_available = true;
30 30
31 /* Reserve space at end of VRAM for PRAMIN */
32 if (dev_priv->card_type >= NV_40) {
33 u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
34 u32 rsvd;
35
36 /* estimate grctx size, the magics come from nv40_grctx.c */
37 if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
38 else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
39 else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
40 else rsvd = 0x4a40 * vs;
41 rsvd += 16 * 1024;
42 rsvd *= dev_priv->engine.fifo.channels;
43
44 /* pciegart table */
45 if (pci_is_pcie(dev->pdev))
46 rsvd += 512 * 1024;
47
48 /* object storage */
49 rsvd += 512 * 1024;
50
51 dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
52 } else {
53 dev_priv->ramin_rsvd_vram = 512 * 1024;
54 }
55
31 /* Setup shared RAMHT */ 56 /* Setup shared RAMHT */
32 ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096, 57 ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
33 NVOBJ_FLAG_ZERO_ALLOC, &ramht); 58 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
@@ -112,7 +137,8 @@ nv04_instmem_resume(struct drm_device *dev)
112} 137}
113 138
114int 139int
115nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) 140nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
141 u32 size, u32 align)
116{ 142{
117 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; 143 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
118 struct drm_mm_node *ramin = NULL; 144 struct drm_mm_node *ramin = NULL;
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 0930c6cb88e0..7255e4a4d3f3 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -708,8 +708,8 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
708 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c); 708 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
709 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst); 709 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
710 nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000); 710 nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
711 nv04_graph_fifo_access(dev, true); 711 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
712 nv04_graph_fifo_access(dev, false); 712 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
713 713
714 /* Restore the FIFO state */ 714 /* Restore the FIFO state */
715 for (i = 0; i < ARRAY_SIZE(fifo); i++) 715 for (i = 0; i < ARRAY_SIZE(fifo); i++)
@@ -879,13 +879,13 @@ nv10_graph_context_del(struct nouveau_channel *chan, int engine)
879 unsigned long flags; 879 unsigned long flags;
880 880
881 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 881 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
882 nv04_graph_fifo_access(dev, false); 882 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
883 883
884 /* Unload the context if it's the currently active one */ 884 /* Unload the context if it's the currently active one */
885 if (nv10_graph_channel(dev) == chan) 885 if (nv10_graph_channel(dev) == chan)
886 nv10_graph_unload_context(dev); 886 nv10_graph_unload_context(dev);
887 887
888 nv04_graph_fifo_access(dev, true); 888 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
889 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 889 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
890 890
891 /* Free the context resources */ 891 /* Free the context resources */
@@ -957,8 +957,13 @@ nv10_graph_init(struct drm_device *dev, int engine)
957} 957}
958 958
959static int 959static int
960nv10_graph_fini(struct drm_device *dev, int engine) 960nv10_graph_fini(struct drm_device *dev, int engine, bool suspend)
961{ 961{
962 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
963 if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
964 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
965 return -EBUSY;
966 }
962 nv10_graph_unload_context(dev); 967 nv10_graph_unload_context(dev);
963 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); 968 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
964 return 0; 969 return 0;
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index affc7d7dd029..183e37512ef9 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -454,13 +454,13 @@ nv20_graph_context_del(struct nouveau_channel *chan, int engine)
454 unsigned long flags; 454 unsigned long flags;
455 455
456 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 456 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
457 nv04_graph_fifo_access(dev, false); 457 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
458 458
459 /* Unload the context if it's the currently active one */ 459 /* Unload the context if it's the currently active one */
460 if (nv10_graph_channel(dev) == chan) 460 if (nv10_graph_channel(dev) == chan)
461 nv20_graph_unload_context(dev); 461 nv20_graph_unload_context(dev);
462 462
463 nv04_graph_fifo_access(dev, true); 463 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
464 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 464 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
465 465
466 /* Free the context resources */ 466 /* Free the context resources */
@@ -654,8 +654,13 @@ nv30_graph_init(struct drm_device *dev, int engine)
654} 654}
655 655
656int 656int
657nv20_graph_fini(struct drm_device *dev, int engine) 657nv20_graph_fini(struct drm_device *dev, int engine, bool suspend)
658{ 658{
659 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
660 if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
661 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
662 return -EBUSY;
663 }
659 nv20_graph_unload_context(dev); 664 nv20_graph_unload_context(dev);
660 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); 665 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
661 return 0; 666 return 0;
@@ -753,6 +758,7 @@ nv20_graph_create(struct drm_device *dev)
753 break; 758 break;
754 default: 759 default:
755 NV_ERROR(dev, "PGRAPH: unknown chipset\n"); 760 NV_ERROR(dev, "PGRAPH: unknown chipset\n");
761 kfree(pgraph);
756 return 0; 762 return 0;
757 } 763 }
758 } else { 764 } else {
@@ -774,6 +780,7 @@ nv20_graph_create(struct drm_device *dev)
774 break; 780 break;
775 default: 781 default:
776 NV_ERROR(dev, "PGRAPH: unknown chipset\n"); 782 NV_ERROR(dev, "PGRAPH: unknown chipset\n");
783 kfree(pgraph);
777 return 0; 784 return 0;
778 } 785 }
779 } 786 }
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 5beb01b8ace1..ba14a93d8afa 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -35,89 +35,6 @@ struct nv40_graph_engine {
35 u32 grctx_size; 35 u32 grctx_size;
36}; 36};
37 37
38static struct nouveau_channel *
39nv40_graph_channel(struct drm_device *dev)
40{
41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42 struct nouveau_gpuobj *grctx;
43 uint32_t inst;
44 int i;
45
46 inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
47 if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
48 return NULL;
49 inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
50
51 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
52 if (!dev_priv->channels.ptr[i])
53 continue;
54
55 grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
56 if (grctx && grctx->pinst == inst)
57 return dev_priv->channels.ptr[i];
58 }
59
60 return NULL;
61}
62
63static int
64nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
65{
66 uint32_t old_cp, tv = 1000, tmp;
67 int i;
68
69 old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
70 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
71
72 tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
73 tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
74 NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
75 nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);
76
77 tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
78 tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
79 nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);
80
81 nouveau_wait_for_idle(dev);
82
83 for (i = 0; i < tv; i++) {
84 if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
85 break;
86 }
87
88 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
89
90 if (i == tv) {
91 uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
92 NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
93 NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
94 ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
95 ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
96 NV_ERROR(dev, "0x40030C = 0x%08x\n",
97 nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
98 return -EBUSY;
99 }
100
101 return 0;
102}
103
104static int
105nv40_graph_unload_context(struct drm_device *dev)
106{
107 uint32_t inst;
108 int ret;
109
110 inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
111 if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
112 return 0;
113 inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
114
115 ret = nv40_graph_transfer_context(dev, inst, 1);
116
117 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
118 return ret;
119}
120
121static int 38static int
122nv40_graph_context_new(struct nouveau_channel *chan, int engine) 39nv40_graph_context_new(struct nouveau_channel *chan, int engine)
123{ 40{
@@ -163,16 +80,16 @@ nv40_graph_context_del(struct nouveau_channel *chan, int engine)
163 struct nouveau_gpuobj *grctx = chan->engctx[engine]; 80 struct nouveau_gpuobj *grctx = chan->engctx[engine];
164 struct drm_device *dev = chan->dev; 81 struct drm_device *dev = chan->dev;
165 struct drm_nouveau_private *dev_priv = dev->dev_private; 82 struct drm_nouveau_private *dev_priv = dev->dev_private;
83 u32 inst = 0x01000000 | (grctx->pinst >> 4);
166 unsigned long flags; 84 unsigned long flags;
167 85
168 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 86 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
169 nv04_graph_fifo_access(dev, false); 87 nv_mask(dev, 0x400720, 0x00000000, 0x00000001);
170 88 if (nv_rd32(dev, 0x40032c) == inst)
171 /* Unload the context if it's the currently active one */ 89 nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
172 if (nv40_graph_channel(dev) == chan) 90 if (nv_rd32(dev, 0x400330) == inst)
173 nv40_graph_unload_context(dev); 91 nv_mask(dev, 0x400330, 0x01000000, 0x00000000);
174 92 nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
175 nv04_graph_fifo_access(dev, true);
176 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 93 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
177 94
178 /* Free the context resources */ 95 /* Free the context resources */
@@ -429,9 +346,20 @@ nv40_graph_init(struct drm_device *dev, int engine)
429} 346}
430 347
431static int 348static int
432nv40_graph_fini(struct drm_device *dev, int engine) 349nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
433{ 350{
434 nv40_graph_unload_context(dev); 351 u32 inst = nv_rd32(dev, 0x40032c);
352 if (inst & 0x01000000) {
353 nv_wr32(dev, 0x400720, 0x00000000);
354 nv_wr32(dev, 0x400784, inst);
355 nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
356 nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
357 if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
358 u32 insn = nv_rd32(dev, 0x400308);
359 NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
360 }
361 nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
362 }
435 return 0; 363 return 0;
436} 364}
437 365
diff --git a/drivers/gpu/drm/nouveau/nv40_mpeg.c b/drivers/gpu/drm/nouveau/nv40_mpeg.c
index 6d2af292a2e3..ad03a0e1fc7d 100644
--- a/drivers/gpu/drm/nouveau/nv40_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv40_mpeg.c
@@ -137,7 +137,7 @@ nv40_mpeg_init(struct drm_device *dev, int engine)
137} 137}
138 138
139static int 139static int
140nv40_mpeg_fini(struct drm_device *dev, int engine) 140nv40_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
141{ 141{
142 /*XXX: context save? */ 142 /*XXX: context save? */
143 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000); 143 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index ebabacf38da9..46ad59ea2185 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -104,7 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
104 OUT_RING(evo, nv_crtc->lut.depth == 8 ? 104 OUT_RING(evo, nv_crtc->lut.depth == 8 ?
105 NV50_EVO_CRTC_CLUT_MODE_OFF : 105 NV50_EVO_CRTC_CLUT_MODE_OFF :
106 NV50_EVO_CRTC_CLUT_MODE_ON); 106 NV50_EVO_CRTC_CLUT_MODE_ON);
107 OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8); 107 OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
108 if (dev_priv->chipset != 0x50) { 108 if (dev_priv->chipset != 0x50) {
109 BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); 109 BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
110 OUT_RING(evo, NvEvoVRAM); 110 OUT_RING(evo, NvEvoVRAM);
@@ -372,7 +372,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
372 372
373 nouveau_bo_unmap(cursor); 373 nouveau_bo_unmap(cursor);
374 374
375 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT); 375 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
376 nv_crtc->cursor.show(nv_crtc, true); 376 nv_crtc->cursor.show(nv_crtc, true);
377 377
378out: 378out:
@@ -546,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
546 } 546 }
547 } 547 }
548 548
549 nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT; 549 nv_crtc->fb.offset = fb->nvbo->bo.offset;
550 nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); 550 nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
551 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; 551 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
552 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { 552 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
@@ -747,7 +747,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
747 } 747 }
748 nv_crtc->lut.depth = 0; 748 nv_crtc->lut.depth = 0;
749 749
750 ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM, 750 ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
751 0, 0x0000, &nv_crtc->lut.nvbo); 751 0, 0x0000, &nv_crtc->lut.nvbo);
752 if (!ret) { 752 if (!ret) {
753 ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM); 753 ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
@@ -773,7 +773,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
773 drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs); 773 drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
774 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); 774 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
775 775
776 ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, 776 ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
777 0, 0x0000, &nv_crtc->cursor.nvbo); 777 0, 0x0000, &nv_crtc->cursor.nvbo);
778 if (!ret) { 778 if (!ret) {
779 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); 779 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 08da478ba544..db1a5f4b711d 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -415,8 +415,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
415 415
416 /* synchronise with the rendering channel, if necessary */ 416 /* synchronise with the rendering channel, if necessary */
417 if (likely(chan)) { 417 if (likely(chan)) {
418 u64 offset = dispc->sem.bo->vma.offset + dispc->sem.offset;
419
420 ret = RING_SPACE(chan, 10); 418 ret = RING_SPACE(chan, 10);
421 if (ret) { 419 if (ret) {
422 WIND_RING(evo); 420 WIND_RING(evo);
@@ -438,6 +436,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
438 else 436 else
439 OUT_RING (chan, chan->vram_handle); 437 OUT_RING (chan, chan->vram_handle);
440 } else { 438 } else {
439 u64 offset = chan->dispc_vma[nv_crtc->index].offset;
440 offset += dispc->sem.offset;
441 BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4); 441 BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
442 OUT_RING (chan, upper_32_bits(offset)); 442 OUT_RING (chan, upper_32_bits(offset));
443 OUT_RING (chan, lower_32_bits(offset)); 443 OUT_RING (chan, lower_32_bits(offset));
@@ -484,7 +484,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
484 OUT_RING (evo, 0x00000000); 484 OUT_RING (evo, 0x00000000);
485 OUT_RING (evo, 0x00000000); 485 OUT_RING (evo, 0x00000000);
486 BEGIN_RING(evo, 0, 0x0800, 5); 486 BEGIN_RING(evo, 0, 0x0800, 5);
487 OUT_RING (evo, (nv_fb->nvbo->bo.mem.start << PAGE_SHIFT) >> 8); 487 OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
488 OUT_RING (evo, 0); 488 OUT_RING (evo, 0);
489 OUT_RING (evo, (fb->height << 16) | fb->width); 489 OUT_RING (evo, (fb->height << 16) | fb->width);
490 OUT_RING (evo, nv_fb->r_pitch); 490 OUT_RING (evo, nv_fb->r_pitch);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index c8e83c1a4de8..c99d9751880c 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -38,6 +38,7 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
38 return; 38 return;
39 *pevo = NULL; 39 *pevo = NULL;
40 40
41 nouveau_ramht_ref(NULL, &evo->ramht, evo);
41 nouveau_gpuobj_channel_takedown(evo); 42 nouveau_gpuobj_channel_takedown(evo);
42 nouveau_bo_unmap(evo->pushbuf_bo); 43 nouveau_bo_unmap(evo->pushbuf_bo);
43 nouveau_bo_ref(NULL, &evo->pushbuf_bo); 44 nouveau_bo_ref(NULL, &evo->pushbuf_bo);
@@ -116,7 +117,7 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
116 evo->user_get = 4; 117 evo->user_get = 4;
117 evo->user_put = 0; 118 evo->user_put = 0;
118 119
119 ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, 120 ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
120 &evo->pushbuf_bo); 121 &evo->pushbuf_bo);
121 if (ret == 0) 122 if (ret == 0)
122 ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM); 123 ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
@@ -153,7 +154,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
153{ 154{
154 struct drm_device *dev = evo->dev; 155 struct drm_device *dev = evo->dev;
155 int id = evo->id, ret, i; 156 int id = evo->id, ret, i;
156 u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT; 157 u64 pushbuf = evo->pushbuf_bo->bo.offset;
157 u32 tmp; 158 u32 tmp;
158 159
159 tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); 160 tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
@@ -331,16 +332,15 @@ nv50_evo_create(struct drm_device *dev)
331 if (ret) 332 if (ret)
332 goto err; 333 goto err;
333 334
334 ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM, 335 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
335 0, 0x0000, &dispc->sem.bo); 336 0, 0x0000, &dispc->sem.bo);
336 if (!ret) { 337 if (!ret) {
337 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
338
339 ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM); 338 ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
340 if (!ret) 339 if (!ret)
341 ret = nouveau_bo_map(dispc->sem.bo); 340 ret = nouveau_bo_map(dispc->sem.bo);
342 if (ret) 341 if (ret)
343 nouveau_bo_ref(NULL, &dispc->sem.bo); 342 nouveau_bo_ref(NULL, &dispc->sem.bo);
343 offset = dispc->sem.bo->bo.offset;
344 } 344 }
345 345
346 if (ret) 346 if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 791ded1c5c6d..dc75a7206524 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -159,7 +159,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
159 struct drm_device *dev = nfbdev->dev; 159 struct drm_device *dev = nfbdev->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private; 160 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_channel *chan = dev_priv->channel; 161 struct nouveau_channel *chan = dev_priv->channel;
162 struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo; 162 struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
163 int ret, format; 163 int ret, format;
164 164
165 switch (info->var.bits_per_pixel) { 165 switch (info->var.bits_per_pixel) {
@@ -247,8 +247,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
247 OUT_RING(chan, info->fix.line_length); 247 OUT_RING(chan, info->fix.line_length);
248 OUT_RING(chan, info->var.xres_virtual); 248 OUT_RING(chan, info->var.xres_virtual);
249 OUT_RING(chan, info->var.yres_virtual); 249 OUT_RING(chan, info->var.yres_virtual);
250 OUT_RING(chan, upper_32_bits(nvbo->vma.offset)); 250 OUT_RING(chan, upper_32_bits(fb->vma.offset));
251 OUT_RING(chan, lower_32_bits(nvbo->vma.offset)); 251 OUT_RING(chan, lower_32_bits(fb->vma.offset));
252 BEGIN_RING(chan, NvSub2D, 0x0230, 2); 252 BEGIN_RING(chan, NvSub2D, 0x0230, 2);
253 OUT_RING(chan, format); 253 OUT_RING(chan, format);
254 OUT_RING(chan, 1); 254 OUT_RING(chan, 1);
@@ -256,8 +256,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
256 OUT_RING(chan, info->fix.line_length); 256 OUT_RING(chan, info->fix.line_length);
257 OUT_RING(chan, info->var.xres_virtual); 257 OUT_RING(chan, info->var.xres_virtual);
258 OUT_RING(chan, info->var.yres_virtual); 258 OUT_RING(chan, info->var.yres_virtual);
259 OUT_RING(chan, upper_32_bits(nvbo->vma.offset)); 259 OUT_RING(chan, upper_32_bits(fb->vma.offset));
260 OUT_RING(chan, lower_32_bits(nvbo->vma.offset)); 260 OUT_RING(chan, lower_32_bits(fb->vma.offset));
261 261
262 return 0; 262 return 0;
263} 263}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 40680f2b4231..d43c46caa76e 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -124,7 +124,6 @@ static void
124nv50_graph_init_reset(struct drm_device *dev) 124nv50_graph_init_reset(struct drm_device *dev)
125{ 125{
126 uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21); 126 uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
127
128 NV_DEBUG(dev, "\n"); 127 NV_DEBUG(dev, "\n");
129 128
130 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e); 129 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
@@ -254,9 +253,13 @@ nv50_graph_init(struct drm_device *dev, int engine)
254} 253}
255 254
256static int 255static int
257nv50_graph_fini(struct drm_device *dev, int engine) 256nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
258{ 257{
259 NV_DEBUG(dev, "\n"); 258 nv_mask(dev, 0x400500, 0x00010001, 0x00000000);
259 if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) {
260 nv_mask(dev, 0x400500, 0x00010001, 0x00010001);
261 return -EBUSY;
262 }
260 nv50_graph_unload_context(dev); 263 nv50_graph_unload_context(dev);
261 nv_wr32(dev, 0x40013c, 0x00000000); 264 nv_wr32(dev, 0x40013c, 0x00000000);
262 return 0; 265 return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 4f95a1e5822e..a7c12c94a5a6 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -305,9 +305,9 @@ struct nv50_gpuobj_node {
305 u32 align; 305 u32 align;
306}; 306};
307 307
308
309int 308int
310nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) 309nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
310 u32 size, u32 align)
311{ 311{
312 struct drm_device *dev = gpuobj->dev; 312 struct drm_device *dev = gpuobj->dev;
313 struct drm_nouveau_private *dev_priv = dev->dev_private; 313 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -336,7 +336,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
336 if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER)) 336 if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
337 flags |= NV_MEM_ACCESS_SYS; 337 flags |= NV_MEM_ACCESS_SYS;
338 338
339 ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags, 339 ret = nouveau_vm_get(chan->vm, size, 12, flags,
340 &node->chan_vma); 340 &node->chan_vma);
341 if (ret) { 341 if (ret) {
342 vram->put(dev, &node->vram); 342 vram->put(dev, &node->vram);
@@ -345,7 +345,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
345 } 345 }
346 346
347 nouveau_vm_map(&node->chan_vma, node->vram); 347 nouveau_vm_map(&node->chan_vma, node->vram);
348 gpuobj->vinst = node->chan_vma.offset; 348 gpuobj->linst = node->chan_vma.offset;
349 } 349 }
350 350
351 gpuobj->size = size; 351 gpuobj->size = size;
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
index 1dc5913f78c5..b57a2d180ad2 100644
--- a/drivers/gpu/drm/nouveau/nv50_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv50_mpeg.c
@@ -160,7 +160,7 @@ nv50_mpeg_init(struct drm_device *dev, int engine)
160} 160}
161 161
162static int 162static int
163nv50_mpeg_fini(struct drm_device *dev, int engine) 163nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
164{ 164{
165 /*XXX: context save for s/r */ 165 /*XXX: context save for s/r */
166 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000); 166 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index c25c59386420..ffe8b483b7b0 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -318,6 +318,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry)
318 uint32_t tmp; 318 uint32_t tmp;
319 319
320 tmp = nv_rd32(dev, 0x61c700 + (or * 0x800)); 320 tmp = nv_rd32(dev, 0x61c700 + (or * 0x800));
321 if (!tmp)
322 tmp = nv_rd32(dev, 0x610798 + (or * 8));
321 323
322 switch ((tmp & 0x00000f00) >> 8) { 324 switch ((tmp & 0x00000f00) >> 8) {
323 case 8: 325 case 8:
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 1a0dd491a0e4..40b84f22d819 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -156,7 +156,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
156 pinstmem->flush(vm->dev); 156 pinstmem->flush(vm->dev);
157 157
158 /* BAR */ 158 /* BAR */
159 if (vm != dev_priv->chan_vm) { 159 if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
160 nv50_vm_flush_engine(vm->dev, 6); 160 nv50_vm_flush_engine(vm->dev, 6);
161 return; 161 return;
162 } 162 }
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index ffbc3d8cf5be..af32daecd1ed 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -51,9 +51,7 @@ void
51nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem) 51nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
52{ 52{
53 struct drm_nouveau_private *dev_priv = dev->dev_private; 53 struct drm_nouveau_private *dev_priv = dev->dev_private;
54 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; 54 struct nouveau_mm *mm = dev_priv->engine.vram.mm;
55 struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
56 struct nouveau_mm *mm = man->priv;
57 struct nouveau_mm_node *this; 55 struct nouveau_mm_node *this;
58 struct nouveau_mem *mem; 56 struct nouveau_mem *mem;
59 57
@@ -84,9 +82,7 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
84 u32 memtype, struct nouveau_mem **pmem) 82 u32 memtype, struct nouveau_mem **pmem)
85{ 83{
86 struct drm_nouveau_private *dev_priv = dev->dev_private; 84 struct drm_nouveau_private *dev_priv = dev->dev_private;
87 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; 85 struct nouveau_mm *mm = dev_priv->engine.vram.mm;
88 struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
89 struct nouveau_mm *mm = man->priv;
90 struct nouveau_mm_node *r; 86 struct nouveau_mm_node *r;
91 struct nouveau_mem *mem; 87 struct nouveau_mem *mem;
92 int comp = (memtype & 0x300) >> 8; 88 int comp = (memtype & 0x300) >> 8;
@@ -190,22 +186,35 @@ int
190nv50_vram_init(struct drm_device *dev) 186nv50_vram_init(struct drm_device *dev)
191{ 187{
192 struct drm_nouveau_private *dev_priv = dev->dev_private; 188 struct drm_nouveau_private *dev_priv = dev->dev_private;
189 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
190 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
191 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
192 u32 rblock, length;
193 193
194 dev_priv->vram_size = nv_rd32(dev, 0x10020c); 194 dev_priv->vram_size = nv_rd32(dev, 0x10020c);
195 dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; 195 dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
196 dev_priv->vram_size &= 0xffffffff00ULL; 196 dev_priv->vram_size &= 0xffffffff00ULL;
197 197
198 switch (dev_priv->chipset) { 198 /* IGPs, no funky reordering happens here, they don't have VRAM */
199 case 0xaa: 199 if (dev_priv->chipset == 0xaa ||
200 case 0xac: 200 dev_priv->chipset == 0xac ||
201 case 0xaf: 201 dev_priv->chipset == 0xaf) {
202 dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12; 202 dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
203 dev_priv->vram_rblock_size = 4096; 203 rblock = 4096 >> 12;
204 break; 204 } else {
205 default: 205 rblock = nv50_vram_rblock(dev) >> 12;
206 dev_priv->vram_rblock_size = nv50_vram_rblock(dev);
207 break;
208 } 206 }
209 207
210 return 0; 208 length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
209
210 return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock);
211}
212
213void
214nv50_vram_fini(struct drm_device *dev)
215{
216 struct drm_nouveau_private *dev_priv = dev->dev_private;
217 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
218
219 nouveau_mm_fini(&vram->mm);
211} 220}
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
index 75b809a51748..edece9c616eb 100644
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -138,7 +138,7 @@ nv84_crypt_isr(struct drm_device *dev)
138} 138}
139 139
140static int 140static int
141nv84_crypt_fini(struct drm_device *dev, int engine) 141nv84_crypt_fini(struct drm_device *dev, int engine, bool suspend)
142{ 142{
143 nv_wr32(dev, 0x102140, 0x00000000); 143 nv_wr32(dev, 0x102140, 0x00000000);
144 return 0; 144 return 0;
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
index b86820a61220..8f356d58e409 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.c
+++ b/drivers/gpu/drm/nouveau/nva3_copy.c
@@ -140,7 +140,7 @@ nva3_copy_init(struct drm_device *dev, int engine)
140} 140}
141 141
142static int 142static int
143nva3_copy_fini(struct drm_device *dev, int engine) 143nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
144{ 144{
145 nv_mask(dev, 0x104048, 0x00000003, 0x00000000); 145 nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
146 146
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.c b/drivers/gpu/drm/nouveau/nvc0_copy.c
index 208fa7ab3f42..dddf006f6d88 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.c
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.c
@@ -48,14 +48,14 @@ nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
48 struct nouveau_gpuobj *ctx = NULL; 48 struct nouveau_gpuobj *ctx = NULL;
49 int ret; 49 int ret;
50 50
51 ret = nouveau_gpuobj_new(dev, NULL, 256, 256, 51 ret = nouveau_gpuobj_new(dev, chan, 256, 256,
52 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER | 52 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
53 NVOBJ_FLAG_ZERO_ALLOC, &ctx); 53 NVOBJ_FLAG_ZERO_ALLOC, &ctx);
54 if (ret) 54 if (ret)
55 return ret; 55 return ret;
56 56
57 nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->vinst)); 57 nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst));
58 nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->vinst)); 58 nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst));
59 dev_priv->engine.instmem.flush(dev); 59 dev_priv->engine.instmem.flush(dev);
60 60
61 chan->engctx[engine] = ctx; 61 chan->engctx[engine] = ctx;
@@ -127,7 +127,7 @@ nvc0_copy_init(struct drm_device *dev, int engine)
127} 127}
128 128
129static int 129static int
130nvc0_copy_fini(struct drm_device *dev, int engine) 130nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend)
131{ 131{
132 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine); 132 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
133 133
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index 26a996025dd2..08e6b118f021 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2010 Red Hat Inc. 2 * Copyright 2011 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -23,16 +23,80 @@
23 */ 23 */
24 24
25#include "drmP.h" 25#include "drmP.h"
26 26#include "drm.h"
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_drm.h"
29
30struct nvc0_fb_priv {
31 struct page *r100c10_page;
32 dma_addr_t r100c10;
33};
34
35static void
36nvc0_fb_destroy(struct drm_device *dev)
37{
38 struct drm_nouveau_private *dev_priv = dev->dev_private;
39 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
40 struct nvc0_fb_priv *priv = pfb->priv;
41
42 if (priv->r100c10_page) {
43 pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE,
44 PCI_DMA_BIDIRECTIONAL);
45 __free_page(priv->r100c10_page);
46 }
47
48 kfree(priv);
49 pfb->priv = NULL;
50}
51
52static int
53nvc0_fb_create(struct drm_device *dev)
54{
55 struct drm_nouveau_private *dev_priv = dev->dev_private;
56 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
57 struct nvc0_fb_priv *priv;
58
59 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
60 if (!priv)
61 return -ENOMEM;
62 pfb->priv = priv;
63
64 priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
65 if (!priv->r100c10_page) {
66 nvc0_fb_destroy(dev);
67 return -ENOMEM;
68 }
69
70 priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0,
71 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
72 if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) {
73 nvc0_fb_destroy(dev);
74 return -EFAULT;
75 }
76
77 return 0;
78}
28 79
29int 80int
30nvc0_fb_init(struct drm_device *dev) 81nvc0_fb_init(struct drm_device *dev)
31{ 82{
83 struct drm_nouveau_private *dev_priv = dev->dev_private;
84 struct nvc0_fb_priv *priv;
85 int ret;
86
87 if (!dev_priv->engine.fb.priv) {
88 ret = nvc0_fb_create(dev);
89 if (ret)
90 return ret;
91 }
92 priv = dev_priv->engine.fb.priv;
93
94 nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
32 return 0; 95 return 0;
33} 96}
34 97
35void 98void
36nvc0_fb_takedown(struct drm_device *dev) 99nvc0_fb_takedown(struct drm_device *dev)
37{ 100{
101 nvc0_fb_destroy(dev);
38} 102}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index fa5d4c234383..a495e48197ca 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -159,7 +159,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
159 struct drm_device *dev = nfbdev->dev; 159 struct drm_device *dev = nfbdev->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private; 160 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_channel *chan = dev_priv->channel; 161 struct nouveau_channel *chan = dev_priv->channel;
162 struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo; 162 struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
163 int ret, format; 163 int ret, format;
164 164
165 ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d); 165 ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
@@ -203,8 +203,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
203 BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1); 203 BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
204 OUT_RING (chan, 0x0000902d); 204 OUT_RING (chan, 0x0000902d);
205 BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2); 205 BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
206 OUT_RING (chan, upper_32_bits(chan->notifier_bo->bo.offset)); 206 OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset));
207 OUT_RING (chan, lower_32_bits(chan->notifier_bo->bo.offset)); 207 OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset));
208 BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1); 208 BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
209 OUT_RING (chan, 0); 209 OUT_RING (chan, 0);
210 BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1); 210 BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
@@ -249,8 +249,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
249 OUT_RING (chan, info->fix.line_length); 249 OUT_RING (chan, info->fix.line_length);
250 OUT_RING (chan, info->var.xres_virtual); 250 OUT_RING (chan, info->var.xres_virtual);
251 OUT_RING (chan, info->var.yres_virtual); 251 OUT_RING (chan, info->var.yres_virtual);
252 OUT_RING (chan, upper_32_bits(nvbo->vma.offset)); 252 OUT_RING (chan, upper_32_bits(fb->vma.offset));
253 OUT_RING (chan, lower_32_bits(nvbo->vma.offset)); 253 OUT_RING (chan, lower_32_bits(fb->vma.offset));
254 BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10); 254 BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
255 OUT_RING (chan, format); 255 OUT_RING (chan, format);
256 OUT_RING (chan, 1); 256 OUT_RING (chan, 1);
@@ -260,8 +260,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
260 OUT_RING (chan, info->fix.line_length); 260 OUT_RING (chan, info->fix.line_length);
261 OUT_RING (chan, info->var.xres_virtual); 261 OUT_RING (chan, info->var.xres_virtual);
262 OUT_RING (chan, info->var.yres_virtual); 262 OUT_RING (chan, info->var.yres_virtual);
263 OUT_RING (chan, upper_32_bits(nvbo->vma.offset)); 263 OUT_RING (chan, upper_32_bits(fb->vma.offset));
264 OUT_RING (chan, lower_32_bits(nvbo->vma.offset)); 264 OUT_RING (chan, lower_32_bits(fb->vma.offset));
265 FIRE_RING (chan); 265 FIRE_RING (chan);
266 266
267 return 0; 267 return 0;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index fb4f5943e01b..6f9f341c3e86 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -210,10 +210,10 @@ nvc0_fifo_unload_context(struct drm_device *dev)
210 int i; 210 int i;
211 211
212 for (i = 0; i < 128; i++) { 212 for (i = 0; i < 128; i++) {
213 if (!(nv_rd32(dev, 0x003004 + (i * 4)) & 1)) 213 if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
214 continue; 214 continue;
215 215
216 nv_mask(dev, 0x003004 + (i * 4), 0x00000001, 0x00000000); 216 nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
217 nv_wr32(dev, 0x002634, i); 217 nv_wr32(dev, 0x002634, i);
218 if (!nv_wait(dev, 0x002634, 0xffffffff, i)) { 218 if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
219 NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n", 219 NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index ca6db204d644..5b2f6f420468 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -28,7 +28,34 @@
28 28
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_mm.h" 30#include "nouveau_mm.h"
31
31#include "nvc0_graph.h" 32#include "nvc0_graph.h"
33#include "nvc0_grhub.fuc.h"
34#include "nvc0_grgpc.fuc.h"
35
36static void
37nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
38{
39 NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
40 nv_rd32(dev, base + 0x400));
41 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
42 nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
43 nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
44 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
45 nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
46 nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
47}
48
49static void
50nvc0_graph_ctxctl_debug(struct drm_device *dev)
51{
52 u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
53 u32 gpc;
54
55 nvc0_graph_ctxctl_debug_unit(dev, 0x409000);
56 for (gpc = 0; gpc < gpcnr; gpc++)
57 nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
58}
32 59
33static int 60static int
34nvc0_graph_load_context(struct nouveau_channel *chan) 61nvc0_graph_load_context(struct nouveau_channel *chan)
@@ -72,24 +99,44 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
72 if (!ctx) 99 if (!ctx)
73 return -ENOMEM; 100 return -ENOMEM;
74 101
75 nvc0_graph_load_context(chan); 102 if (!nouveau_ctxfw) {
76 103 nv_wr32(dev, 0x409840, 0x80000000);
77 nv_wo32(grch->grctx, 0x1c, 1); 104 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
78 nv_wo32(grch->grctx, 0x20, 0); 105 nv_wr32(dev, 0x409504, 0x00000001);
79 nv_wo32(grch->grctx, 0x28, 0); 106 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
80 nv_wo32(grch->grctx, 0x2c, 0); 107 NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
81 dev_priv->engine.instmem.flush(dev); 108 nvc0_graph_ctxctl_debug(dev);
82 109 ret = -EBUSY;
83 ret = nvc0_grctx_generate(chan); 110 goto err;
84 if (ret) { 111 }
85 kfree(ctx); 112 } else {
86 return ret; 113 nvc0_graph_load_context(chan);
114
115 nv_wo32(grch->grctx, 0x1c, 1);
116 nv_wo32(grch->grctx, 0x20, 0);
117 nv_wo32(grch->grctx, 0x28, 0);
118 nv_wo32(grch->grctx, 0x2c, 0);
119 dev_priv->engine.instmem.flush(dev);
87 } 120 }
88 121
89 ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst); 122 ret = nvc0_grctx_generate(chan);
90 if (ret) { 123 if (ret)
91 kfree(ctx); 124 goto err;
92 return ret; 125
126 if (!nouveau_ctxfw) {
127 nv_wr32(dev, 0x409840, 0x80000000);
128 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
129 nv_wr32(dev, 0x409504, 0x00000002);
130 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
131 NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
132 nvc0_graph_ctxctl_debug(dev);
133 ret = -EBUSY;
134 goto err;
135 }
136 } else {
137 ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
138 if (ret)
139 goto err;
93 } 140 }
94 141
95 for (i = 0; i < priv->grctx_size; i += 4) 142 for (i = 0; i < priv->grctx_size; i += 4)
@@ -97,6 +144,10 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
97 144
98 priv->grctx_vals = ctx; 145 priv->grctx_vals = ctx;
99 return 0; 146 return 0;
147
148err:
149 kfree(ctx);
150 return ret;
100} 151}
101 152
102static int 153static int
@@ -108,50 +159,50 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
108 int i = 0, gpc, tp, ret; 159 int i = 0, gpc, tp, ret;
109 u32 magic; 160 u32 magic;
110 161
111 ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM, 162 ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
112 &grch->unk408004); 163 &grch->unk408004);
113 if (ret) 164 if (ret)
114 return ret; 165 return ret;
115 166
116 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM, 167 ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
117 &grch->unk40800c); 168 &grch->unk40800c);
118 if (ret) 169 if (ret)
119 return ret; 170 return ret;
120 171
121 ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, 172 ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
122 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER, 173 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
123 &grch->unk418810); 174 &grch->unk418810);
124 if (ret) 175 if (ret)
125 return ret; 176 return ret;
126 177
127 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM, 178 ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
128 &grch->mmio); 179 &grch->mmio);
129 if (ret) 180 if (ret)
130 return ret; 181 return ret;
131 182
132 183
133 nv_wo32(grch->mmio, i++ * 4, 0x00408004); 184 nv_wo32(grch->mmio, i++ * 4, 0x00408004);
134 nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8); 185 nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
135 nv_wo32(grch->mmio, i++ * 4, 0x00408008); 186 nv_wo32(grch->mmio, i++ * 4, 0x00408008);
136 nv_wo32(grch->mmio, i++ * 4, 0x80000018); 187 nv_wo32(grch->mmio, i++ * 4, 0x80000018);
137 188
138 nv_wo32(grch->mmio, i++ * 4, 0x0040800c); 189 nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
139 nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8); 190 nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
140 nv_wo32(grch->mmio, i++ * 4, 0x00408010); 191 nv_wo32(grch->mmio, i++ * 4, 0x00408010);
141 nv_wo32(grch->mmio, i++ * 4, 0x80000000); 192 nv_wo32(grch->mmio, i++ * 4, 0x80000000);
142 193
143 nv_wo32(grch->mmio, i++ * 4, 0x00418810); 194 nv_wo32(grch->mmio, i++ * 4, 0x00418810);
144 nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12); 195 nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12);
145 nv_wo32(grch->mmio, i++ * 4, 0x00419848); 196 nv_wo32(grch->mmio, i++ * 4, 0x00419848);
146 nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12); 197 nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12);
147 198
148 nv_wo32(grch->mmio, i++ * 4, 0x00419004); 199 nv_wo32(grch->mmio, i++ * 4, 0x00419004);
149 nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8); 200 nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
150 nv_wo32(grch->mmio, i++ * 4, 0x00419008); 201 nv_wo32(grch->mmio, i++ * 4, 0x00419008);
151 nv_wo32(grch->mmio, i++ * 4, 0x00000000); 202 nv_wo32(grch->mmio, i++ * 4, 0x00000000);
152 203
153 nv_wo32(grch->mmio, i++ * 4, 0x00418808); 204 nv_wo32(grch->mmio, i++ * 4, 0x00418808);
154 nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8); 205 nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
155 nv_wo32(grch->mmio, i++ * 4, 0x0041880c); 206 nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
156 nv_wo32(grch->mmio, i++ * 4, 0x80000018); 207 nv_wo32(grch->mmio, i++ * 4, 0x80000018);
157 208
@@ -159,7 +210,7 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
159 nv_wo32(grch->mmio, i++ * 4, 0x00405830); 210 nv_wo32(grch->mmio, i++ * 4, 0x00405830);
160 nv_wo32(grch->mmio, i++ * 4, magic); 211 nv_wo32(grch->mmio, i++ * 4, magic);
161 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 212 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
162 for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) { 213 for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) {
163 u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800); 214 u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
164 nv_wo32(grch->mmio, i++ * 4, reg); 215 nv_wo32(grch->mmio, i++ * 4, reg);
165 nv_wo32(grch->mmio, i++ * 4, magic); 216 nv_wo32(grch->mmio, i++ * 4, magic);
@@ -186,7 +237,7 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
186 return -ENOMEM; 237 return -ENOMEM;
187 chan->engctx[NVOBJ_ENGINE_GR] = grch; 238 chan->engctx[NVOBJ_ENGINE_GR] = grch;
188 239
189 ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256, 240 ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
190 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC, 241 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
191 &grch->grctx); 242 &grch->grctx);
192 if (ret) 243 if (ret)
@@ -197,8 +248,8 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
197 if (ret) 248 if (ret)
198 goto error; 249 goto error;
199 250
200 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4); 251 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
201 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst)); 252 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
202 pinstmem->flush(dev); 253 pinstmem->flush(dev);
203 254
204 if (!priv->grctx_vals) { 255 if (!priv->grctx_vals) {
@@ -210,15 +261,20 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
210 for (i = 0; i < priv->grctx_size; i += 4) 261 for (i = 0; i < priv->grctx_size; i += 4)
211 nv_wo32(grctx, i, priv->grctx_vals[i / 4]); 262 nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
212 263
213 nv_wo32(grctx, 0xf4, 0); 264 if (!nouveau_ctxfw) {
214 nv_wo32(grctx, 0xf8, 0); 265 nv_wo32(grctx, 0x00, grch->mmio_nr);
215 nv_wo32(grctx, 0x10, grch->mmio_nr); 266 nv_wo32(grctx, 0x04, grch->mmio->linst >> 8);
216 nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst)); 267 } else {
217 nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst)); 268 nv_wo32(grctx, 0xf4, 0);
218 nv_wo32(grctx, 0x1c, 1); 269 nv_wo32(grctx, 0xf8, 0);
219 nv_wo32(grctx, 0x20, 0); 270 nv_wo32(grctx, 0x10, grch->mmio_nr);
220 nv_wo32(grctx, 0x28, 0); 271 nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
221 nv_wo32(grctx, 0x2c, 0); 272 nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
273 nv_wo32(grctx, 0x1c, 1);
274 nv_wo32(grctx, 0x20, 0);
275 nv_wo32(grctx, 0x28, 0);
276 nv_wo32(grctx, 0x2c, 0);
277 }
222 pinstmem->flush(dev); 278 pinstmem->flush(dev);
223 return 0; 279 return 0;
224 280
@@ -248,7 +304,7 @@ nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
248} 304}
249 305
250static int 306static int
251nvc0_graph_fini(struct drm_device *dev, int engine) 307nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend)
252{ 308{
253 return 0; 309 return 0;
254} 310}
@@ -296,6 +352,7 @@ static void
296nvc0_graph_init_gpc_0(struct drm_device *dev) 352nvc0_graph_init_gpc_0(struct drm_device *dev)
297{ 353{
298 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); 354 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
355 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total);
299 u32 data[TP_MAX / 8]; 356 u32 data[TP_MAX / 8];
300 u8 tpnr[GPC_MAX]; 357 u8 tpnr[GPC_MAX];
301 int i, gpc, tpc; 358 int i, gpc, tpc;
@@ -307,13 +364,6 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
307 * 465: 3/4/4/0 4 7 364 * 465: 3/4/4/0 4 7
308 * 470: 3/3/4/4 5 5 365 * 470: 3/3/4/4 5 5
309 * 480: 3/4/4/4 6 6 366 * 480: 3/4/4/4 6 6
310 *
311 * magicgpc918
312 * 450: 00200000 00000000001000000000000000000000
313 * 460: 00124925 00000000000100100100100100100101
314 * 465: 000ba2e9 00000000000010111010001011101001
315 * 470: 00092493 00000000000010010010010010010011
316 * 480: 00088889 00000000000010001000100010001001
317 */ 367 */
318 368
319 memset(data, 0x00, sizeof(data)); 369 memset(data, 0x00, sizeof(data));
@@ -336,10 +386,10 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
336 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 | 386 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
337 priv->tp_nr[gpc]); 387 priv->tp_nr[gpc]);
338 nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total); 388 nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
339 nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918); 389 nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
340 } 390 }
341 391
342 nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918); 392 nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
343 nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr); 393 nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
344} 394}
345 395
@@ -419,8 +469,51 @@ nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
419static int 469static int
420nvc0_graph_init_ctxctl(struct drm_device *dev) 470nvc0_graph_init_ctxctl(struct drm_device *dev)
421{ 471{
472 struct drm_nouveau_private *dev_priv = dev->dev_private;
422 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); 473 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
423 u32 r000260; 474 u32 r000260;
475 int i;
476
477 if (!nouveau_ctxfw) {
478 /* load HUB microcode */
479 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
480 nv_wr32(dev, 0x4091c0, 0x01000000);
481 for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
482 nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]);
483
484 nv_wr32(dev, 0x409180, 0x01000000);
485 for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
486 if ((i & 0x3f) == 0)
487 nv_wr32(dev, 0x409188, i >> 6);
488 nv_wr32(dev, 0x409184, nvc0_grhub_code[i]);
489 }
490
491 /* load GPC microcode */
492 nv_wr32(dev, 0x41a1c0, 0x01000000);
493 for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
494 nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]);
495
496 nv_wr32(dev, 0x41a180, 0x01000000);
497 for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
498 if ((i & 0x3f) == 0)
499 nv_wr32(dev, 0x41a188, i >> 6);
500 nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]);
501 }
502 nv_wr32(dev, 0x000260, r000260);
503
504 /* start HUB ucode running, it'll init the GPCs */
505 nv_wr32(dev, 0x409800, dev_priv->chipset);
506 nv_wr32(dev, 0x40910c, 0x00000000);
507 nv_wr32(dev, 0x409100, 0x00000002);
508 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
509 NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n");
510 nvc0_graph_ctxctl_debug(dev);
511 return -EBUSY;
512 }
513
514 priv->grctx_size = nv_rd32(dev, 0x409804);
515 return 0;
516 }
424 517
425 /* load fuc microcode */ 518 /* load fuc microcode */
426 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); 519 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
@@ -528,6 +621,22 @@ nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
528} 621}
529 622
530static void 623static void
624nvc0_graph_ctxctl_isr(struct drm_device *dev)
625{
626 u32 ustat = nv_rd32(dev, 0x409c18);
627
628 if (ustat & 0x00000001)
629 NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
630 if (ustat & 0x00080000)
631 NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
632 if (ustat & ~0x00080001)
633 NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
634
635 nvc0_graph_ctxctl_debug(dev);
636 nv_wr32(dev, 0x409c20, ustat);
637}
638
639static void
531nvc0_graph_isr(struct drm_device *dev) 640nvc0_graph_isr(struct drm_device *dev)
532{ 641{
533 u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12; 642 u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
@@ -578,11 +687,7 @@ nvc0_graph_isr(struct drm_device *dev)
578 } 687 }
579 688
580 if (stat & 0x00080000) { 689 if (stat & 0x00080000) {
581 u32 ustat = nv_rd32(dev, 0x409c18); 690 nvc0_graph_ctxctl_isr(dev);
582
583 NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat);
584
585 nv_wr32(dev, 0x409c20, ustat);
586 nv_wr32(dev, 0x400100, 0x00080000); 691 nv_wr32(dev, 0x400100, 0x00080000);
587 stat &= ~0x00080000; 692 stat &= ~0x00080000;
588 } 693 }
@@ -606,7 +711,7 @@ nvc0_runk140_isr(struct drm_device *dev)
606 u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0); 711 u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
607 u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0); 712 u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);
608 713
609 NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1); 714 NV_DEBUG(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
610 units &= ~(1 << unit); 715 units &= ~(1 << unit);
611 } 716 }
612} 717}
@@ -651,10 +756,12 @@ nvc0_graph_destroy(struct drm_device *dev, int engine)
651{ 756{
652 struct nvc0_graph_priv *priv = nv_engine(dev, engine); 757 struct nvc0_graph_priv *priv = nv_engine(dev, engine);
653 758
654 nvc0_graph_destroy_fw(&priv->fuc409c); 759 if (nouveau_ctxfw) {
655 nvc0_graph_destroy_fw(&priv->fuc409d); 760 nvc0_graph_destroy_fw(&priv->fuc409c);
656 nvc0_graph_destroy_fw(&priv->fuc41ac); 761 nvc0_graph_destroy_fw(&priv->fuc409d);
657 nvc0_graph_destroy_fw(&priv->fuc41ad); 762 nvc0_graph_destroy_fw(&priv->fuc41ac);
763 nvc0_graph_destroy_fw(&priv->fuc41ad);
764 }
658 765
659 nouveau_irq_unregister(dev, 12); 766 nouveau_irq_unregister(dev, 12);
660 nouveau_irq_unregister(dev, 25); 767 nouveau_irq_unregister(dev, 25);
@@ -675,13 +782,10 @@ nvc0_graph_create(struct drm_device *dev)
675 struct drm_nouveau_private *dev_priv = dev->dev_private; 782 struct drm_nouveau_private *dev_priv = dev->dev_private;
676 struct nvc0_graph_priv *priv; 783 struct nvc0_graph_priv *priv;
677 int ret, gpc, i; 784 int ret, gpc, i;
785 u32 fermi;
678 786
679 switch (dev_priv->chipset) { 787 fermi = nvc0_graph_class(dev);
680 case 0xc0: 788 if (!fermi) {
681 case 0xc3:
682 case 0xc4:
683 break;
684 default:
685 NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n"); 789 NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
686 return 0; 790 return 0;
687 } 791 }
@@ -701,15 +805,17 @@ nvc0_graph_create(struct drm_device *dev)
701 nouveau_irq_register(dev, 12, nvc0_graph_isr); 805 nouveau_irq_register(dev, 12, nvc0_graph_isr);
702 nouveau_irq_register(dev, 25, nvc0_runk140_isr); 806 nouveau_irq_register(dev, 25, nvc0_runk140_isr);
703 807
704 if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) || 808 if (nouveau_ctxfw) {
705 nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) || 809 NV_INFO(dev, "PGRAPH: using external firmware\n");
706 nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) || 810 if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
707 nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) { 811 nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
708 ret = 0; 812 nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
709 goto error; 813 nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
814 ret = 0;
815 goto error;
816 }
710 } 817 }
711 818
712
713 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4); 819 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
714 if (ret) 820 if (ret)
715 goto error; 821 goto error;
@@ -735,25 +841,28 @@ nvc0_graph_create(struct drm_device *dev)
735 case 0xc0: 841 case 0xc0:
736 if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */ 842 if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
737 priv->magic_not_rop_nr = 0x07; 843 priv->magic_not_rop_nr = 0x07;
738 /* filled values up to tp_total, the rest 0 */
739 priv->magicgpc918 = 0x000ba2e9;
740 } else 844 } else
741 if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */ 845 if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
742 priv->magic_not_rop_nr = 0x05; 846 priv->magic_not_rop_nr = 0x05;
743 priv->magicgpc918 = 0x00092493;
744 } else 847 } else
745 if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */ 848 if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
746 priv->magic_not_rop_nr = 0x06; 849 priv->magic_not_rop_nr = 0x06;
747 priv->magicgpc918 = 0x00088889;
748 } 850 }
749 break; 851 break;
750 case 0xc3: /* 450, 4/0/0/0, 2 */ 852 case 0xc3: /* 450, 4/0/0/0, 2 */
751 priv->magic_not_rop_nr = 0x03; 853 priv->magic_not_rop_nr = 0x03;
752 priv->magicgpc918 = 0x00200000;
753 break; 854 break;
754 case 0xc4: /* 460, 3/4/0/0, 4 */ 855 case 0xc4: /* 460, 3/4/0/0, 4 */
755 priv->magic_not_rop_nr = 0x01; 856 priv->magic_not_rop_nr = 0x01;
756 priv->magicgpc918 = 0x00124925; 857 break;
858 case 0xc1: /* 2/0/0/0, 1 */
859 priv->magic_not_rop_nr = 0x01;
860 break;
861 case 0xc8: /* 4/4/3/4, 5 */
862 priv->magic_not_rop_nr = 0x06;
863 break;
864 case 0xce: /* 4/4/0/0, 4 */
865 priv->magic_not_rop_nr = 0x03;
757 break; 866 break;
758 } 867 }
759 868
@@ -763,13 +872,16 @@ nvc0_graph_create(struct drm_device *dev)
763 priv->tp_nr[3], priv->rop_nr); 872 priv->tp_nr[3], priv->rop_nr);
764 /* use 0xc3's values... */ 873 /* use 0xc3's values... */
765 priv->magic_not_rop_nr = 0x03; 874 priv->magic_not_rop_nr = 0x03;
766 priv->magicgpc918 = 0x00200000;
767 } 875 }
768 876
769 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */ 877 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
770 NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */ 878 NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
771 NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip); 879 NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
772 NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */ 880 NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
881 if (fermi >= 0x9197)
882 NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
883 if (fermi >= 0x9297)
884 NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */
773 NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */ 885 NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
774 return 0; 886 return 0;
775 887
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
new file mode 100644
index 000000000000..2a4b6dc8f9de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
@@ -0,0 +1,400 @@
1/* fuc microcode util functions for nvc0 PGRAPH
2 *
3 * Copyright 2011 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Ben Skeggs
24 */
25
26define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)')
27define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))')
28
29ifdef(`include_code', `
30// Error codes
31define(`E_BAD_COMMAND', 0x01)
32define(`E_CMD_OVERFLOW', 0x02)
33
34// Util macros to help with debugging ucode hangs etc
35define(`T_WAIT', 0)
36define(`T_MMCTX', 1)
37define(`T_STRWAIT', 2)
38define(`T_STRINIT', 3)
39define(`T_AUTO', 4)
40define(`T_CHAN', 5)
41define(`T_LOAD', 6)
42define(`T_SAVE', 7)
43define(`T_LCHAN', 8)
44define(`T_LCTXH', 9)
45
46define(`trace_set', `
47 mov $r8 0x83c
48 shl b32 $r8 6
49 clear b32 $r9
50 bset $r9 $1
51 iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
52')
53
54define(`trace_clr', `
55 mov $r8 0x85c
56 shl b32 $r8 6
57 clear b32 $r9
58 bset $r9 $1
59 iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
60')
61
62// queue_put - add request to queue
63//
64// In : $r13 queue pointer
65// $r14 command
66// $r15 data
67//
68queue_put:
69 // make sure we have space..
70 ld b32 $r8 D[$r13 + 0x0] // GET
71 ld b32 $r9 D[$r13 + 0x4] // PUT
72 xor $r8 8
73 cmpu b32 $r8 $r9
74 bra ne queue_put_next
75 mov $r15 E_CMD_OVERFLOW
76 call error
77 ret
78
79 // store cmd/data on queue
80 queue_put_next:
81 and $r8 $r9 7
82 shl b32 $r8 3
83 add b32 $r8 $r13
84 add b32 $r8 8
85 st b32 D[$r8 + 0x0] $r14
86 st b32 D[$r8 + 0x4] $r15
87
88 // update PUT
89 add b32 $r9 1
90 and $r9 0xf
91 st b32 D[$r13 + 0x4] $r9
92 ret
93
94// queue_get - fetch request from queue
95//
96// In : $r13 queue pointer
97//
98// Out: $p1 clear on success (data available)
99// $r14 command
100// $r15 data
101//
102queue_get:
103 bset $flags $p1
104 ld b32 $r8 D[$r13 + 0x0] // GET
105 ld b32 $r9 D[$r13 + 0x4] // PUT
106 cmpu b32 $r8 $r9
107 bra e queue_get_done
108 // fetch first cmd/data pair
109 and $r9 $r8 7
110 shl b32 $r9 3
111 add b32 $r9 $r13
112 add b32 $r9 8
113 ld b32 $r14 D[$r9 + 0x0]
114 ld b32 $r15 D[$r9 + 0x4]
115
116 // update GET
117 add b32 $r8 1
118 and $r8 0xf
119 st b32 D[$r13 + 0x0] $r8
120 bclr $flags $p1
121queue_get_done:
122 ret
123
124// nv_rd32 - read 32-bit value from nv register
125//
126// In : $r14 register
127// Out: $r15 value
128//
129nv_rd32:
130 mov $r11 0x728
131 shl b32 $r11 6
132 mov b32 $r12 $r14
133 bset $r12 31 // MMIO_CTRL_PENDING
134 iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
135 nv_rd32_wait:
136 iord $r12 I[$r11 + 0x000]
137 xbit $r12 $r12 31
138 bra ne nv_rd32_wait
139 mov $r10 6 // DONE_MMIO_RD
140 call wait_doneo
141 iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
142 ret
143
144// nv_wr32 - write 32-bit value to nv register
145//
146// In : $r14 register
147// $r15 value
148//
149nv_wr32:
150 mov $r11 0x728
151 shl b32 $r11 6
152 iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL
153 mov b32 $r12 $r14
154 bset $r12 31 // MMIO_CTRL_PENDING
155 bset $r12 30 // MMIO_CTRL_WRITE
156 iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
157 nv_wr32_wait:
158 iord $r12 I[$r11 + 0x000]
159 xbit $r12 $r12 31
160 bra ne nv_wr32_wait
161 ret
162
163// (re)set watchdog timer
164//
165// In : $r15 timeout
166//
167watchdog_reset:
168 mov $r8 0x430
169 shl b32 $r8 6
170 bset $r15 31
171 iowr I[$r8 + 0x000] $r15
172 ret
173
174// clear watchdog timer
175watchdog_clear:
176 mov $r8 0x430
177 shl b32 $r8 6
178 iowr I[$r8 + 0x000] $r0
179 ret
180
181// wait_done{z,o} - wait on FUC_DONE bit to become clear/set
182//
183// In : $r10 bit to wait on
184//
185define(`wait_done', `
186$1:
187 trace_set(T_WAIT);
188 mov $r8 0x818
189 shl b32 $r8 6
190 iowr I[$r8 + 0x000] $r10 // CC_SCRATCH[6] = wait bit
191 wait_done_$1:
192 mov $r8 0x400
193 shl b32 $r8 6
194 iord $r8 I[$r8 + 0x000] // DONE
195 xbit $r8 $r8 $r10
196 bra $2 wait_done_$1
197 trace_clr(T_WAIT)
198 ret
199')
200wait_done(wait_donez, ne)
201wait_done(wait_doneo, e)
202
203// mmctx_size - determine size of a mmio list transfer
204//
205// In : $r14 mmio list head
206// $r15 mmio list tail
207// Out: $r15 transfer size (in bytes)
208//
209mmctx_size:
210 clear b32 $r9
211 nv_mmctx_size_loop:
212 ld b32 $r8 D[$r14]
213 shr b32 $r8 26
214 add b32 $r8 1
215 shl b32 $r8 2
216 add b32 $r9 $r8
217 add b32 $r14 4
218 cmpu b32 $r14 $r15
219 bra ne nv_mmctx_size_loop
220 mov b32 $r15 $r9
221 ret
222
223// mmctx_xfer - execute a list of mmio transfers
224//
225// In : $r10 flags
226// bit 0: direction (0 = save, 1 = load)
227// bit 1: set if first transfer
228// bit 2: set if last transfer
229// $r11 base
230// $r12 mmio list head
231// $r13 mmio list tail
232// $r14 multi_stride
233// $r15 multi_mask
234//
235mmctx_xfer:
236 trace_set(T_MMCTX)
237 mov $r8 0x710
238 shl b32 $r8 6
239 clear b32 $r9
240 or $r11 $r11
241 bra e mmctx_base_disabled
242 iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
243 bset $r9 0 // BASE_EN
244 mmctx_base_disabled:
245 or $r14 $r14
246 bra e mmctx_multi_disabled
247 iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
248 iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
249 bset $r9 1 // MULTI_EN
250 mmctx_multi_disabled:
251 add b32 $r8 0x100
252
253 xbit $r11 $r10 0
254 shl b32 $r11 16 // DIR
255 bset $r11 12 // QLIMIT = 0x10
256 xbit $r14 $r10 1
257 shl b32 $r14 17
258 or $r11 $r14 // START_TRIGGER
259 iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
260
261 // loop over the mmio list, and send requests to the hw
262 mmctx_exec_loop:
263 // wait for space in mmctx queue
264 mmctx_wait_free:
265 iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
266 and $r14 0x1f
267 bra e mmctx_wait_free
268
269 // queue up an entry
270 ld b32 $r14 D[$r12]
271 or $r14 $r9
272 iowr I[$r8 + 0x300] $r14
273 add b32 $r12 4
274 cmpu b32 $r12 $r13
275 bra ne mmctx_exec_loop
276
277 xbit $r11 $r10 2
278 bra ne mmctx_stop
279 // wait for queue to empty
280 mmctx_fini_wait:
281 iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
282 and $r11 0x1f
283 cmpu b32 $r11 0x10
284 bra ne mmctx_fini_wait
285 mov $r10 2 // DONE_MMCTX
286 call wait_donez
287 bra mmctx_done
288 mmctx_stop:
289 xbit $r11 $r10 0
290 shl b32 $r11 16 // DIR
291 bset $r11 12 // QLIMIT = 0x10
292 bset $r11 18 // STOP_TRIGGER
293 iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
294 mmctx_stop_wait:
295 // wait for STOP_TRIGGER to clear
296 iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
297 xbit $r11 $r11 18
298 bra ne mmctx_stop_wait
299 mmctx_done:
300 trace_clr(T_MMCTX)
301 ret
302
303// Wait for DONE_STRAND
304//
305strand_wait:
306 push $r10
307 mov $r10 2
308 call wait_donez
309 pop $r10
310 ret
311
312// unknown - call before issuing strand commands
313//
314strand_pre:
315 mov $r8 0x4afc
316 sethi $r8 0x20000
317 mov $r9 0xc
318 iowr I[$r8] $r9
319 call strand_wait
320 ret
321
322// unknown - call after issuing strand commands
323//
324strand_post:
325 mov $r8 0x4afc
326 sethi $r8 0x20000
327 mov $r9 0xd
328 iowr I[$r8] $r9
329 call strand_wait
330 ret
331
332// Selects strand set?!
333//
334// In: $r14 id
335//
336strand_set:
337 mov $r10 0x4ffc
338 sethi $r10 0x20000
339 sub b32 $r11 $r10 0x500
340 mov $r12 0xf
341 iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
342 mov $r12 0xb
343 iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
344 call strand_wait
345 iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
346 mov $r12 0xa
347 iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
348 call strand_wait
349 ret
350
351// Initialise strand context data
352//
353// In : $r15 context base
354// Out: $r15 context size (in bytes)
355//
356// Strandset(?) 3 hardcoded currently
357//
358strand_ctx_init:
359 trace_set(T_STRINIT)
360 call strand_pre
361 mov $r14 3
362 call strand_set
363 mov $r10 0x46fc
364 sethi $r10 0x20000
365 add b32 $r11 $r10 0x400
366 iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
367 mov $r12 1
368 iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
369 call strand_wait
370 sub b32 $r12 $r0 1
371 iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
372 mov $r12 2
373 iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
374 call strand_wait
375 call strand_post
376
377 // read the size of each strand, poke the context offset of
378 // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
379 // about it later then.
380 mov $r8 0x880
381 shl b32 $r8 6
382 iord $r9 I[$r8 + 0x000] // STRANDS
383 add b32 $r8 0x2200
384 shr b32 $r14 $r15 8
385 ctx_init_strand_loop:
386 iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE
387 iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE
388 iord $r10 I[$r8 + 0x200] // STRAND_SIZE
389 shr b32 $r10 6
390 add b32 $r10 1
391 add b32 $r14 $r10
392 add b32 $r8 4
393 sub b32 $r9 1
394 bra ne ctx_init_strand_loop
395
396 shl b32 $r14 8
397 sub b32 $r15 $r14 $r15
398 trace_clr(T_STRINIT)
399 ret
400')
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
index f5d184e0689d..55689e997286 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -57,8 +57,7 @@ struct nvc0_graph_priv {
57 struct nouveau_gpuobj *unk4188b4; 57 struct nouveau_gpuobj *unk4188b4;
58 struct nouveau_gpuobj *unk4188b8; 58 struct nouveau_gpuobj *unk4188b8;
59 59
60 u8 magic_not_rop_nr; 60 u8 magic_not_rop_nr;
61 u32 magicgpc918;
62}; 61};
63 62
64struct nvc0_graph_chan { 63struct nvc0_graph_chan {
@@ -72,4 +71,25 @@ struct nvc0_graph_chan {
72 71
73int nvc0_grctx_generate(struct nouveau_channel *); 72int nvc0_grctx_generate(struct nouveau_channel *);
74 73
74/* nvc0_graph.c uses this also to determine supported chipsets */
75static inline u32
76nvc0_graph_class(struct drm_device *dev)
77{
78 struct drm_nouveau_private *dev_priv = dev->dev_private;
79
80 switch (dev_priv->chipset) {
81 case 0xc0:
82 case 0xc3:
83 case 0xc4:
84 case 0xce: /* guess, mmio trace shows only 0x9097 state */
85 return 0x9097;
86 case 0xc1:
87 return 0x9197;
88 case 0xc8:
89 return 0x9297;
90 default:
91 return 0;
92 }
93}
94
75#endif 95#endif
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index 6df066114133..31018eaf5279 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -45,6 +45,9 @@ nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
45static void 45static void
46nvc0_grctx_generate_9097(struct drm_device *dev) 46nvc0_grctx_generate_9097(struct drm_device *dev)
47{ 47{
48 u32 fermi = nvc0_graph_class(dev);
49 u32 mthd;
50
48 nv_mthd(dev, 0x9097, 0x0800, 0x00000000); 51 nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
49 nv_mthd(dev, 0x9097, 0x0840, 0x00000000); 52 nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
50 nv_mthd(dev, 0x9097, 0x0880, 0x00000000); 53 nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
@@ -824,134 +827,10 @@ nvc0_grctx_generate_9097(struct drm_device *dev)
824 nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001); 827 nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
825 nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001); 828 nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
826 nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001); 829 nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
827 nv_mthd(dev, 0x9097, 0x3400, 0x00000000); 830 if (fermi == 0x9097) {
828 nv_mthd(dev, 0x9097, 0x3404, 0x00000000); 831 for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
829 nv_mthd(dev, 0x9097, 0x3408, 0x00000000); 832 nv_mthd(dev, 0x9097, mthd, 0x00000000);
830 nv_mthd(dev, 0x9097, 0x340c, 0x00000000); 833 }
831 nv_mthd(dev, 0x9097, 0x3410, 0x00000000);
832 nv_mthd(dev, 0x9097, 0x3414, 0x00000000);
833 nv_mthd(dev, 0x9097, 0x3418, 0x00000000);
834 nv_mthd(dev, 0x9097, 0x341c, 0x00000000);
835 nv_mthd(dev, 0x9097, 0x3420, 0x00000000);
836 nv_mthd(dev, 0x9097, 0x3424, 0x00000000);
837 nv_mthd(dev, 0x9097, 0x3428, 0x00000000);
838 nv_mthd(dev, 0x9097, 0x342c, 0x00000000);
839 nv_mthd(dev, 0x9097, 0x3430, 0x00000000);
840 nv_mthd(dev, 0x9097, 0x3434, 0x00000000);
841 nv_mthd(dev, 0x9097, 0x3438, 0x00000000);
842 nv_mthd(dev, 0x9097, 0x343c, 0x00000000);
843 nv_mthd(dev, 0x9097, 0x3440, 0x00000000);
844 nv_mthd(dev, 0x9097, 0x3444, 0x00000000);
845 nv_mthd(dev, 0x9097, 0x3448, 0x00000000);
846 nv_mthd(dev, 0x9097, 0x344c, 0x00000000);
847 nv_mthd(dev, 0x9097, 0x3450, 0x00000000);
848 nv_mthd(dev, 0x9097, 0x3454, 0x00000000);
849 nv_mthd(dev, 0x9097, 0x3458, 0x00000000);
850 nv_mthd(dev, 0x9097, 0x345c, 0x00000000);
851 nv_mthd(dev, 0x9097, 0x3460, 0x00000000);
852 nv_mthd(dev, 0x9097, 0x3464, 0x00000000);
853 nv_mthd(dev, 0x9097, 0x3468, 0x00000000);
854 nv_mthd(dev, 0x9097, 0x346c, 0x00000000);
855 nv_mthd(dev, 0x9097, 0x3470, 0x00000000);
856 nv_mthd(dev, 0x9097, 0x3474, 0x00000000);
857 nv_mthd(dev, 0x9097, 0x3478, 0x00000000);
858 nv_mthd(dev, 0x9097, 0x347c, 0x00000000);
859 nv_mthd(dev, 0x9097, 0x3480, 0x00000000);
860 nv_mthd(dev, 0x9097, 0x3484, 0x00000000);
861 nv_mthd(dev, 0x9097, 0x3488, 0x00000000);
862 nv_mthd(dev, 0x9097, 0x348c, 0x00000000);
863 nv_mthd(dev, 0x9097, 0x3490, 0x00000000);
864 nv_mthd(dev, 0x9097, 0x3494, 0x00000000);
865 nv_mthd(dev, 0x9097, 0x3498, 0x00000000);
866 nv_mthd(dev, 0x9097, 0x349c, 0x00000000);
867 nv_mthd(dev, 0x9097, 0x34a0, 0x00000000);
868 nv_mthd(dev, 0x9097, 0x34a4, 0x00000000);
869 nv_mthd(dev, 0x9097, 0x34a8, 0x00000000);
870 nv_mthd(dev, 0x9097, 0x34ac, 0x00000000);
871 nv_mthd(dev, 0x9097, 0x34b0, 0x00000000);
872 nv_mthd(dev, 0x9097, 0x34b4, 0x00000000);
873 nv_mthd(dev, 0x9097, 0x34b8, 0x00000000);
874 nv_mthd(dev, 0x9097, 0x34bc, 0x00000000);
875 nv_mthd(dev, 0x9097, 0x34c0, 0x00000000);
876 nv_mthd(dev, 0x9097, 0x34c4, 0x00000000);
877 nv_mthd(dev, 0x9097, 0x34c8, 0x00000000);
878 nv_mthd(dev, 0x9097, 0x34cc, 0x00000000);
879 nv_mthd(dev, 0x9097, 0x34d0, 0x00000000);
880 nv_mthd(dev, 0x9097, 0x34d4, 0x00000000);
881 nv_mthd(dev, 0x9097, 0x34d8, 0x00000000);
882 nv_mthd(dev, 0x9097, 0x34dc, 0x00000000);
883 nv_mthd(dev, 0x9097, 0x34e0, 0x00000000);
884 nv_mthd(dev, 0x9097, 0x34e4, 0x00000000);
885 nv_mthd(dev, 0x9097, 0x34e8, 0x00000000);
886 nv_mthd(dev, 0x9097, 0x34ec, 0x00000000);
887 nv_mthd(dev, 0x9097, 0x34f0, 0x00000000);
888 nv_mthd(dev, 0x9097, 0x34f4, 0x00000000);
889 nv_mthd(dev, 0x9097, 0x34f8, 0x00000000);
890 nv_mthd(dev, 0x9097, 0x34fc, 0x00000000);
891 nv_mthd(dev, 0x9097, 0x3500, 0x00000000);
892 nv_mthd(dev, 0x9097, 0x3504, 0x00000000);
893 nv_mthd(dev, 0x9097, 0x3508, 0x00000000);
894 nv_mthd(dev, 0x9097, 0x350c, 0x00000000);
895 nv_mthd(dev, 0x9097, 0x3510, 0x00000000);
896 nv_mthd(dev, 0x9097, 0x3514, 0x00000000);
897 nv_mthd(dev, 0x9097, 0x3518, 0x00000000);
898 nv_mthd(dev, 0x9097, 0x351c, 0x00000000);
899 nv_mthd(dev, 0x9097, 0x3520, 0x00000000);
900 nv_mthd(dev, 0x9097, 0x3524, 0x00000000);
901 nv_mthd(dev, 0x9097, 0x3528, 0x00000000);
902 nv_mthd(dev, 0x9097, 0x352c, 0x00000000);
903 nv_mthd(dev, 0x9097, 0x3530, 0x00000000);
904 nv_mthd(dev, 0x9097, 0x3534, 0x00000000);
905 nv_mthd(dev, 0x9097, 0x3538, 0x00000000);
906 nv_mthd(dev, 0x9097, 0x353c, 0x00000000);
907 nv_mthd(dev, 0x9097, 0x3540, 0x00000000);
908 nv_mthd(dev, 0x9097, 0x3544, 0x00000000);
909 nv_mthd(dev, 0x9097, 0x3548, 0x00000000);
910 nv_mthd(dev, 0x9097, 0x354c, 0x00000000);
911 nv_mthd(dev, 0x9097, 0x3550, 0x00000000);
912 nv_mthd(dev, 0x9097, 0x3554, 0x00000000);
913 nv_mthd(dev, 0x9097, 0x3558, 0x00000000);
914 nv_mthd(dev, 0x9097, 0x355c, 0x00000000);
915 nv_mthd(dev, 0x9097, 0x3560, 0x00000000);
916 nv_mthd(dev, 0x9097, 0x3564, 0x00000000);
917 nv_mthd(dev, 0x9097, 0x3568, 0x00000000);
918 nv_mthd(dev, 0x9097, 0x356c, 0x00000000);
919 nv_mthd(dev, 0x9097, 0x3570, 0x00000000);
920 nv_mthd(dev, 0x9097, 0x3574, 0x00000000);
921 nv_mthd(dev, 0x9097, 0x3578, 0x00000000);
922 nv_mthd(dev, 0x9097, 0x357c, 0x00000000);
923 nv_mthd(dev, 0x9097, 0x3580, 0x00000000);
924 nv_mthd(dev, 0x9097, 0x3584, 0x00000000);
925 nv_mthd(dev, 0x9097, 0x3588, 0x00000000);
926 nv_mthd(dev, 0x9097, 0x358c, 0x00000000);
927 nv_mthd(dev, 0x9097, 0x3590, 0x00000000);
928 nv_mthd(dev, 0x9097, 0x3594, 0x00000000);
929 nv_mthd(dev, 0x9097, 0x3598, 0x00000000);
930 nv_mthd(dev, 0x9097, 0x359c, 0x00000000);
931 nv_mthd(dev, 0x9097, 0x35a0, 0x00000000);
932 nv_mthd(dev, 0x9097, 0x35a4, 0x00000000);
933 nv_mthd(dev, 0x9097, 0x35a8, 0x00000000);
934 nv_mthd(dev, 0x9097, 0x35ac, 0x00000000);
935 nv_mthd(dev, 0x9097, 0x35b0, 0x00000000);
936 nv_mthd(dev, 0x9097, 0x35b4, 0x00000000);
937 nv_mthd(dev, 0x9097, 0x35b8, 0x00000000);
938 nv_mthd(dev, 0x9097, 0x35bc, 0x00000000);
939 nv_mthd(dev, 0x9097, 0x35c0, 0x00000000);
940 nv_mthd(dev, 0x9097, 0x35c4, 0x00000000);
941 nv_mthd(dev, 0x9097, 0x35c8, 0x00000000);
942 nv_mthd(dev, 0x9097, 0x35cc, 0x00000000);
943 nv_mthd(dev, 0x9097, 0x35d0, 0x00000000);
944 nv_mthd(dev, 0x9097, 0x35d4, 0x00000000);
945 nv_mthd(dev, 0x9097, 0x35d8, 0x00000000);
946 nv_mthd(dev, 0x9097, 0x35dc, 0x00000000);
947 nv_mthd(dev, 0x9097, 0x35e0, 0x00000000);
948 nv_mthd(dev, 0x9097, 0x35e4, 0x00000000);
949 nv_mthd(dev, 0x9097, 0x35e8, 0x00000000);
950 nv_mthd(dev, 0x9097, 0x35ec, 0x00000000);
951 nv_mthd(dev, 0x9097, 0x35f0, 0x00000000);
952 nv_mthd(dev, 0x9097, 0x35f4, 0x00000000);
953 nv_mthd(dev, 0x9097, 0x35f8, 0x00000000);
954 nv_mthd(dev, 0x9097, 0x35fc, 0x00000000);
955 nv_mthd(dev, 0x9097, 0x030c, 0x00000001); 834 nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
956 nv_mthd(dev, 0x9097, 0x1944, 0x00000000); 835 nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
957 nv_mthd(dev, 0x9097, 0x1514, 0x00000000); 836 nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
@@ -1321,6 +1200,37 @@ nvc0_grctx_generate_9097(struct drm_device *dev)
1321} 1200}
1322 1201
1323static void 1202static void
1203nvc0_grctx_generate_9197(struct drm_device *dev)
1204{
1205 u32 fermi = nvc0_graph_class(dev);
1206 u32 mthd;
1207
1208 if (fermi == 0x9197) {
1209 for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
1210 nv_mthd(dev, 0x9197, mthd, 0x00000000);
1211 }
1212 nv_mthd(dev, 0x9197, 0x02e4, 0x0000b001);
1213}
1214
1215static void
1216nvc0_grctx_generate_9297(struct drm_device *dev)
1217{
1218 u32 fermi = nvc0_graph_class(dev);
1219 u32 mthd;
1220
1221 if (fermi == 0x9297) {
1222 for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
1223 nv_mthd(dev, 0x9297, mthd, 0x00000000);
1224 }
1225 nv_mthd(dev, 0x9297, 0x036c, 0x00000000);
1226 nv_mthd(dev, 0x9297, 0x0370, 0x00000000);
1227 nv_mthd(dev, 0x9297, 0x07a4, 0x00000000);
1228 nv_mthd(dev, 0x9297, 0x07a8, 0x00000000);
1229 nv_mthd(dev, 0x9297, 0x0374, 0x00000000);
1230 nv_mthd(dev, 0x9297, 0x0378, 0x00000020);
1231}
1232
1233static void
1324nvc0_grctx_generate_902d(struct drm_device *dev) 1234nvc0_grctx_generate_902d(struct drm_device *dev)
1325{ 1235{
1326 nv_mthd(dev, 0x902d, 0x0200, 0x000000cf); 1236 nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
@@ -1559,8 +1469,15 @@ nvc0_grctx_generate_unk47xx(struct drm_device *dev)
1559static void 1469static void
1560nvc0_grctx_generate_shaders(struct drm_device *dev) 1470nvc0_grctx_generate_shaders(struct drm_device *dev)
1561{ 1471{
1562 nv_wr32(dev, 0x405800, 0x078000bf); 1472 struct drm_nouveau_private *dev_priv = dev->dev_private;
1563 nv_wr32(dev, 0x405830, 0x02180000); 1473
1474 if (dev_priv->chipset != 0xc1) {
1475 nv_wr32(dev, 0x405800, 0x078000bf);
1476 nv_wr32(dev, 0x405830, 0x02180000);
1477 } else {
1478 nv_wr32(dev, 0x405800, 0x0f8000bf);
1479 nv_wr32(dev, 0x405830, 0x02180218);
1480 }
1564 nv_wr32(dev, 0x405834, 0x00000000); 1481 nv_wr32(dev, 0x405834, 0x00000000);
1565 nv_wr32(dev, 0x405838, 0x00000000); 1482 nv_wr32(dev, 0x405838, 0x00000000);
1566 nv_wr32(dev, 0x405854, 0x00000000); 1483 nv_wr32(dev, 0x405854, 0x00000000);
@@ -1586,10 +1503,16 @@ nvc0_grctx_generate_unk60xx(struct drm_device *dev)
1586static void 1503static void
1587nvc0_grctx_generate_unk64xx(struct drm_device *dev) 1504nvc0_grctx_generate_unk64xx(struct drm_device *dev)
1588{ 1505{
1506 struct drm_nouveau_private *dev_priv = dev->dev_private;
1507
1589 nv_wr32(dev, 0x4064a8, 0x00000000); 1508 nv_wr32(dev, 0x4064a8, 0x00000000);
1590 nv_wr32(dev, 0x4064ac, 0x00003fff); 1509 nv_wr32(dev, 0x4064ac, 0x00003fff);
1591 nv_wr32(dev, 0x4064b4, 0x00000000); 1510 nv_wr32(dev, 0x4064b4, 0x00000000);
1592 nv_wr32(dev, 0x4064b8, 0x00000000); 1511 nv_wr32(dev, 0x4064b8, 0x00000000);
1512 if (dev_priv->chipset == 0xc1) {
1513 nv_wr32(dev, 0x4064c0, 0x80140078);
1514 nv_wr32(dev, 0x4064c4, 0x0086ffff);
1515 }
1593} 1516}
1594 1517
1595static void 1518static void
@@ -1622,21 +1545,14 @@ static void
1622nvc0_grctx_generate_rop(struct drm_device *dev) 1545nvc0_grctx_generate_rop(struct drm_device *dev)
1623{ 1546{
1624 struct drm_nouveau_private *dev_priv = dev->dev_private; 1547 struct drm_nouveau_private *dev_priv = dev->dev_private;
1548 int chipset = dev_priv->chipset;
1625 1549
1626 /* ROPC_BROADCAST */ 1550 /* ROPC_BROADCAST */
1627 nv_wr32(dev, 0x408800, 0x02802a3c); 1551 nv_wr32(dev, 0x408800, 0x02802a3c);
1628 nv_wr32(dev, 0x408804, 0x00000040); 1552 nv_wr32(dev, 0x408804, 0x00000040);
1629 nv_wr32(dev, 0x408808, 0x0003e00d); 1553 nv_wr32(dev, 0x408808, chipset != 0xc1 ? 0x0003e00d : 0x1003e005);
1630 switch (dev_priv->chipset) { 1554 nv_wr32(dev, 0x408900, 0x3080b801);
1631 case 0xc0: 1555 nv_wr32(dev, 0x408904, chipset != 0xc1 ? 0x02000001 : 0x62000001);
1632 nv_wr32(dev, 0x408900, 0x0080b801);
1633 break;
1634 case 0xc3:
1635 case 0xc4:
1636 nv_wr32(dev, 0x408900, 0x3080b801);
1637 break;
1638 }
1639 nv_wr32(dev, 0x408904, 0x02000001);
1640 nv_wr32(dev, 0x408908, 0x00c80929); 1556 nv_wr32(dev, 0x408908, 0x00c80929);
1641 nv_wr32(dev, 0x40890c, 0x00000000); 1557 nv_wr32(dev, 0x40890c, 0x00000000);
1642 nv_wr32(dev, 0x408980, 0x0000011d); 1558 nv_wr32(dev, 0x408980, 0x0000011d);
@@ -1645,6 +1561,8 @@ nvc0_grctx_generate_rop(struct drm_device *dev)
1645static void 1561static void
1646nvc0_grctx_generate_gpc(struct drm_device *dev) 1562nvc0_grctx_generate_gpc(struct drm_device *dev)
1647{ 1563{
1564 struct drm_nouveau_private *dev_priv = dev->dev_private;
1565 int chipset = dev_priv->chipset;
1648 int i; 1566 int i;
1649 1567
1650 /* GPC_BROADCAST */ 1568 /* GPC_BROADCAST */
@@ -1676,7 +1594,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
1676 nv_wr32(dev, 0x41880c, 0x00000000); 1594 nv_wr32(dev, 0x41880c, 0x00000000);
1677 nv_wr32(dev, 0x418810, 0x00000000); 1595 nv_wr32(dev, 0x418810, 0x00000000);
1678 nv_wr32(dev, 0x418828, 0x00008442); 1596 nv_wr32(dev, 0x418828, 0x00008442);
1679 nv_wr32(dev, 0x418830, 0x00000001); 1597 nv_wr32(dev, 0x418830, chipset != 0xc1 ? 0x00000001 : 0x10000001);
1680 nv_wr32(dev, 0x4188d8, 0x00000008); 1598 nv_wr32(dev, 0x4188d8, 0x00000008);
1681 nv_wr32(dev, 0x4188e0, 0x01000000); 1599 nv_wr32(dev, 0x4188e0, 0x01000000);
1682 nv_wr32(dev, 0x4188e8, 0x00000000); 1600 nv_wr32(dev, 0x4188e8, 0x00000000);
@@ -1684,7 +1602,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
1684 nv_wr32(dev, 0x4188f0, 0x00000000); 1602 nv_wr32(dev, 0x4188f0, 0x00000000);
1685 nv_wr32(dev, 0x4188f4, 0x00000000); 1603 nv_wr32(dev, 0x4188f4, 0x00000000);
1686 nv_wr32(dev, 0x4188f8, 0x00000000); 1604 nv_wr32(dev, 0x4188f8, 0x00000000);
1687 nv_wr32(dev, 0x4188fc, 0x00100000); 1605 nv_wr32(dev, 0x4188fc, chipset != 0xc1 ? 0x00100000 : 0x00100018);
1688 nv_wr32(dev, 0x41891c, 0x00ff00ff); 1606 nv_wr32(dev, 0x41891c, 0x00ff00ff);
1689 nv_wr32(dev, 0x418924, 0x00000000); 1607 nv_wr32(dev, 0x418924, 0x00000000);
1690 nv_wr32(dev, 0x418928, 0x00ffff00); 1608 nv_wr32(dev, 0x418928, 0x00ffff00);
@@ -1715,6 +1633,8 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
1715 nv_wr32(dev, 0x418c24, 0x00000000); 1633 nv_wr32(dev, 0x418c24, 0x00000000);
1716 nv_wr32(dev, 0x418c28, 0x00000000); 1634 nv_wr32(dev, 0x418c28, 0x00000000);
1717 nv_wr32(dev, 0x418c2c, 0x00000000); 1635 nv_wr32(dev, 0x418c2c, 0x00000000);
1636 if (chipset == 0xc1)
1637 nv_wr32(dev, 0x418c6c, 0x00000001);
1718 nv_wr32(dev, 0x418c80, 0x20200004); 1638 nv_wr32(dev, 0x418c80, 0x20200004);
1719 nv_wr32(dev, 0x418c8c, 0x00000001); 1639 nv_wr32(dev, 0x418c8c, 0x00000001);
1720 nv_wr32(dev, 0x419000, 0x00000780); 1640 nv_wr32(dev, 0x419000, 0x00000780);
@@ -1727,10 +1647,13 @@ static void
1727nvc0_grctx_generate_tp(struct drm_device *dev) 1647nvc0_grctx_generate_tp(struct drm_device *dev)
1728{ 1648{
1729 struct drm_nouveau_private *dev_priv = dev->dev_private; 1649 struct drm_nouveau_private *dev_priv = dev->dev_private;
1650 int chipset = dev_priv->chipset;
1730 1651
1731 /* GPC_BROADCAST.TP_BROADCAST */ 1652 /* GPC_BROADCAST.TP_BROADCAST */
1653 nv_wr32(dev, 0x419818, 0x00000000);
1654 nv_wr32(dev, 0x41983c, 0x00038bc7);
1732 nv_wr32(dev, 0x419848, 0x00000000); 1655 nv_wr32(dev, 0x419848, 0x00000000);
1733 nv_wr32(dev, 0x419864, 0x0000012a); 1656 nv_wr32(dev, 0x419864, chipset != 0xc1 ? 0x0000012a : 0x00000129);
1734 nv_wr32(dev, 0x419888, 0x00000000); 1657 nv_wr32(dev, 0x419888, 0x00000000);
1735 nv_wr32(dev, 0x419a00, 0x000001f0); 1658 nv_wr32(dev, 0x419a00, 0x000001f0);
1736 nv_wr32(dev, 0x419a04, 0x00000001); 1659 nv_wr32(dev, 0x419a04, 0x00000001);
@@ -1740,8 +1663,8 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
1740 nv_wr32(dev, 0x419a14, 0x00000200); 1663 nv_wr32(dev, 0x419a14, 0x00000200);
1741 nv_wr32(dev, 0x419a1c, 0x00000000); 1664 nv_wr32(dev, 0x419a1c, 0x00000000);
1742 nv_wr32(dev, 0x419a20, 0x00000800); 1665 nv_wr32(dev, 0x419a20, 0x00000800);
1743 if (dev_priv->chipset != 0xc0) 1666 if (chipset != 0xc0 && chipset != 0xc8)
1744 nv_wr32(dev, 0x00419ac4, 0x0007f440); /* 0xc3 */ 1667 nv_wr32(dev, 0x00419ac4, 0x0007f440);
1745 nv_wr32(dev, 0x419b00, 0x0a418820); 1668 nv_wr32(dev, 0x419b00, 0x0a418820);
1746 nv_wr32(dev, 0x419b04, 0x062080e6); 1669 nv_wr32(dev, 0x419b04, 0x062080e6);
1747 nv_wr32(dev, 0x419b08, 0x020398a4); 1670 nv_wr32(dev, 0x419b08, 0x020398a4);
@@ -1749,17 +1672,19 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
1749 nv_wr32(dev, 0x419b10, 0x0a418820); 1672 nv_wr32(dev, 0x419b10, 0x0a418820);
1750 nv_wr32(dev, 0x419b14, 0x000000e6); 1673 nv_wr32(dev, 0x419b14, 0x000000e6);
1751 nv_wr32(dev, 0x419bd0, 0x00900103); 1674 nv_wr32(dev, 0x419bd0, 0x00900103);
1752 nv_wr32(dev, 0x419be0, 0x00000001); 1675 nv_wr32(dev, 0x419be0, chipset != 0xc1 ? 0x00000001 : 0x00400001);
1753 nv_wr32(dev, 0x419be4, 0x00000000); 1676 nv_wr32(dev, 0x419be4, 0x00000000);
1754 nv_wr32(dev, 0x419c00, 0x00000002); 1677 nv_wr32(dev, 0x419c00, 0x00000002);
1755 nv_wr32(dev, 0x419c04, 0x00000006); 1678 nv_wr32(dev, 0x419c04, 0x00000006);
1756 nv_wr32(dev, 0x419c08, 0x00000002); 1679 nv_wr32(dev, 0x419c08, 0x00000002);
1757 nv_wr32(dev, 0x419c20, 0x00000000); 1680 nv_wr32(dev, 0x419c20, 0x00000000);
1758 nv_wr32(dev, 0x419cbc, 0x28137606); 1681 nv_wr32(dev, 0x419cb0, 0x00060048); //XXX: 0xce 0x00020048
1759 nv_wr32(dev, 0x419ce8, 0x00000000); 1682 nv_wr32(dev, 0x419ce8, 0x00000000);
1760 nv_wr32(dev, 0x419cf4, 0x00000183); 1683 nv_wr32(dev, 0x419cf4, 0x00000183);
1761 nv_wr32(dev, 0x419d20, 0x02180000); 1684 nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000);
1762 nv_wr32(dev, 0x419d24, 0x00001fff); 1685 nv_wr32(dev, 0x419d24, 0x00001fff);
1686 if (chipset == 0xc1)
1687 nv_wr32(dev, 0x419d44, 0x02180218);
1763 nv_wr32(dev, 0x419e04, 0x00000000); 1688 nv_wr32(dev, 0x419e04, 0x00000000);
1764 nv_wr32(dev, 0x419e08, 0x00000000); 1689 nv_wr32(dev, 0x419e08, 0x00000000);
1765 nv_wr32(dev, 0x419e0c, 0x00000000); 1690 nv_wr32(dev, 0x419e0c, 0x00000000);
@@ -1785,11 +1710,11 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
1785 nv_wr32(dev, 0x419e8c, 0x00000000); 1710 nv_wr32(dev, 0x419e8c, 0x00000000);
1786 nv_wr32(dev, 0x419e90, 0x00000000); 1711 nv_wr32(dev, 0x419e90, 0x00000000);
1787 nv_wr32(dev, 0x419e98, 0x00000000); 1712 nv_wr32(dev, 0x419e98, 0x00000000);
1788 if (dev_priv->chipset != 0xc0) 1713 if (chipset != 0xc0 && chipset != 0xc8)
1789 nv_wr32(dev, 0x419ee0, 0x00011110); 1714 nv_wr32(dev, 0x419ee0, 0x00011110);
1790 nv_wr32(dev, 0x419f50, 0x00000000); 1715 nv_wr32(dev, 0x419f50, 0x00000000);
1791 nv_wr32(dev, 0x419f54, 0x00000000); 1716 nv_wr32(dev, 0x419f54, 0x00000000);
1792 if (dev_priv->chipset != 0xc0) 1717 if (chipset != 0xc0 && chipset != 0xc8)
1793 nv_wr32(dev, 0x419f58, 0x00000000); 1718 nv_wr32(dev, 0x419f58, 0x00000000);
1794} 1719}
1795 1720
@@ -1801,6 +1726,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1801 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; 1726 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
1802 struct drm_device *dev = chan->dev; 1727 struct drm_device *dev = chan->dev;
1803 int i, gpc, tp, id; 1728 int i, gpc, tp, id;
1729 u32 fermi = nvc0_graph_class(dev);
1804 u32 r000260, tmp; 1730 u32 r000260, tmp;
1805 1731
1806 r000260 = nv_rd32(dev, 0x000260); 1732 r000260 = nv_rd32(dev, 0x000260);
@@ -1857,10 +1783,11 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1857 nv_wr32(dev, 0x40587c, 0x00000000); 1783 nv_wr32(dev, 0x40587c, 0x00000000);
1858 1784
1859 if (1) { 1785 if (1) {
1860 const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 }; 1786 const u8 chipset_tp_max[] = { 16, 4, 0, 4, 8, 0, 0, 0,
1787 16, 0, 0, 0, 0, 0, 8, 0 };
1861 u8 max = chipset_tp_max[dev_priv->chipset & 0x0f]; 1788 u8 max = chipset_tp_max[dev_priv->chipset & 0x0f];
1862 u8 tpnr[GPC_MAX]; 1789 u8 tpnr[GPC_MAX];
1863 u8 data[32]; 1790 u8 data[TP_MAX];
1864 1791
1865 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); 1792 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1866 memset(data, 0x1f, sizeof(data)); 1793 memset(data, 0x1f, sizeof(data));
@@ -2633,6 +2560,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
2633 nv_icmd(dev, 0x0000053f, 0xffff0000); 2560 nv_icmd(dev, 0x0000053f, 0xffff0000);
2634 nv_icmd(dev, 0x00000585, 0x0000003f); 2561 nv_icmd(dev, 0x00000585, 0x0000003f);
2635 nv_icmd(dev, 0x00000576, 0x00000003); 2562 nv_icmd(dev, 0x00000576, 0x00000003);
2563 if (dev_priv->chipset == 0xc1)
2564 nv_icmd(dev, 0x0000057b, 0x00000059);
2636 nv_icmd(dev, 0x00000586, 0x00000040); 2565 nv_icmd(dev, 0x00000586, 0x00000040);
2637 nv_icmd(dev, 0x00000582, 0x00000080); 2566 nv_icmd(dev, 0x00000582, 0x00000080);
2638 nv_icmd(dev, 0x00000583, 0x00000080); 2567 nv_icmd(dev, 0x00000583, 0x00000080);
@@ -2865,6 +2794,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
2865 nv_wr32(dev, 0x404154, 0x00000400); 2794 nv_wr32(dev, 0x404154, 0x00000400);
2866 2795
2867 nvc0_grctx_generate_9097(dev); 2796 nvc0_grctx_generate_9097(dev);
2797 if (fermi >= 0x9197)
2798 nvc0_grctx_generate_9197(dev);
2799 if (fermi >= 0x9297)
2800 nvc0_grctx_generate_9297(dev);
2868 nvc0_grctx_generate_902d(dev); 2801 nvc0_grctx_generate_902d(dev);
2869 nvc0_grctx_generate_9039(dev); 2802 nvc0_grctx_generate_9039(dev);
2870 nvc0_grctx_generate_90c0(dev); 2803 nvc0_grctx_generate_90c0(dev);
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
new file mode 100644
index 000000000000..0ec2add72a76
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
@@ -0,0 +1,474 @@
1/* fuc microcode for nvc0 PGRAPH/GPC
2 *
3 * Copyright 2011 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Ben Skeggs
24 */
25
26/* To build:
27 * m4 nvc0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grgpc.fuc.h
28 */
29
30/* TODO
31 * - bracket certain functions with scratch writes, useful for debugging
32 * - watchdog timer around ctx operations
33 */
34
35.section nvc0_grgpc_data
36include(`nvc0_graph.fuc')
37gpc_id: .b32 0
38gpc_mmio_list_head: .b32 0
39gpc_mmio_list_tail: .b32 0
40
41tpc_count: .b32 0
42tpc_mask: .b32 0
43tpc_mmio_list_head: .b32 0
44tpc_mmio_list_tail: .b32 0
45
46cmd_queue: queue_init
47
48// chipset descriptions
49chipsets:
50.b8 0xc0 0 0 0
51.b16 nvc0_gpc_mmio_head
52.b16 nvc0_gpc_mmio_tail
53.b16 nvc0_tpc_mmio_head
54.b16 nvc0_tpc_mmio_tail
55.b8 0xc1 0 0 0
56.b16 nvc0_gpc_mmio_head
57.b16 nvc1_gpc_mmio_tail
58.b16 nvc0_tpc_mmio_head
59.b16 nvc1_tpc_mmio_tail
60.b8 0xc3 0 0 0
61.b16 nvc0_gpc_mmio_head
62.b16 nvc0_gpc_mmio_tail
63.b16 nvc0_tpc_mmio_head
64.b16 nvc3_tpc_mmio_tail
65.b8 0xc4 0 0 0
66.b16 nvc0_gpc_mmio_head
67.b16 nvc0_gpc_mmio_tail
68.b16 nvc0_tpc_mmio_head
69.b16 nvc3_tpc_mmio_tail
70.b8 0xc8 0 0 0
71.b16 nvc0_gpc_mmio_head
72.b16 nvc0_gpc_mmio_tail
73.b16 nvc0_tpc_mmio_head
74.b16 nvc0_tpc_mmio_tail
75.b8 0xce 0 0 0
76.b16 nvc0_gpc_mmio_head
77.b16 nvc0_gpc_mmio_tail
78.b16 nvc0_tpc_mmio_head
79.b16 nvc3_tpc_mmio_tail
80.b8 0 0 0 0
81
82// GPC mmio lists
83nvc0_gpc_mmio_head:
84mmctx_data(0x000380, 1)
85mmctx_data(0x000400, 6)
86mmctx_data(0x000450, 9)
87mmctx_data(0x000600, 1)
88mmctx_data(0x000684, 1)
89mmctx_data(0x000700, 5)
90mmctx_data(0x000800, 1)
91mmctx_data(0x000808, 3)
92mmctx_data(0x000828, 1)
93mmctx_data(0x000830, 1)
94mmctx_data(0x0008d8, 1)
95mmctx_data(0x0008e0, 1)
96mmctx_data(0x0008e8, 6)
97mmctx_data(0x00091c, 1)
98mmctx_data(0x000924, 3)
99mmctx_data(0x000b00, 1)
100mmctx_data(0x000b08, 6)
101mmctx_data(0x000bb8, 1)
102mmctx_data(0x000c08, 1)
103mmctx_data(0x000c10, 8)
104mmctx_data(0x000c80, 1)
105mmctx_data(0x000c8c, 1)
106mmctx_data(0x001000, 3)
107mmctx_data(0x001014, 1)
108nvc0_gpc_mmio_tail:
109mmctx_data(0x000c6c, 1);
110nvc1_gpc_mmio_tail:
111
112// TPC mmio lists
113nvc0_tpc_mmio_head:
114mmctx_data(0x000018, 1)
115mmctx_data(0x00003c, 1)
116mmctx_data(0x000048, 1)
117mmctx_data(0x000064, 1)
118mmctx_data(0x000088, 1)
119mmctx_data(0x000200, 6)
120mmctx_data(0x00021c, 2)
121mmctx_data(0x000300, 6)
122mmctx_data(0x0003d0, 1)
123mmctx_data(0x0003e0, 2)
124mmctx_data(0x000400, 3)
125mmctx_data(0x000420, 1)
126mmctx_data(0x0004b0, 1)
127mmctx_data(0x0004e8, 1)
128mmctx_data(0x0004f4, 1)
129mmctx_data(0x000520, 2)
130mmctx_data(0x000604, 4)
131mmctx_data(0x000644, 20)
132mmctx_data(0x000698, 1)
133mmctx_data(0x000750, 2)
134nvc0_tpc_mmio_tail:
135mmctx_data(0x000758, 1)
136mmctx_data(0x0002c4, 1)
137mmctx_data(0x0004bc, 1)
138mmctx_data(0x0006e0, 1)
139nvc3_tpc_mmio_tail:
140mmctx_data(0x000544, 1)
141nvc1_tpc_mmio_tail:
142
143
144.section nvc0_grgpc_code
145bra init
146define(`include_code')
147include(`nvc0_graph.fuc')
148
149// reports an exception to the host
150//
151// In: $r15 error code (see nvc0_graph.fuc)
152//
153error:
154 push $r14
155 mov $r14 -0x67ec // 0x9814
156 sethi $r14 0x400000
157 call nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
158 add b32 $r14 0x41c
159 mov $r15 1
160 call nv_wr32 // HUB_CTXCTL_INTR_UP_SET
161 pop $r14
162 ret
163
164// GPC fuc initialisation, executed by triggering ucode start, will
165// fall through to main loop after completion.
166//
167// Input:
168// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
169// CC_SCRATCH[1]: context base
170//
171// Output:
172// CC_SCRATCH[0]:
173// 31:31: set to signal completion
174// CC_SCRATCH[1]:
175// 31:0: GPC context size
176//
177init:
178 clear b32 $r0
179 mov $sp $r0
180
181 // enable fifo access
182 mov $r1 0x1200
183 mov $r2 2
184 iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
185
186 // setup i0 handler, and route all interrupts to it
187 mov $r1 ih
188 mov $iv0 $r1
189 mov $r1 0x400
190 iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
191
192 // enable fifo interrupt
193 mov $r2 4
194 iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
195
196 // enable interrupts
197 bset $flags ie0
198
199 // figure out which GPC we are, and how many TPCs we have
200 mov $r1 0x608
201 shl b32 $r1 6
202 iord $r2 I[$r1 + 0x000] // UNITS
203 mov $r3 1
204 and $r2 0x1f
205 shl b32 $r3 $r2
206 sub b32 $r3 1
207 st b32 D[$r0 + tpc_count] $r2
208 st b32 D[$r0 + tpc_mask] $r3
209 add b32 $r1 0x400
210 iord $r2 I[$r1 + 0x000] // MYINDEX
211 st b32 D[$r0 + gpc_id] $r2
212
213 // find context data for this chipset
214 mov $r2 0x800
215 shl b32 $r2 6
216 iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
217 mov $r1 chipsets - 12
218 init_find_chipset:
219 add b32 $r1 12
220 ld b32 $r3 D[$r1 + 0x00]
221 cmpu b32 $r3 $r2
222 bra e init_context
223 cmpu b32 $r3 0
224 bra ne init_find_chipset
225 // unknown chipset
226 ret
227
228 // initialise context base, and size tracking
229 init_context:
230 mov $r2 0x800
231 shl b32 $r2 6
232 iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base
233 clear b32 $r3 // track GPC context size here
234
235 // set mmctx base addresses now so we don't have to do it later,
236 // they don't currently ever change
237 mov $r4 0x700
238 shl b32 $r4 6
239 shr b32 $r5 $r2 8
240 iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE
241 iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE
242
243 // calculate GPC mmio context size, store the chipset-specific
244 // mmio list pointers somewhere we can get at them later without
245 // re-parsing the chipset list
246 clear b32 $r14
247 clear b32 $r15
248 ld b16 $r14 D[$r1 + 4]
249 ld b16 $r15 D[$r1 + 6]
250 st b16 D[$r0 + gpc_mmio_list_head] $r14
251 st b16 D[$r0 + gpc_mmio_list_tail] $r15
252 call mmctx_size
253 add b32 $r2 $r15
254 add b32 $r3 $r15
255
256 // calculate per-TPC mmio context size, store the list pointers
257 ld b16 $r14 D[$r1 + 8]
258 ld b16 $r15 D[$r1 + 10]
259 st b16 D[$r0 + tpc_mmio_list_head] $r14
260 st b16 D[$r0 + tpc_mmio_list_tail] $r15
261 call mmctx_size
262 ld b32 $r14 D[$r0 + tpc_count]
263 mulu $r14 $r15
264 add b32 $r2 $r14
265 add b32 $r3 $r14
266
267 // round up base/size to 256 byte boundary (for strand SWBASE)
268 add b32 $r4 0x1300
269 shr b32 $r3 2
270 iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!?
271 shr b32 $r2 8
272 shr b32 $r3 6
273 add b32 $r2 1
274 add b32 $r3 1
275 shl b32 $r2 8
276 shl b32 $r3 8
277
278 // calculate size of strand context data
279 mov b32 $r15 $r2
280 call strand_ctx_init
281 add b32 $r3 $r15
282
283 // save context size, and tell HUB we're done
284 mov $r1 0x800
285 shl b32 $r1 6
286 iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size
287 add b32 $r1 0x800
288 clear b32 $r2
289 bset $r2 31
290 iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
291
292// Main program loop, very simple, sleeps until woken up by the interrupt
293// handler, pulls a command from the queue and executes its handler
294//
295main:
296 bset $flags $p0
297 sleep $p0
298 mov $r13 cmd_queue
299 call queue_get
300 bra $p1 main
301
302 // 0x0000-0x0003 are all context transfers
303 cmpu b32 $r14 0x04
304 bra nc main_not_ctx_xfer
305 // fetch $flags and mask off $p1/$p2
306 mov $r1 $flags
307 mov $r2 0x0006
308 not b32 $r2
309 and $r1 $r2
310 // set $p1/$p2 according to transfer type
311 shl b32 $r14 1
312 or $r1 $r14
313 mov $flags $r1
314 // transfer context data
315 call ctx_xfer
316 bra main
317
318 main_not_ctx_xfer:
319 shl b32 $r15 $r14 16
320 or $r15 E_BAD_COMMAND
321 call error
322 bra main
323
324// interrupt handler
325ih:
326 push $r8
327 mov $r8 $flags
328 push $r8
329 push $r9
330 push $r10
331 push $r11
332 push $r13
333 push $r14
334 push $r15
335
336 // incoming fifo command?
337 iord $r10 I[$r0 + 0x200] // INTR
338 and $r11 $r10 0x00000004
339 bra e ih_no_fifo
340 // queue incoming fifo command for later processing
341 mov $r11 0x1900
342 mov $r13 cmd_queue
343 iord $r14 I[$r11 + 0x100] // FIFO_CMD
344 iord $r15 I[$r11 + 0x000] // FIFO_DATA
345 call queue_put
346 add b32 $r11 0x400
347 mov $r14 1
348 iowr I[$r11 + 0x000] $r14 // FIFO_ACK
349
350 // ack, and wake up main()
351 ih_no_fifo:
352 iowr I[$r0 + 0x100] $r10 // INTR_ACK
353
354 pop $r15
355 pop $r14
356 pop $r13
357 pop $r11
358 pop $r10
359 pop $r9
360 pop $r8
361 mov $flags $r8
362 pop $r8
363 bclr $flags $p0
364 iret
365
366// Set this GPC's bit in HUB_BAR, used to signal completion of various
367// activities to the HUB fuc
368//
369hub_barrier_done:
370 mov $r15 1
371 ld b32 $r14 D[$r0 + gpc_id]
372 shl b32 $r15 $r14
373 mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
374 sethi $r14 0x400000
375 call nv_wr32
376 ret
377
378// Disables various things, waits a bit, and re-enables them..
379//
380// Not sure how exactly this helps, perhaps "ENABLE" is not such a
381// good description for the bits we turn off? Anyways, without this,
382// funny things happen.
383//
384ctx_redswitch:
385 mov $r14 0x614
386 shl b32 $r14 6
387 mov $r15 0x020
388 iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER
389 mov $r15 8
390 ctx_redswitch_delay:
391 sub b32 $r15 1
392 bra ne ctx_redswitch_delay
393 mov $r15 0xa20
394 iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
395 ret
396
397// Transfer GPC context data between GPU and storage area
398//
399// In: $r15 context base address
400// $p1 clear on save, set on load
401// $p2 set if opposite direction done/will be done, so:
402// on save it means: "a load will follow this save"
403// on load it means: "a save preceeded this load"
404//
405ctx_xfer:
406 // set context base address
407 mov $r1 0xa04
408 shl b32 $r1 6
409 iowr I[$r1 + 0x000] $r15// MEM_BASE
410 bra not $p1 ctx_xfer_not_load
411 call ctx_redswitch
412 ctx_xfer_not_load:
413
414 // strands
415 mov $r1 0x4afc
416 sethi $r1 0x20000
417 mov $r2 0xc
418 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
419 call strand_wait
420 mov $r2 0x47fc
421 sethi $r2 0x20000
422 iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
423 xbit $r2 $flags $p1
424 add b32 $r2 3
425 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
426
427 // mmio context
428 xbit $r10 $flags $p1 // direction
429 or $r10 2 // first
430 mov $r11 0x0000
431 sethi $r11 0x500000
432 ld b32 $r12 D[$r0 + gpc_id]
433 shl b32 $r12 15
434 add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
435 ld b32 $r12 D[$r0 + gpc_mmio_list_head]
436 ld b32 $r13 D[$r0 + gpc_mmio_list_tail]
437 mov $r14 0 // not multi
438 call mmctx_xfer
439
440 // per-TPC mmio context
441 xbit $r10 $flags $p1 // direction
442 or $r10 4 // last
443 mov $r11 0x4000
444 sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
445 ld b32 $r12 D[$r0 + gpc_id]
446 shl b32 $r12 15
447 add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
448 ld b32 $r12 D[$r0 + tpc_mmio_list_head]
449 ld b32 $r13 D[$r0 + tpc_mmio_list_tail]
450 ld b32 $r15 D[$r0 + tpc_mask]
451 mov $r14 0x800 // stride = 0x800
452 call mmctx_xfer
453
454 // wait for strands to finish
455 call strand_wait
456
457 // if load, or a save without a load following, do some
458 // unknown stuff that's done after finishing a block of
459 // strand commands
460 bra $p1 ctx_xfer_post
461 bra not $p2 ctx_xfer_done
462 ctx_xfer_post:
463 mov $r1 0x4afc
464 sethi $r1 0x20000
465 mov $r2 0xd
466 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
467 call strand_wait
468
469 // mark completion in HUB's barrier
470 ctx_xfer_done:
471 call hub_barrier_done
472 ret
473
474.align 256
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
new file mode 100644
index 000000000000..1896c898f5ba
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
@@ -0,0 +1,483 @@
1uint32_t nvc0_grgpc_data[] = {
2 0x00000000,
3 0x00000000,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25 0x00000000,
26 0x00000000,
27 0x000000c0,
28 0x011000b0,
29 0x01640114,
30 0x000000c1,
31 0x011400b0,
32 0x01780114,
33 0x000000c3,
34 0x011000b0,
35 0x01740114,
36 0x000000c4,
37 0x011000b0,
38 0x01740114,
39 0x000000c8,
40 0x011000b0,
41 0x01640114,
42 0x000000ce,
43 0x011000b0,
44 0x01740114,
45 0x00000000,
46 0x00000380,
47 0x14000400,
48 0x20000450,
49 0x00000600,
50 0x00000684,
51 0x10000700,
52 0x00000800,
53 0x08000808,
54 0x00000828,
55 0x00000830,
56 0x000008d8,
57 0x000008e0,
58 0x140008e8,
59 0x0000091c,
60 0x08000924,
61 0x00000b00,
62 0x14000b08,
63 0x00000bb8,
64 0x00000c08,
65 0x1c000c10,
66 0x00000c80,
67 0x00000c8c,
68 0x08001000,
69 0x00001014,
70 0x00000c6c,
71 0x00000018,
72 0x0000003c,
73 0x00000048,
74 0x00000064,
75 0x00000088,
76 0x14000200,
77 0x0400021c,
78 0x14000300,
79 0x000003d0,
80 0x040003e0,
81 0x08000400,
82 0x00000420,
83 0x000004b0,
84 0x000004e8,
85 0x000004f4,
86 0x04000520,
87 0x0c000604,
88 0x4c000644,
89 0x00000698,
90 0x04000750,
91 0x00000758,
92 0x000002c4,
93 0x000004bc,
94 0x000006e0,
95 0x00000544,
96};
97
98uint32_t nvc0_grgpc_code[] = {
99 0x03060ef5,
100 0x9800d898,
101 0x86f001d9,
102 0x0489b808,
103 0xf00c1bf4,
104 0x21f502f7,
105 0x00f802ec,
106 0xb60798c4,
107 0x8dbb0384,
108 0x0880b600,
109 0x80008e80,
110 0x90b6018f,
111 0x0f94f001,
112 0xf801d980,
113 0x0131f400,
114 0x9800d898,
115 0x89b801d9,
116 0x210bf404,
117 0xb60789c4,
118 0x9dbb0394,
119 0x0890b600,
120 0x98009e98,
121 0x80b6019f,
122 0x0f84f001,
123 0xf400d880,
124 0x00f80132,
125 0x0728b7f1,
126 0xb906b4b6,
127 0xc9f002ec,
128 0x00bcd01f,
129 0xc800bccf,
130 0x1bf41fcc,
131 0x06a7f0fa,
132 0x010321f5,
133 0xf840bfcf,
134 0x28b7f100,
135 0x06b4b607,
136 0xb980bfd0,
137 0xc9f002ec,
138 0x1ec9f01f,
139 0xcf00bcd0,
140 0xccc800bc,
141 0xfa1bf41f,
142 0x87f100f8,
143 0x84b60430,
144 0x1ff9f006,
145 0xf8008fd0,
146 0x3087f100,
147 0x0684b604,
148 0xf80080d0,
149 0x3c87f100,
150 0x0684b608,
151 0x99f094bd,
152 0x0089d000,
153 0x081887f1,
154 0xd00684b6,
155 0x87f1008a,
156 0x84b60400,
157 0x0088cf06,
158 0xf4888aff,
159 0x87f1f31b,
160 0x84b6085c,
161 0xf094bd06,
162 0x89d00099,
163 0xf100f800,
164 0xb6083c87,
165 0x94bd0684,
166 0xd00099f0,
167 0x87f10089,
168 0x84b60818,
169 0x008ad006,
170 0x040087f1,
171 0xcf0684b6,
172 0x8aff0088,
173 0xf30bf488,
174 0x085c87f1,
175 0xbd0684b6,
176 0x0099f094,
177 0xf80089d0,
178 0x9894bd00,
179 0x85b600e8,
180 0x0180b61a,
181 0xbb0284b6,
182 0xe0b60098,
183 0x04efb804,
184 0xb9eb1bf4,
185 0x00f8029f,
186 0x083c87f1,
187 0xbd0684b6,
188 0x0199f094,
189 0xf10089d0,
190 0xb6071087,
191 0x94bd0684,
192 0xf405bbfd,
193 0x8bd0090b,
194 0x0099f000,
195 0xf405eefd,
196 0x8ed00c0b,
197 0xc08fd080,
198 0xb70199f0,
199 0xc8010080,
200 0xb4b600ab,
201 0x0cb9f010,
202 0xb601aec8,
203 0xbefd11e4,
204 0x008bd005,
205 0xf0008ecf,
206 0x0bf41fe4,
207 0x00ce98fa,
208 0xd005e9fd,
209 0xc0b6c08e,
210 0x04cdb804,
211 0xc8e81bf4,
212 0x1bf402ab,
213 0x008bcf18,
214 0xb01fb4f0,
215 0x1bf410b4,
216 0x02a7f0f7,
217 0xf4c921f4,
218 0xabc81b0e,
219 0x10b4b600,
220 0xf00cb9f0,
221 0x8bd012b9,
222 0x008bcf00,
223 0xf412bbc8,
224 0x87f1fa1b,
225 0x84b6085c,
226 0xf094bd06,
227 0x89d00199,
228 0xf900f800,
229 0x02a7f0a0,
230 0xfcc921f4,
231 0xf100f8a0,
232 0xf04afc87,
233 0x97f00283,
234 0x0089d00c,
235 0x020721f5,
236 0x87f100f8,
237 0x83f04afc,
238 0x0d97f002,
239 0xf50089d0,
240 0xf8020721,
241 0xfca7f100,
242 0x02a3f04f,
243 0x0500aba2,
244 0xd00fc7f0,
245 0xc7f000ac,
246 0x00bcd00b,
247 0x020721f5,
248 0xf000aed0,
249 0xbcd00ac7,
250 0x0721f500,
251 0xf100f802,
252 0xb6083c87,
253 0x94bd0684,
254 0xd00399f0,
255 0x21f50089,
256 0xe7f00213,
257 0x3921f503,
258 0xfca7f102,
259 0x02a3f046,
260 0x0400aba0,
261 0xf040a0d0,
262 0xbcd001c7,
263 0x0721f500,
264 0x010c9202,
265 0xf000acd0,
266 0xbcd002c7,
267 0x0721f500,
268 0x2621f502,
269 0x8087f102,
270 0x0684b608,
271 0xb70089cf,
272 0x95220080,
273 0x8ed008fe,
274 0x408ed000,
275 0xb6808acf,
276 0xa0b606a5,
277 0x00eabb01,
278 0xb60480b6,
279 0x1bf40192,
280 0x08e4b6e8,
281 0xf1f2efbc,
282 0xb6085c87,
283 0x94bd0684,
284 0xd00399f0,
285 0x00f80089,
286 0xe7f1e0f9,
287 0xe3f09814,
288 0x8d21f440,
289 0x041ce0b7,
290 0xf401f7f0,
291 0xe0fc8d21,
292 0x04bd00f8,
293 0xf10004fe,
294 0xf0120017,
295 0x12d00227,
296 0x3e17f100,
297 0x0010fe04,
298 0x040017f1,
299 0xf0c010d0,
300 0x12d00427,
301 0x1031f400,
302 0x060817f1,
303 0xcf0614b6,
304 0x37f00012,
305 0x1f24f001,
306 0xb60432bb,
307 0x02800132,
308 0x04038003,
309 0x040010b7,
310 0x800012cf,
311 0x27f10002,
312 0x24b60800,
313 0x0022cf06,
314 0xb65817f0,
315 0x13980c10,
316 0x0432b800,
317 0xb00b0bf4,
318 0x1bf40034,
319 0xf100f8f1,
320 0xb6080027,
321 0x22cf0624,
322 0xf134bd40,
323 0xb6070047,
324 0x25950644,
325 0x0045d008,
326 0xbd4045d0,
327 0x58f4bde4,
328 0x1f58021e,
329 0x020e4003,
330 0xf5040f40,
331 0xbb013d21,
332 0x3fbb002f,
333 0x041e5800,
334 0x40051f58,
335 0x0f400a0e,
336 0x3d21f50c,
337 0x030e9801,
338 0xbb00effd,
339 0x3ebb002e,
340 0x0040b700,
341 0x0235b613,
342 0xb60043d0,
343 0x35b60825,
344 0x0120b606,
345 0xb60130b6,
346 0x34b60824,
347 0x022fb908,
348 0x026321f5,
349 0xf1003fbb,
350 0xb6080017,
351 0x13d00614,
352 0x0010b740,
353 0xf024bd08,
354 0x12d01f29,
355 0x0031f400,
356 0xf00028f4,
357 0x21f41cd7,
358 0xf401f439,
359 0xf404e4b0,
360 0x81fe1e18,
361 0x0627f001,
362 0x12fd20bd,
363 0x01e4b604,
364 0xfe051efd,
365 0x21f50018,
366 0x0ef404c3,
367 0x10ef94d3,
368 0xf501f5f0,
369 0xf402ec21,
370 0x80f9c60e,
371 0xf90188fe,
372 0xf990f980,
373 0xf9b0f9a0,
374 0xf9e0f9d0,
375 0x800acff0,
376 0xf404abc4,
377 0xb7f11d0b,
378 0xd7f01900,
379 0x40becf1c,
380 0xf400bfcf,
381 0xb0b70421,
382 0xe7f00400,
383 0x00bed001,
384 0xfc400ad0,
385 0xfce0fcf0,
386 0xfcb0fcd0,
387 0xfc90fca0,
388 0x0088fe80,
389 0x32f480fc,
390 0xf001f800,
391 0x0e9801f7,
392 0x04febb00,
393 0x9418e7f1,
394 0xf440e3f0,
395 0x00f88d21,
396 0x0614e7f1,
397 0xf006e4b6,
398 0xefd020f7,
399 0x08f7f000,
400 0xf401f2b6,
401 0xf7f1fd1b,
402 0xefd00a20,
403 0xf100f800,
404 0xb60a0417,
405 0x1fd00614,
406 0x0711f400,
407 0x04a421f5,
408 0x4afc17f1,
409 0xf00213f0,
410 0x12d00c27,
411 0x0721f500,
412 0xfc27f102,
413 0x0223f047,
414 0xf00020d0,
415 0x20b6012c,
416 0x0012d003,
417 0xf001acf0,
418 0xb7f002a5,
419 0x50b3f000,
420 0xb6000c98,
421 0xbcbb0fc4,
422 0x010c9800,
423 0xf0020d98,
424 0x21f500e7,
425 0xacf0015c,
426 0x04a5f001,
427 0x4000b7f1,
428 0x9850b3f0,
429 0xc4b6000c,
430 0x00bcbb0f,
431 0x98050c98,
432 0x0f98060d,
433 0x00e7f104,
434 0x5c21f508,
435 0x0721f501,
436 0x0601f402,
437 0xf11412f4,
438 0xf04afc17,
439 0x27f00213,
440 0x0012d00d,
441 0x020721f5,
442 0x048f21f5,
443 0x000000f8,
444 0x00000000,
445 0x00000000,
446 0x00000000,
447 0x00000000,
448 0x00000000,
449 0x00000000,
450 0x00000000,
451 0x00000000,
452 0x00000000,
453 0x00000000,
454 0x00000000,
455 0x00000000,
456 0x00000000,
457 0x00000000,
458 0x00000000,
459 0x00000000,
460 0x00000000,
461 0x00000000,
462 0x00000000,
463 0x00000000,
464 0x00000000,
465 0x00000000,
466 0x00000000,
467 0x00000000,
468 0x00000000,
469 0x00000000,
470 0x00000000,
471 0x00000000,
472 0x00000000,
473 0x00000000,
474 0x00000000,
475 0x00000000,
476 0x00000000,
477 0x00000000,
478 0x00000000,
479 0x00000000,
480 0x00000000,
481 0x00000000,
482 0x00000000,
483};
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
new file mode 100644
index 000000000000..a1a599124cf4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
@@ -0,0 +1,808 @@
1/* fuc microcode for nvc0 PGRAPH/HUB
2 *
3 * Copyright 2011 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Ben Skeggs
24 */
25
26/* To build:
27 * m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
28 */
29
30.section nvc0_grhub_data
31include(`nvc0_graph.fuc')
32gpc_count: .b32 0
33rop_count: .b32 0
34cmd_queue: queue_init
35hub_mmio_list_head: .b32 0
36hub_mmio_list_tail: .b32 0
37
38ctx_current: .b32 0
39
40chipsets:
41.b8 0xc0 0 0 0
42.b16 nvc0_hub_mmio_head
43.b16 nvc0_hub_mmio_tail
44.b8 0xc1 0 0 0
45.b16 nvc0_hub_mmio_head
46.b16 nvc1_hub_mmio_tail
47.b8 0xc3 0 0 0
48.b16 nvc0_hub_mmio_head
49.b16 nvc0_hub_mmio_tail
50.b8 0xc4 0 0 0
51.b16 nvc0_hub_mmio_head
52.b16 nvc0_hub_mmio_tail
53.b8 0xc8 0 0 0
54.b16 nvc0_hub_mmio_head
55.b16 nvc0_hub_mmio_tail
56.b8 0xce 0 0 0
57.b16 nvc0_hub_mmio_head
58.b16 nvc0_hub_mmio_tail
59.b8 0 0 0 0
60
61nvc0_hub_mmio_head:
62mmctx_data(0x17e91c, 2)
63mmctx_data(0x400204, 2)
64mmctx_data(0x404004, 11)
65mmctx_data(0x404044, 1)
66mmctx_data(0x404094, 14)
67mmctx_data(0x4040d0, 7)
68mmctx_data(0x4040f8, 1)
69mmctx_data(0x404130, 3)
70mmctx_data(0x404150, 3)
71mmctx_data(0x404164, 2)
72mmctx_data(0x404174, 3)
73mmctx_data(0x404200, 8)
74mmctx_data(0x404404, 14)
75mmctx_data(0x404460, 4)
76mmctx_data(0x404480, 1)
77mmctx_data(0x404498, 1)
78mmctx_data(0x404604, 4)
79mmctx_data(0x404618, 32)
80mmctx_data(0x404698, 21)
81mmctx_data(0x4046f0, 2)
82mmctx_data(0x404700, 22)
83mmctx_data(0x405800, 1)
84mmctx_data(0x405830, 3)
85mmctx_data(0x405854, 1)
86mmctx_data(0x405870, 4)
87mmctx_data(0x405a00, 2)
88mmctx_data(0x405a18, 1)
89mmctx_data(0x406020, 1)
90mmctx_data(0x406028, 4)
91mmctx_data(0x4064a8, 2)
92mmctx_data(0x4064b4, 2)
93mmctx_data(0x407804, 1)
94mmctx_data(0x40780c, 6)
95mmctx_data(0x4078bc, 1)
96mmctx_data(0x408000, 7)
97mmctx_data(0x408064, 1)
98mmctx_data(0x408800, 3)
99mmctx_data(0x408900, 4)
100mmctx_data(0x408980, 1)
101nvc0_hub_mmio_tail:
102mmctx_data(0x4064c0, 2)
103nvc1_hub_mmio_tail:
104
105.align 256
106chan_data:
107chan_mmio_count: .b32 0
108chan_mmio_address: .b32 0
109
110.align 256
111xfer_data: .b32 0
112
113.section nvc0_grhub_code
114bra init
115define(`include_code')
116include(`nvc0_graph.fuc')
117
118// reports an exception to the host
119//
120// In: $r15 error code (see nvc0_graph.fuc)
121//
122error:
123 push $r14
124 mov $r14 0x814
125 shl b32 $r14 6
126 iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error code
127 mov $r14 0xc1c
128 shl b32 $r14 6
129 mov $r15 1
130 iowr I[$r14 + 0x000] $r15 // INTR_UP_SET
131 pop $r14
132 ret
133
134// HUB fuc initialisation, executed by triggering ucode start, will
135// fall through to main loop after completion.
136//
137// Input:
138// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
139//
140// Output:
141// CC_SCRATCH[0]:
142// 31:31: set to signal completion
143// CC_SCRATCH[1]:
144// 31:0: total PGRAPH context size
145//
146init:
147 clear b32 $r0
148 mov $sp $r0
149 mov $xdbase $r0
150
151 // enable fifo access
152 mov $r1 0x1200
153 mov $r2 2
154 iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
155
156 // setup i0 handler, and route all interrupts to it
157 mov $r1 ih
158 mov $iv0 $r1
159 mov $r1 0x400
160 iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
161
162 // route HUB_CHANNEL_SWITCH to fuc interrupt 8
163 mov $r3 0x404
164 shl b32 $r3 6
165 mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
166 iowr I[$r3 + 0x000] $r2
167
168 // not sure what these are, route them because NVIDIA does, and
169 // the IRQ handler will signal the host if we ever get one.. we
170 // may find out if/why we need to handle these if so..
171 //
172 mov $r2 0x2004
173 iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
174 mov $r2 0x200b
175 iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
176 mov $r2 0x200c
177 iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
178
179 // enable all INTR_UP interrupts
180 mov $r2 0xc24
181 shl b32 $r2 6
182 not b32 $r3 $r0
183 iowr I[$r2] $r3
184
185 // enable fifo, ctxsw, 9, 10, 15 interrupts
186 mov $r2 -0x78fc // 0x8704
187 sethi $r2 0
188 iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
189
190 // fifo level triggered, rest edge
191 sub b32 $r1 0x100
192 mov $r2 4
193 iowr I[$r1] $r2
194
195 // enable interrupts
196 bset $flags ie0
197
198 // fetch enabled GPC/ROP counts
199 mov $r14 -0x69fc // 0x409604
200 sethi $r14 0x400000
201 call nv_rd32
202 extr $r1 $r15 16:20
203 st b32 D[$r0 + rop_count] $r1
204 and $r15 0x1f
205 st b32 D[$r0 + gpc_count] $r15
206
207 // set BAR_REQMASK to GPC mask
208 mov $r1 1
209 shl b32 $r1 $r15
210 sub b32 $r1 1
211 mov $r2 0x40c
212 shl b32 $r2 6
213 iowr I[$r2 + 0x000] $r1
214 iowr I[$r2 + 0x100] $r1
215
216 // find context data for this chipset
217 mov $r2 0x800
218 shl b32 $r2 6
219 iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
220 mov $r15 chipsets - 8
221 init_find_chipset:
222 add b32 $r15 8
223 ld b32 $r3 D[$r15 + 0x00]
224 cmpu b32 $r3 $r2
225 bra e init_context
226 cmpu b32 $r3 0
227 bra ne init_find_chipset
228 // unknown chipset
229 ret
230
231 // context size calculation, reserve first 256 bytes for use by fuc
232 init_context:
233 mov $r1 256
234
235 // calculate size of mmio context data
236 ld b16 $r14 D[$r15 + 4]
237 ld b16 $r15 D[$r15 + 6]
238 sethi $r14 0
239 st b32 D[$r0 + hub_mmio_list_head] $r14
240 st b32 D[$r0 + hub_mmio_list_tail] $r15
241 call mmctx_size
242
243 // set mmctx base addresses now so we don't have to do it later,
244 // they don't (currently) ever change
245 mov $r3 0x700
246 shl b32 $r3 6
247 shr b32 $r4 $r1 8
248 iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE
249 iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE
250 add b32 $r3 0x1300
251 add b32 $r1 $r15
252 shr b32 $r15 2
253 iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!?
254
255 // strands, base offset needs to be aligned to 256 bytes
256 shr b32 $r1 8
257 add b32 $r1 1
258 shl b32 $r1 8
259 mov b32 $r15 $r1
260 call strand_ctx_init
261 add b32 $r1 $r15
262
263 // initialise each GPC in sequence by passing in the offset of its
264 // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
265 // has previously been uploaded by the host) running.
266 //
267 // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
268 // when it has completed, and return the size of its context data
269 // in GPCn_CC_SCRATCH[1]
270 //
271 ld b32 $r3 D[$r0 + gpc_count]
272 mov $r4 0x2000
273 sethi $r4 0x500000
274 init_gpc:
275 // setup, and start GPC ucode running
276 add b32 $r14 $r4 0x804
277 mov b32 $r15 $r1
278 call nv_wr32 // CC_SCRATCH[1] = ctx offset
279 add b32 $r14 $r4 0x800
280 mov b32 $r15 $r2
281 call nv_wr32 // CC_SCRATCH[0] = chipset
282 add b32 $r14 $r4 0x10c
283 clear b32 $r15
284 call nv_wr32
285 add b32 $r14 $r4 0x104
286 call nv_wr32 // ENTRY
287 add b32 $r14 $r4 0x100
288 mov $r15 2 // CTRL_START_TRIGGER
289 call nv_wr32 // CTRL
290
291 // wait for it to complete, and adjust context size
292 add b32 $r14 $r4 0x800
293 init_gpc_wait:
294 call nv_rd32
295 xbit $r15 $r15 31
296 bra e init_gpc_wait
297 add b32 $r14 $r4 0x804
298 call nv_rd32
299 add b32 $r1 $r15
300
301 // next!
302 add b32 $r4 0x8000
303 sub b32 $r3 1
304 bra ne init_gpc
305
306 // save context size, and tell host we're ready
307 mov $r2 0x800
308 shl b32 $r2 6
309 iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size
310 add b32 $r2 0x800
311 clear b32 $r1
312 bset $r1 31
313 iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000
314
315// Main program loop, very simple, sleeps until woken up by the interrupt
316// handler, pulls a command from the queue and executes its handler
317//
318main:
319 // sleep until we have something to do
320 bset $flags $p0
321 sleep $p0
322 mov $r13 cmd_queue
323 call queue_get
324 bra $p1 main
325
326 // context switch, requested by GPU?
327 cmpu b32 $r14 0x4001
328 bra ne main_not_ctx_switch
329 trace_set(T_AUTO)
330 mov $r1 0xb00
331 shl b32 $r1 6
332 iord $r2 I[$r1 + 0x100] // CHAN_NEXT
333 iord $r1 I[$r1 + 0x000] // CHAN_CUR
334
335 xbit $r3 $r1 31
336 bra e chsw_no_prev
337 xbit $r3 $r2 31
338 bra e chsw_prev_no_next
339 push $r2
340 mov b32 $r2 $r1
341 trace_set(T_SAVE)
342 bclr $flags $p1
343 bset $flags $p2
344 call ctx_xfer
345 trace_clr(T_SAVE);
346 pop $r2
347 trace_set(T_LOAD);
348 bset $flags $p1
349 call ctx_xfer
350 trace_clr(T_LOAD);
351 bra chsw_done
352 chsw_prev_no_next:
353 push $r2
354 mov b32 $r2 $r1
355 bclr $flags $p1
356 bclr $flags $p2
357 call ctx_xfer
358 pop $r2
359 mov $r1 0xb00
360 shl b32 $r1 6
361 iowr I[$r1] $r2
362 bra chsw_done
363 chsw_no_prev:
364 xbit $r3 $r2 31
365 bra e chsw_done
366 bset $flags $p1
367 bclr $flags $p2
368 call ctx_xfer
369
370 // ack the context switch request
371 chsw_done:
372 mov $r1 0xb0c
373 shl b32 $r1 6
374 mov $r2 1
375 iowr I[$r1 + 0x000] $r2 // 0x409b0c
376 trace_clr(T_AUTO)
377 bra main
378
379 // request to set current channel? (*not* a context switch)
380 main_not_ctx_switch:
381 cmpu b32 $r14 0x0001
382 bra ne main_not_ctx_chan
383 mov b32 $r2 $r15
384 call ctx_chan
385 bra main_done
386
387 // request to store current channel context?
388 main_not_ctx_chan:
389 cmpu b32 $r14 0x0002
390 bra ne main_not_ctx_save
391 trace_set(T_SAVE)
392 bclr $flags $p1
393 bclr $flags $p2
394 call ctx_xfer
395 trace_clr(T_SAVE)
396 bra main_done
397
398 main_not_ctx_save:
399 shl b32 $r15 $r14 16
400 or $r15 E_BAD_COMMAND
401 call error
402 bra main
403
404 main_done:
405 mov $r1 0x820
406 shl b32 $r1 6
407 clear b32 $r2
408 bset $r2 31
409 iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
410 bra main
411
412// interrupt handler
413ih:
414 push $r8
415 mov $r8 $flags
416 push $r8
417 push $r9
418 push $r10
419 push $r11
420 push $r13
421 push $r14
422 push $r15
423
424 // incoming fifo command?
425 iord $r10 I[$r0 + 0x200] // INTR
426 and $r11 $r10 0x00000004
427 bra e ih_no_fifo
428 // queue incoming fifo command for later processing
429 mov $r11 0x1900
430 mov $r13 cmd_queue
431 iord $r14 I[$r11 + 0x100] // FIFO_CMD
432 iord $r15 I[$r11 + 0x000] // FIFO_DATA
433 call queue_put
434 add b32 $r11 0x400
435 mov $r14 1
436 iowr I[$r11 + 0x000] $r14 // FIFO_ACK
437
438 // context switch request?
439 ih_no_fifo:
440 and $r11 $r10 0x00000100
441 bra e ih_no_ctxsw
442 // enqueue a context switch for later processing
443 mov $r13 cmd_queue
444 mov $r14 0x4001
445 call queue_put
446
447 // anything we didn't handle, bring it to the host's attention
448 ih_no_ctxsw:
449 mov $r11 0x104
450 not b32 $r11
451 and $r11 $r10 $r11
452 bra e ih_no_other
453 mov $r10 0xc1c
454 shl b32 $r10 6
455 iowr I[$r10] $r11 // INTR_UP_SET
456
457 // ack, and wake up main()
458 ih_no_other:
459 iowr I[$r0 + 0x100] $r10 // INTR_ACK
460
461 pop $r15
462 pop $r14
463 pop $r13
464 pop $r11
465 pop $r10
466 pop $r9
467 pop $r8
468 mov $flags $r8
469 pop $r8
470 bclr $flags $p0
471 iret
472
473// Not real sure, but, MEM_CMD 7 will hang forever if this isn't done
474ctx_4160s:
475 mov $r14 0x4160
476 sethi $r14 0x400000
477 mov $r15 1
478 call nv_wr32
479 ctx_4160s_wait:
480 call nv_rd32
481 xbit $r15 $r15 4
482 bra e ctx_4160s_wait
483 ret
484
485// Without clearing again at end of xfer, some things cause PGRAPH
486// to hang with STATUS=0x00000007 until it's cleared.. fbcon can
487// still function with it set however...
488ctx_4160c:
489 mov $r14 0x4160
490 sethi $r14 0x400000
491 clear b32 $r15
492 call nv_wr32
493 ret
494
495// Again, not real sure
496//
497// In: $r15 value to set 0x404170 to
498//
499ctx_4170s:
500 mov $r14 0x4170
501 sethi $r14 0x400000
502 or $r15 0x10
503 call nv_wr32
504 ret
505
506// Waits for a ctx_4170s() call to complete
507//
508ctx_4170w:
509 mov $r14 0x4170
510 sethi $r14 0x400000
511 call nv_rd32
512 and $r15 0x10
513 bra ne ctx_4170w
514 ret
515
516// Disables various things, waits a bit, and re-enables them..
517//
518// Not sure how exactly this helps, perhaps "ENABLE" is not such a
519// good description for the bits we turn off? Anyways, without this,
520// funny things happen.
521//
522ctx_redswitch:
523 mov $r14 0x614
524 shl b32 $r14 6
525 mov $r15 0x270
526 iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
527 mov $r15 8
528 ctx_redswitch_delay:
529 sub b32 $r15 1
530 bra ne ctx_redswitch_delay
531 mov $r15 0x770
532 iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
533 ret
534
535// Not a clue what this is for, except that unless the value is 0x10, the
536// strand context is saved (and presumably restored) incorrectly..
537//
538// In: $r15 value to set to (0x00/0x10 are used)
539//
540ctx_86c:
541 mov $r14 0x86c
542 shl b32 $r14 6
543 iowr I[$r14] $r15 // HUB(0x86c) = val
544 mov $r14 -0x75ec
545 sethi $r14 0x400000
546 call nv_wr32 // ROP(0xa14) = val
547 mov $r14 -0x5794
548 sethi $r14 0x410000
549 call nv_wr32 // GPC(0x86c) = val
550 ret
551
552// ctx_load - load's a channel's ctxctl data, and selects its vm
553//
554// In: $r2 channel address
555//
556ctx_load:
557 trace_set(T_CHAN)
558
559 // switch to channel, somewhat magic in parts..
560 mov $r10 12 // DONE_UNK12
561 call wait_donez
562 mov $r1 0xa24
563 shl b32 $r1 6
564 iowr I[$r1 + 0x000] $r0 // 0x409a24
565 mov $r3 0xb00
566 shl b32 $r3 6
567 iowr I[$r3 + 0x100] $r2 // CHAN_NEXT
568 mov $r1 0xa0c
569 shl b32 $r1 6
570 mov $r4 7
571 iowr I[$r1 + 0x000] $r2 // MEM_CHAN
572 iowr I[$r1 + 0x100] $r4 // MEM_CMD
573 ctx_chan_wait_0:
574 iord $r4 I[$r1 + 0x100]
575 and $r4 0x1f
576 bra ne ctx_chan_wait_0
577 iowr I[$r3 + 0x000] $r2 // CHAN_CUR
578
579 // load channel header, fetch PGRAPH context pointer
580 mov $xtargets $r0
581 bclr $r2 31
582 shl b32 $r2 4
583 add b32 $r2 2
584
585 trace_set(T_LCHAN)
586 mov $r1 0xa04
587 shl b32 $r1 6
588 iowr I[$r1 + 0x000] $r2 // MEM_BASE
589 mov $r1 0xa20
590 shl b32 $r1 6
591 mov $r2 0x0002
592 sethi $r2 0x80000000
593 iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
594 mov $r1 0x10 // chan + 0x0210
595 mov $r2 xfer_data
596 sethi $r2 0x00020000 // 16 bytes
597 xdld $r1 $r2
598 xdwait
599 trace_clr(T_LCHAN)
600
601 // update current context
602 ld b32 $r1 D[$r0 + xfer_data + 4]
603 shl b32 $r1 24
604 ld b32 $r2 D[$r0 + xfer_data + 0]
605 shr b32 $r2 8
606 or $r1 $r2
607 st b32 D[$r0 + ctx_current] $r1
608
609 // set transfer base to start of context, and fetch context header
610 trace_set(T_LCTXH)
611 mov $r2 0xa04
612 shl b32 $r2 6
613 iowr I[$r2 + 0x000] $r1 // MEM_BASE
614 mov $r2 1
615 mov $r1 0xa20
616 shl b32 $r1 6
617 iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
618 mov $r1 chan_data
619 sethi $r1 0x00060000 // 256 bytes
620 xdld $r0 $r1
621 xdwait
622 trace_clr(T_LCTXH)
623
624 trace_clr(T_CHAN)
625 ret
626
627// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
628// the active channel for ctxctl, but not actually transfer
629// any context data. intended for use only during initial
630// context construction.
631//
632// In: $r2 channel address
633//
634ctx_chan:
635 call ctx_4160s
636 call ctx_load
637 mov $r10 12 // DONE_UNK12
638 call wait_donez
639 mov $r1 0xa10
640 shl b32 $r1 6
641 mov $r2 5
642 iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???)
643 ctx_chan_wait:
644 iord $r2 I[$r1 + 0x000]
645 or $r2 $r2
646 bra ne ctx_chan_wait
647 call ctx_4160c
648 ret
649
650// Execute per-context state overrides list
651//
652// Only executed on the first load of a channel. Might want to look into
653// removing this and having the host directly modify the channel's context
654// to change this state... The nouveau DRM already builds this list as
655// it's definitely needed for NVIDIA's, so we may as well use it for now
656//
657// Input: $r1 mmio list length
658//
659ctx_mmio_exec:
660 // set transfer base to be the mmio list
661 ld b32 $r3 D[$r0 + chan_mmio_address]
662 mov $r2 0xa04
663 shl b32 $r2 6
664 iowr I[$r2 + 0x000] $r3 // MEM_BASE
665
666 clear b32 $r3
667 ctx_mmio_loop:
668 // fetch next 256 bytes of mmio list if necessary
669 and $r4 $r3 0xff
670 bra ne ctx_mmio_pull
671 mov $r5 xfer_data
672 sethi $r5 0x00060000 // 256 bytes
673 xdld $r3 $r5
674 xdwait
675
676 // execute a single list entry
677 ctx_mmio_pull:
678 ld b32 $r14 D[$r4 + xfer_data + 0x00]
679 ld b32 $r15 D[$r4 + xfer_data + 0x04]
680 call nv_wr32
681
682 // next!
683 add b32 $r3 8
684 sub b32 $r1 1
685 bra ne ctx_mmio_loop
686
687 // set transfer base back to the current context
688 ctx_mmio_done:
689 ld b32 $r3 D[$r0 + ctx_current]
690 iowr I[$r2 + 0x000] $r3 // MEM_BASE
691
692 // disable the mmio list now, we don't need/want to execute it again
693 st b32 D[$r0 + chan_mmio_count] $r0
694 mov $r1 chan_data
695 sethi $r1 0x00060000 // 256 bytes
696 xdst $r0 $r1
697 xdwait
698 ret
699
700// Transfer HUB context data between GPU and storage area
701//
702// In: $r2 channel address
703// $p1 clear on save, set on load
704// $p2 set if opposite direction done/will be done, so:
705// on save it means: "a load will follow this save"
706// on load it means: "a save preceeded this load"
707//
708ctx_xfer:
709 bra not $p1 ctx_xfer_pre
710 bra $p2 ctx_xfer_pre_load
711 ctx_xfer_pre:
712 mov $r15 0x10
713 call ctx_86c
714 call ctx_4160s
715 bra not $p1 ctx_xfer_exec
716
717 ctx_xfer_pre_load:
718 mov $r15 2
719 call ctx_4170s
720 call ctx_4170w
721 call ctx_redswitch
722 clear b32 $r15
723 call ctx_4170s
724 call ctx_load
725
726 // fetch context pointer, and initiate xfer on all GPCs
727 ctx_xfer_exec:
728 ld b32 $r1 D[$r0 + ctx_current]
729 mov $r2 0x414
730 shl b32 $r2 6
731 iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
732 mov $r14 -0x5b00
733 sethi $r14 0x410000
734 mov b32 $r15 $r1
735 call nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
736 add b32 $r14 4
737 xbit $r15 $flags $p1
738 xbit $r2 $flags $p2
739 shl b32 $r2 1
740 or $r15 $r2
741 call nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
742
743 // strands
744 mov $r1 0x4afc
745 sethi $r1 0x20000
746 mov $r2 0xc
747 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
748 call strand_wait
749 mov $r2 0x47fc
750 sethi $r2 0x20000
751 iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
752 xbit $r2 $flags $p1
753 add b32 $r2 3
754 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
755
756 // mmio context
757 xbit $r10 $flags $p1 // direction
758 or $r10 6 // first, last
759 mov $r11 0 // base = 0
760 ld b32 $r12 D[$r0 + hub_mmio_list_head]
761 ld b32 $r13 D[$r0 + hub_mmio_list_tail]
762 mov $r14 0 // not multi
763 call mmctx_xfer
764
765 // wait for GPCs to all complete
766 mov $r10 8 // DONE_BAR
767 call wait_doneo
768
769 // wait for strand xfer to complete
770 call strand_wait
771
772 // post-op
773 bra $p1 ctx_xfer_post
774 mov $r10 12 // DONE_UNK12
775 call wait_donez
776 mov $r1 0xa10
777 shl b32 $r1 6
778 mov $r2 5
779 iowr I[$r1] $r2 // MEM_CMD
780 ctx_xfer_post_save_wait:
781 iord $r2 I[$r1]
782 or $r2 $r2
783 bra ne ctx_xfer_post_save_wait
784
785 bra $p2 ctx_xfer_done
786 ctx_xfer_post:
787 mov $r15 2
788 call ctx_4170s
789 clear b32 $r15
790 call ctx_86c
791 call strand_post
792 call ctx_4170w
793 clear b32 $r15
794 call ctx_4170s
795
796 bra not $p1 ctx_xfer_no_post_mmio
797 ld b32 $r1 D[$r0 + chan_mmio_count]
798 or $r1 $r1
799 bra e ctx_xfer_no_post_mmio
800 call ctx_mmio_exec
801
802 ctx_xfer_no_post_mmio:
803 call ctx_4160c
804
805 ctx_xfer_done:
806 ret
807
808.align 256
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
new file mode 100644
index 000000000000..b3b541b6d044
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
@@ -0,0 +1,838 @@
1uint32_t nvc0_grhub_data[] = {
2 0x00000000,
3 0x00000000,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25 0x000000c0,
26 0x012c0090,
27 0x000000c1,
28 0x01300090,
29 0x000000c3,
30 0x012c0090,
31 0x000000c4,
32 0x012c0090,
33 0x000000c8,
34 0x012c0090,
35 0x000000ce,
36 0x012c0090,
37 0x00000000,
38 0x0417e91c,
39 0x04400204,
40 0x28404004,
41 0x00404044,
42 0x34404094,
43 0x184040d0,
44 0x004040f8,
45 0x08404130,
46 0x08404150,
47 0x04404164,
48 0x08404174,
49 0x1c404200,
50 0x34404404,
51 0x0c404460,
52 0x00404480,
53 0x00404498,
54 0x0c404604,
55 0x7c404618,
56 0x50404698,
57 0x044046f0,
58 0x54404700,
59 0x00405800,
60 0x08405830,
61 0x00405854,
62 0x0c405870,
63 0x04405a00,
64 0x00405a18,
65 0x00406020,
66 0x0c406028,
67 0x044064a8,
68 0x044064b4,
69 0x00407804,
70 0x1440780c,
71 0x004078bc,
72 0x18408000,
73 0x00408064,
74 0x08408800,
75 0x0c408900,
76 0x00408980,
77 0x044064c0,
78 0x00000000,
79 0x00000000,
80 0x00000000,
81 0x00000000,
82 0x00000000,
83 0x00000000,
84 0x00000000,
85 0x00000000,
86 0x00000000,
87 0x00000000,
88 0x00000000,
89 0x00000000,
90 0x00000000,
91 0x00000000,
92 0x00000000,
93 0x00000000,
94 0x00000000,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x00000000,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x00000000,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x00000000,
115 0x00000000,
116 0x00000000,
117 0x00000000,
118 0x00000000,
119 0x00000000,
120 0x00000000,
121 0x00000000,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x00000000,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136 0x00000000,
137 0x00000000,
138 0x00000000,
139 0x00000000,
140 0x00000000,
141 0x00000000,
142 0x00000000,
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174 0x00000000,
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x00000000,
193 0x00000000,
194 0x00000000,
195};
196
197uint32_t nvc0_grhub_code[] = {
198 0x03090ef5,
199 0x9800d898,
200 0x86f001d9,
201 0x0489b808,
202 0xf00c1bf4,
203 0x21f502f7,
204 0x00f802ec,
205 0xb60798c4,
206 0x8dbb0384,
207 0x0880b600,
208 0x80008e80,
209 0x90b6018f,
210 0x0f94f001,
211 0xf801d980,
212 0x0131f400,
213 0x9800d898,
214 0x89b801d9,
215 0x210bf404,
216 0xb60789c4,
217 0x9dbb0394,
218 0x0890b600,
219 0x98009e98,
220 0x80b6019f,
221 0x0f84f001,
222 0xf400d880,
223 0x00f80132,
224 0x0728b7f1,
225 0xb906b4b6,
226 0xc9f002ec,
227 0x00bcd01f,
228 0xc800bccf,
229 0x1bf41fcc,
230 0x06a7f0fa,
231 0x010321f5,
232 0xf840bfcf,
233 0x28b7f100,
234 0x06b4b607,
235 0xb980bfd0,
236 0xc9f002ec,
237 0x1ec9f01f,
238 0xcf00bcd0,
239 0xccc800bc,
240 0xfa1bf41f,
241 0x87f100f8,
242 0x84b60430,
243 0x1ff9f006,
244 0xf8008fd0,
245 0x3087f100,
246 0x0684b604,
247 0xf80080d0,
248 0x3c87f100,
249 0x0684b608,
250 0x99f094bd,
251 0x0089d000,
252 0x081887f1,
253 0xd00684b6,
254 0x87f1008a,
255 0x84b60400,
256 0x0088cf06,
257 0xf4888aff,
258 0x87f1f31b,
259 0x84b6085c,
260 0xf094bd06,
261 0x89d00099,
262 0xf100f800,
263 0xb6083c87,
264 0x94bd0684,
265 0xd00099f0,
266 0x87f10089,
267 0x84b60818,
268 0x008ad006,
269 0x040087f1,
270 0xcf0684b6,
271 0x8aff0088,
272 0xf30bf488,
273 0x085c87f1,
274 0xbd0684b6,
275 0x0099f094,
276 0xf80089d0,
277 0x9894bd00,
278 0x85b600e8,
279 0x0180b61a,
280 0xbb0284b6,
281 0xe0b60098,
282 0x04efb804,
283 0xb9eb1bf4,
284 0x00f8029f,
285 0x083c87f1,
286 0xbd0684b6,
287 0x0199f094,
288 0xf10089d0,
289 0xb6071087,
290 0x94bd0684,
291 0xf405bbfd,
292 0x8bd0090b,
293 0x0099f000,
294 0xf405eefd,
295 0x8ed00c0b,
296 0xc08fd080,
297 0xb70199f0,
298 0xc8010080,
299 0xb4b600ab,
300 0x0cb9f010,
301 0xb601aec8,
302 0xbefd11e4,
303 0x008bd005,
304 0xf0008ecf,
305 0x0bf41fe4,
306 0x00ce98fa,
307 0xd005e9fd,
308 0xc0b6c08e,
309 0x04cdb804,
310 0xc8e81bf4,
311 0x1bf402ab,
312 0x008bcf18,
313 0xb01fb4f0,
314 0x1bf410b4,
315 0x02a7f0f7,
316 0xf4c921f4,
317 0xabc81b0e,
318 0x10b4b600,
319 0xf00cb9f0,
320 0x8bd012b9,
321 0x008bcf00,
322 0xf412bbc8,
323 0x87f1fa1b,
324 0x84b6085c,
325 0xf094bd06,
326 0x89d00199,
327 0xf900f800,
328 0x02a7f0a0,
329 0xfcc921f4,
330 0xf100f8a0,
331 0xf04afc87,
332 0x97f00283,
333 0x0089d00c,
334 0x020721f5,
335 0x87f100f8,
336 0x83f04afc,
337 0x0d97f002,
338 0xf50089d0,
339 0xf8020721,
340 0xfca7f100,
341 0x02a3f04f,
342 0x0500aba2,
343 0xd00fc7f0,
344 0xc7f000ac,
345 0x00bcd00b,
346 0x020721f5,
347 0xf000aed0,
348 0xbcd00ac7,
349 0x0721f500,
350 0xf100f802,
351 0xb6083c87,
352 0x94bd0684,
353 0xd00399f0,
354 0x21f50089,
355 0xe7f00213,
356 0x3921f503,
357 0xfca7f102,
358 0x02a3f046,
359 0x0400aba0,
360 0xf040a0d0,
361 0xbcd001c7,
362 0x0721f500,
363 0x010c9202,
364 0xf000acd0,
365 0xbcd002c7,
366 0x0721f500,
367 0x2621f502,
368 0x8087f102,
369 0x0684b608,
370 0xb70089cf,
371 0x95220080,
372 0x8ed008fe,
373 0x408ed000,
374 0xb6808acf,
375 0xa0b606a5,
376 0x00eabb01,
377 0xb60480b6,
378 0x1bf40192,
379 0x08e4b6e8,
380 0xf1f2efbc,
381 0xb6085c87,
382 0x94bd0684,
383 0xd00399f0,
384 0x00f80089,
385 0xe7f1e0f9,
386 0xe4b60814,
387 0x00efd006,
388 0x0c1ce7f1,
389 0xf006e4b6,
390 0xefd001f7,
391 0xf8e0fc00,
392 0xfe04bd00,
393 0x07fe0004,
394 0x0017f100,
395 0x0227f012,
396 0xf10012d0,
397 0xfe05b917,
398 0x17f10010,
399 0x10d00400,
400 0x0437f1c0,
401 0x0634b604,
402 0x200327f1,
403 0xf10032d0,
404 0xd0200427,
405 0x27f10132,
406 0x32d0200b,
407 0x0c27f102,
408 0x0732d020,
409 0x0c2427f1,
410 0xb90624b6,
411 0x23d00003,
412 0x0427f100,
413 0x0023f087,
414 0xb70012d0,
415 0xf0010012,
416 0x12d00427,
417 0x1031f400,
418 0x9604e7f1,
419 0xf440e3f0,
420 0xf1c76821,
421 0x01018090,
422 0x801ff4f0,
423 0x17f0000f,
424 0x041fbb01,
425 0xf10112b6,
426 0xb6040c27,
427 0x21d00624,
428 0x4021d000,
429 0x080027f1,
430 0xcf0624b6,
431 0xf7f00022,
432 0x08f0b654,
433 0xb800f398,
434 0x0bf40432,
435 0x0034b00b,
436 0xf8f11bf4,
437 0x0017f100,
438 0x02fe5801,
439 0xf003ff58,
440 0x0e8000e3,
441 0x150f8014,
442 0x013d21f5,
443 0x070037f1,
444 0x950634b6,
445 0x34d00814,
446 0x4034d000,
447 0x130030b7,
448 0xb6001fbb,
449 0x3fd002f5,
450 0x0815b600,
451 0xb60110b6,
452 0x1fb90814,
453 0x6321f502,
454 0x001fbb02,
455 0xf1000398,
456 0xf0200047,
457 0x4ea05043,
458 0x1fb90804,
459 0x8d21f402,
460 0x08004ea0,
461 0xf4022fb9,
462 0x4ea08d21,
463 0xf4bd010c,
464 0xa08d21f4,
465 0xf401044e,
466 0x4ea08d21,
467 0xf7f00100,
468 0x8d21f402,
469 0x08004ea0,
470 0xc86821f4,
471 0x0bf41fff,
472 0x044ea0fa,
473 0x6821f408,
474 0xb7001fbb,
475 0xb6800040,
476 0x1bf40132,
477 0x0027f1b4,
478 0x0624b608,
479 0xb74021d0,
480 0xbd080020,
481 0x1f19f014,
482 0xf40021d0,
483 0x28f40031,
484 0x08d7f000,
485 0xf43921f4,
486 0xe4b1f401,
487 0x1bf54001,
488 0x87f100d1,
489 0x84b6083c,
490 0xf094bd06,
491 0x89d00499,
492 0x0017f100,
493 0x0614b60b,
494 0xcf4012cf,
495 0x13c80011,
496 0x7e0bf41f,
497 0xf41f23c8,
498 0x20f95a0b,
499 0xf10212b9,
500 0xb6083c87,
501 0x94bd0684,
502 0xd00799f0,
503 0x32f40089,
504 0x0231f401,
505 0x082921f5,
506 0x085c87f1,
507 0xbd0684b6,
508 0x0799f094,
509 0xfc0089d0,
510 0x3c87f120,
511 0x0684b608,
512 0x99f094bd,
513 0x0089d006,
514 0xf50131f4,
515 0xf1082921,
516 0xb6085c87,
517 0x94bd0684,
518 0xd00699f0,
519 0x0ef40089,
520 0xb920f931,
521 0x32f40212,
522 0x0232f401,
523 0x082921f5,
524 0x17f120fc,
525 0x14b60b00,
526 0x0012d006,
527 0xc8130ef4,
528 0x0bf41f23,
529 0x0131f40d,
530 0xf50232f4,
531 0xf1082921,
532 0xb60b0c17,
533 0x27f00614,
534 0x0012d001,
535 0x085c87f1,
536 0xbd0684b6,
537 0x0499f094,
538 0xf50089d0,
539 0xb0ff200e,
540 0x1bf401e4,
541 0x02f2b90d,
542 0x07b521f5,
543 0xb0420ef4,
544 0x1bf402e4,
545 0x3c87f12e,
546 0x0684b608,
547 0x99f094bd,
548 0x0089d007,
549 0xf40132f4,
550 0x21f50232,
551 0x87f10829,
552 0x84b6085c,
553 0xf094bd06,
554 0x89d00799,
555 0x110ef400,
556 0xf010ef94,
557 0x21f501f5,
558 0x0ef502ec,
559 0x17f1fed1,
560 0x14b60820,
561 0xf024bd06,
562 0x12d01f29,
563 0xbe0ef500,
564 0xfe80f9fe,
565 0x80f90188,
566 0xa0f990f9,
567 0xd0f9b0f9,
568 0xf0f9e0f9,
569 0xc4800acf,
570 0x0bf404ab,
571 0x00b7f11d,
572 0x08d7f019,
573 0xcf40becf,
574 0x21f400bf,
575 0x00b0b704,
576 0x01e7f004,
577 0xe400bed0,
578 0xf40100ab,
579 0xd7f00d0b,
580 0x01e7f108,
581 0x0421f440,
582 0x0104b7f1,
583 0xabffb0bd,
584 0x0d0bf4b4,
585 0x0c1ca7f1,
586 0xd006a4b6,
587 0x0ad000ab,
588 0xfcf0fc40,
589 0xfcd0fce0,
590 0xfca0fcb0,
591 0xfe80fc90,
592 0x80fc0088,
593 0xf80032f4,
594 0x60e7f101,
595 0x40e3f041,
596 0xf401f7f0,
597 0x21f48d21,
598 0x04ffc868,
599 0xf8fa0bf4,
600 0x60e7f100,
601 0x40e3f041,
602 0x21f4f4bd,
603 0xf100f88d,
604 0xf04170e7,
605 0xf5f040e3,
606 0x8d21f410,
607 0xe7f100f8,
608 0xe3f04170,
609 0x6821f440,
610 0xf410f4f0,
611 0x00f8f31b,
612 0x0614e7f1,
613 0xf106e4b6,
614 0xd00270f7,
615 0xf7f000ef,
616 0x01f2b608,
617 0xf1fd1bf4,
618 0xd00770f7,
619 0x00f800ef,
620 0x086ce7f1,
621 0xd006e4b6,
622 0xe7f100ef,
623 0xe3f08a14,
624 0x8d21f440,
625 0xa86ce7f1,
626 0xf441e3f0,
627 0x00f88d21,
628 0x083c87f1,
629 0xbd0684b6,
630 0x0599f094,
631 0xf00089d0,
632 0x21f40ca7,
633 0x2417f1c9,
634 0x0614b60a,
635 0xf10010d0,
636 0xb60b0037,
637 0x32d00634,
638 0x0c17f140,
639 0x0614b60a,
640 0xd00747f0,
641 0x14d00012,
642 0x4014cf40,
643 0xf41f44f0,
644 0x32d0fa1b,
645 0x000bfe00,
646 0xb61f2af0,
647 0x20b60424,
648 0x3c87f102,
649 0x0684b608,
650 0x99f094bd,
651 0x0089d008,
652 0x0a0417f1,
653 0xd00614b6,
654 0x17f10012,
655 0x14b60a20,
656 0x0227f006,
657 0x800023f1,
658 0xf00012d0,
659 0x27f11017,
660 0x23f00300,
661 0x0512fa02,
662 0x87f103f8,
663 0x84b6085c,
664 0xf094bd06,
665 0x89d00899,
666 0xc1019800,
667 0x981814b6,
668 0x25b6c002,
669 0x0512fd08,
670 0xf1160180,
671 0xb6083c87,
672 0x94bd0684,
673 0xd00999f0,
674 0x27f10089,
675 0x24b60a04,
676 0x0021d006,
677 0xf10127f0,
678 0xb60a2017,
679 0x12d00614,
680 0x0017f100,
681 0x0613f002,
682 0xf80501fa,
683 0x5c87f103,
684 0x0684b608,
685 0x99f094bd,
686 0x0089d009,
687 0x085c87f1,
688 0xbd0684b6,
689 0x0599f094,
690 0xf80089d0,
691 0x3121f500,
692 0xb821f506,
693 0x0ca7f006,
694 0xf1c921f4,
695 0xb60a1017,
696 0x27f00614,
697 0x0012d005,
698 0xfd0012cf,
699 0x1bf40522,
700 0x4921f5fa,
701 0x9800f806,
702 0x27f18103,
703 0x24b60a04,
704 0x0023d006,
705 0x34c434bd,
706 0x0f1bf4ff,
707 0x030057f1,
708 0xfa0653f0,
709 0x03f80535,
710 0x98c04e98,
711 0x21f4c14f,
712 0x0830b68d,
713 0xf40112b6,
714 0x0398df1b,
715 0x0023d016,
716 0xf1800080,
717 0xf0020017,
718 0x01fa0613,
719 0xf803f806,
720 0x0611f400,
721 0xf01102f4,
722 0x21f510f7,
723 0x21f50698,
724 0x11f40631,
725 0x02f7f01c,
726 0x065721f5,
727 0x066621f5,
728 0x067821f5,
729 0x21f5f4bd,
730 0x21f50657,
731 0x019806b8,
732 0x1427f116,
733 0x0624b604,
734 0xf10020d0,
735 0xf0a500e7,
736 0x1fb941e3,
737 0x8d21f402,
738 0xf004e0b6,
739 0x2cf001fc,
740 0x0124b602,
741 0xf405f2fd,
742 0x17f18d21,
743 0x13f04afc,
744 0x0c27f002,
745 0xf50012d0,
746 0xf1020721,
747 0xf047fc27,
748 0x20d00223,
749 0x012cf000,
750 0xd00320b6,
751 0xacf00012,
752 0x06a5f001,
753 0x9800b7f0,
754 0x0d98140c,
755 0x00e7f015,
756 0x015c21f5,
757 0xf508a7f0,
758 0xf5010321,
759 0xf4020721,
760 0xa7f02201,
761 0xc921f40c,
762 0x0a1017f1,
763 0xf00614b6,
764 0x12d00527,
765 0x0012cf00,
766 0xf40522fd,
767 0x02f4fa1b,
768 0x02f7f032,
769 0x065721f5,
770 0x21f5f4bd,
771 0x21f50698,
772 0x21f50226,
773 0xf4bd0666,
774 0x065721f5,
775 0x981011f4,
776 0x11fd8001,
777 0x070bf405,
778 0x07df21f5,
779 0x064921f5,
780 0x000000f8,
781 0x00000000,
782 0x00000000,
783 0x00000000,
784 0x00000000,
785 0x00000000,
786 0x00000000,
787 0x00000000,
788 0x00000000,
789 0x00000000,
790 0x00000000,
791 0x00000000,
792 0x00000000,
793 0x00000000,
794 0x00000000,
795 0x00000000,
796 0x00000000,
797 0x00000000,
798 0x00000000,
799 0x00000000,
800 0x00000000,
801 0x00000000,
802 0x00000000,
803 0x00000000,
804 0x00000000,
805 0x00000000,
806 0x00000000,
807 0x00000000,
808 0x00000000,
809 0x00000000,
810 0x00000000,
811 0x00000000,
812 0x00000000,
813 0x00000000,
814 0x00000000,
815 0x00000000,
816 0x00000000,
817 0x00000000,
818 0x00000000,
819 0x00000000,
820 0x00000000,
821 0x00000000,
822 0x00000000,
823 0x00000000,
824 0x00000000,
825 0x00000000,
826 0x00000000,
827 0x00000000,
828 0x00000000,
829 0x00000000,
830 0x00000000,
831 0x00000000,
832 0x00000000,
833 0x00000000,
834 0x00000000,
835 0x00000000,
836 0x00000000,
837 0x00000000,
838};
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 82357d2df1f4..b701c439c92e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -32,7 +32,6 @@ struct nvc0_instmem_priv {
32 struct nouveau_channel *bar1; 32 struct nouveau_channel *bar1;
33 struct nouveau_gpuobj *bar3_pgd; 33 struct nouveau_gpuobj *bar3_pgd;
34 struct nouveau_channel *bar3; 34 struct nouveau_channel *bar3;
35 struct nouveau_gpuobj *chan_pgd;
36}; 35};
37 36
38int 37int
@@ -181,17 +180,11 @@ nvc0_instmem_init(struct drm_device *dev)
181 goto error; 180 goto error;
182 181
183 /* channel vm */ 182 /* channel vm */
184 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm); 183 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
184 &dev_priv->chan_vm);
185 if (ret) 185 if (ret)
186 goto error; 186 goto error;
187 187
188 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd);
189 if (ret)
190 goto error;
191
192 nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd);
193 nouveau_vm_ref(NULL, &vm, NULL);
194
195 nvc0_instmem_resume(dev); 188 nvc0_instmem_resume(dev);
196 return 0; 189 return 0;
197error: 190error:
@@ -211,8 +204,7 @@ nvc0_instmem_takedown(struct drm_device *dev)
211 nv_wr32(dev, 0x1704, 0x00000000); 204 nv_wr32(dev, 0x1704, 0x00000000);
212 nv_wr32(dev, 0x1714, 0x00000000); 205 nv_wr32(dev, 0x1714, 0x00000000);
213 206
214 nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd); 207 nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
215 nouveau_gpuobj_ref(NULL, &priv->chan_pgd);
216 208
217 nvc0_channel_del(&priv->bar1); 209 nvc0_channel_del(&priv->bar1);
218 nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd); 210 nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index a179e6c55afb..9e352944a35a 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -105,7 +105,11 @@ nvc0_vm_flush(struct nouveau_vm *vm)
105 struct drm_device *dev = vm->dev; 105 struct drm_device *dev = vm->dev;
106 struct nouveau_vm_pgd *vpgd; 106 struct nouveau_vm_pgd *vpgd;
107 unsigned long flags; 107 unsigned long flags;
108 u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5; 108 u32 engine;
109
110 engine = 1;
111 if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm)
112 engine |= 4;
109 113
110 pinstmem->flush(vm->dev); 114 pinstmem->flush(vm->dev);
111 115
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index 67c6ec6f34ea..e45a24d84e98 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -61,9 +61,7 @@ nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
61 u32 type, struct nouveau_mem **pmem) 61 u32 type, struct nouveau_mem **pmem)
62{ 62{
63 struct drm_nouveau_private *dev_priv = dev->dev_private; 63 struct drm_nouveau_private *dev_priv = dev->dev_private;
64 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; 64 struct nouveau_mm *mm = dev_priv->engine.vram.mm;
65 struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
66 struct nouveau_mm *mm = man->priv;
67 struct nouveau_mm_node *r; 65 struct nouveau_mm_node *r;
68 struct nouveau_mem *mem; 66 struct nouveau_mem *mem;
69 int ret; 67 int ret;
@@ -105,9 +103,15 @@ int
105nvc0_vram_init(struct drm_device *dev) 103nvc0_vram_init(struct drm_device *dev)
106{ 104{
107 struct drm_nouveau_private *dev_priv = dev->dev_private; 105 struct drm_nouveau_private *dev_priv = dev->dev_private;
106 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
107 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
108 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
109 u32 length;
108 110
109 dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; 111 dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
110 dev_priv->vram_size *= nv_rd32(dev, 0x121c74); 112 dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
111 dev_priv->vram_rblock_size = 4096; 113
112 return 0; 114 length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
115
116 return nouveau_mm_init(&vram->mm, rsvd_head, length, 1);
113} 117}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9541995e4b21..c742944d3805 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -764,7 +764,7 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
764} 764}
765 765
766static void atombios_crtc_program_pll(struct drm_crtc *crtc, 766static void atombios_crtc_program_pll(struct drm_crtc *crtc,
767 int crtc_id, 767 u32 crtc_id,
768 int pll_id, 768 int pll_id,
769 u32 encoder_mode, 769 u32 encoder_mode,
770 u32 encoder_id, 770 u32 encoder_id,
@@ -851,8 +851,7 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
851 args.v5.ucPpll = pll_id; 851 args.v5.ucPpll = pll_id;
852 break; 852 break;
853 case 6: 853 case 6:
854 args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id; 854 args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10);
855 args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10);
856 args.v6.ucRefDiv = ref_div; 855 args.v6.ucRefDiv = ref_div;
857 args.v6.usFbDiv = cpu_to_le16(fb_div); 856 args.v6.usFbDiv = cpu_to_le16(fb_div);
858 args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); 857 args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 8c0f9e36ff8e..645b84b3d203 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -627,6 +627,7 @@ struct radeon_dp_link_train_info {
627 u8 train_set[4]; 627 u8 train_set[4];
628 u8 link_status[DP_LINK_STATUS_SIZE]; 628 u8 link_status[DP_LINK_STATUS_SIZE];
629 u8 tries; 629 u8 tries;
630 bool use_dpencoder;
630}; 631};
631 632
632static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info) 633static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
@@ -646,7 +647,7 @@ static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
646 int rtp = 0; 647 int rtp = 0;
647 648
648 /* set training pattern on the source */ 649 /* set training pattern on the source */
649 if (ASIC_IS_DCE4(dp_info->rdev)) { 650 if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) {
650 switch (tp) { 651 switch (tp) {
651 case DP_TRAINING_PATTERN_1: 652 case DP_TRAINING_PATTERN_1:
652 rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1; 653 rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
@@ -706,7 +707,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
706 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp); 707 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
707 708
708 /* start training on the source */ 709 /* start training on the source */
709 if (ASIC_IS_DCE4(dp_info->rdev)) 710 if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
710 atombios_dig_encoder_setup(dp_info->encoder, 711 atombios_dig_encoder_setup(dp_info->encoder,
711 ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0); 712 ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
712 else 713 else
@@ -731,7 +732,7 @@ static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info
731 DP_TRAINING_PATTERN_DISABLE); 732 DP_TRAINING_PATTERN_DISABLE);
732 733
733 /* disable the training pattern on the source */ 734 /* disable the training pattern on the source */
734 if (ASIC_IS_DCE4(dp_info->rdev)) 735 if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
735 atombios_dig_encoder_setup(dp_info->encoder, 736 atombios_dig_encoder_setup(dp_info->encoder,
736 ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0); 737 ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
737 else 738 else
@@ -869,7 +870,8 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
869 struct radeon_connector *radeon_connector; 870 struct radeon_connector *radeon_connector;
870 struct radeon_connector_atom_dig *dig_connector; 871 struct radeon_connector_atom_dig *dig_connector;
871 struct radeon_dp_link_train_info dp_info; 872 struct radeon_dp_link_train_info dp_info;
872 u8 tmp; 873 int index;
874 u8 tmp, frev, crev;
873 875
874 if (!radeon_encoder->enc_priv) 876 if (!radeon_encoder->enc_priv)
875 return; 877 return;
@@ -884,6 +886,18 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
884 (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP)) 886 (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
885 return; 887 return;
886 888
889 /* DPEncoderService newer than 1.1 can't program properly the
890 * training pattern. When facing such version use the
891 * DIGXEncoderControl (X== 1 | 2)
892 */
893 dp_info.use_dpencoder = true;
894 index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
895 if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
896 if (crev > 1) {
897 dp_info.use_dpencoder = false;
898 }
899 }
900
887 dp_info.enc_id = 0; 901 dp_info.enc_id = 0;
888 if (dig->dig_encoder) 902 if (dig->dig_encoder)
889 dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; 903 dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 15bd0477a3e8..14dce9f22172 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1382,9 +1382,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1382 1382
1383 /* set the wb address wether it's enabled or not */ 1383 /* set the wb address wether it's enabled or not */
1384 WREG32(CP_RB_RPTR_ADDR, 1384 WREG32(CP_RB_RPTR_ADDR,
1385#ifdef __BIG_ENDIAN
1386 RB_RPTR_SWAP(2) |
1387#endif
1388 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); 1385 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
1389 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 1386 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1390 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 1387 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
@@ -2047,6 +2044,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2047 rdev->config.evergreen.tile_config |= 2044 rdev->config.evergreen.tile_config |=
2048 ((gb_addr_config & 0x30000000) >> 28) << 12; 2045 ((gb_addr_config & 0x30000000) >> 28) << 12;
2049 2046
2047 rdev->config.evergreen.backend_map = gb_backend_map;
2050 WREG32(GB_BACKEND_MAP, gb_backend_map); 2048 WREG32(GB_BACKEND_MAP, gb_backend_map);
2051 WREG32(GB_ADDR_CONFIG, gb_addr_config); 2049 WREG32(GB_ADDR_CONFIG, gb_addr_config);
2052 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 2050 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
@@ -2761,6 +2759,9 @@ int evergreen_irq_process(struct radeon_device *rdev)
2761 return IRQ_NONE; 2759 return IRQ_NONE;
2762 } 2760 }
2763restart_ih: 2761restart_ih:
2762 /* Order reading of wptr vs. reading of IH ring data */
2763 rmb();
2764
2764 /* display interrupts */ 2765 /* display interrupts */
2765 evergreen_irq_ack(rdev); 2766 evergreen_irq_ack(rdev);
2766 2767
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 23d36417158d..189e86522b5b 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -856,7 +856,6 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
856 case SQ_PGM_START_PS: 856 case SQ_PGM_START_PS:
857 case SQ_PGM_START_HS: 857 case SQ_PGM_START_HS:
858 case SQ_PGM_START_LS: 858 case SQ_PGM_START_LS:
859 case GDS_ADDR_BASE:
860 case SQ_CONST_MEM_BASE: 859 case SQ_CONST_MEM_BASE:
861 case SQ_ALU_CONST_CACHE_GS_0: 860 case SQ_ALU_CONST_CACHE_GS_0:
862 case SQ_ALU_CONST_CACHE_GS_1: 861 case SQ_ALU_CONST_CACHE_GS_1:
@@ -946,6 +945,34 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
946 } 945 }
947 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 946 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
948 break; 947 break;
948 case SX_MEMORY_EXPORT_BASE:
949 if (p->rdev->family >= CHIP_CAYMAN) {
950 dev_warn(p->dev, "bad SET_CONFIG_REG "
951 "0x%04X\n", reg);
952 return -EINVAL;
953 }
954 r = evergreen_cs_packet_next_reloc(p, &reloc);
955 if (r) {
956 dev_warn(p->dev, "bad SET_CONFIG_REG "
957 "0x%04X\n", reg);
958 return -EINVAL;
959 }
960 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
961 break;
962 case CAYMAN_SX_SCATTER_EXPORT_BASE:
963 if (p->rdev->family < CHIP_CAYMAN) {
964 dev_warn(p->dev, "bad SET_CONTEXT_REG "
965 "0x%04X\n", reg);
966 return -EINVAL;
967 }
968 r = evergreen_cs_packet_next_reloc(p, &reloc);
969 if (r) {
970 dev_warn(p->dev, "bad SET_CONTEXT_REG "
971 "0x%04X\n", reg);
972 return -EINVAL;
973 }
974 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
975 break;
949 default: 976 default:
950 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 977 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
951 return -EINVAL; 978 return -EINVAL;
@@ -1153,6 +1180,34 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
1153 return r; 1180 return r;
1154 } 1181 }
1155 break; 1182 break;
1183 case PACKET3_DISPATCH_DIRECT:
1184 if (pkt->count != 3) {
1185 DRM_ERROR("bad DISPATCH_DIRECT\n");
1186 return -EINVAL;
1187 }
1188 r = evergreen_cs_track_check(p);
1189 if (r) {
1190 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1191 return r;
1192 }
1193 break;
1194 case PACKET3_DISPATCH_INDIRECT:
1195 if (pkt->count != 1) {
1196 DRM_ERROR("bad DISPATCH_INDIRECT\n");
1197 return -EINVAL;
1198 }
1199 r = evergreen_cs_packet_next_reloc(p, &reloc);
1200 if (r) {
1201 DRM_ERROR("bad DISPATCH_INDIRECT\n");
1202 return -EINVAL;
1203 }
1204 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1205 r = evergreen_cs_track_check(p);
1206 if (r) {
1207 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1208 return r;
1209 }
1210 break;
1156 case PACKET3_WAIT_REG_MEM: 1211 case PACKET3_WAIT_REG_MEM:
1157 if (pkt->count != 5) { 1212 if (pkt->count != 5) {
1158 DRM_ERROR("bad WAIT_REG_MEM\n"); 1213 DRM_ERROR("bad WAIT_REG_MEM\n");
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b7b2714f0b32..7363d9dec909 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -351,6 +351,7 @@
351#define COLOR_BUFFER_SIZE(x) ((x) << 0) 351#define COLOR_BUFFER_SIZE(x) ((x) << 0)
352#define POSITION_BUFFER_SIZE(x) ((x) << 8) 352#define POSITION_BUFFER_SIZE(x) ((x) << 8)
353#define SMX_BUFFER_SIZE(x) ((x) << 16) 353#define SMX_BUFFER_SIZE(x) ((x) << 16)
354#define SX_MEMORY_EXPORT_BASE 0x9010
354#define SX_MISC 0x28350 355#define SX_MISC 0x28350
355 356
356#define CB_PERF_CTR0_SEL_0 0x9A20 357#define CB_PERF_CTR0_SEL_0 0x9A20
@@ -1122,6 +1123,7 @@
1122#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0 1123#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0
1123#define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0 1124#define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0
1124#define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7 1125#define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7
1126#define CAYMAN_SX_SCATTER_EXPORT_BASE 0x28358
1125/* cayman packet3 addition */ 1127/* cayman packet3 addition */
1126#define CAYMAN_PACKET3_DEALLOC_STATE 0x14 1128#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
1127 1129
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 559dbd412906..44c4750f4518 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -833,6 +833,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
833 rdev->config.cayman.tile_config |= 833 rdev->config.cayman.tile_config |=
834 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; 834 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
835 835
836 rdev->config.cayman.backend_map = gb_backend_map;
836 WREG32(GB_BACKEND_MAP, gb_backend_map); 837 WREG32(GB_BACKEND_MAP, gb_backend_map);
837 WREG32(GB_ADDR_CONFIG, gb_addr_config); 838 WREG32(GB_ADDR_CONFIG, gb_addr_config);
838 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 839 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index bc54b26cb32f..aa5571b73aa0 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1662,6 +1662,7 @@ void r600_gpu_init(struct radeon_device *rdev)
1662 R6XX_MAX_BACKENDS_MASK) >> 16)), 1662 R6XX_MAX_BACKENDS_MASK) >> 16)),
1663 (cc_rb_backend_disable >> 16)); 1663 (cc_rb_backend_disable >> 16));
1664 rdev->config.r600.tile_config = tiling_config; 1664 rdev->config.r600.tile_config = tiling_config;
1665 rdev->config.r600.backend_map = backend_map;
1665 tiling_config |= BACKEND_MAP(backend_map); 1666 tiling_config |= BACKEND_MAP(backend_map);
1666 WREG32(GB_TILING_CONFIG, tiling_config); 1667 WREG32(GB_TILING_CONFIG, tiling_config);
1667 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); 1668 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
@@ -2212,9 +2213,6 @@ int r600_cp_resume(struct radeon_device *rdev)
2212 2213
2213 /* set the wb address whether it's enabled or not */ 2214 /* set the wb address whether it's enabled or not */
2214 WREG32(CP_RB_RPTR_ADDR, 2215 WREG32(CP_RB_RPTR_ADDR,
2215#ifdef __BIG_ENDIAN
2216 RB_RPTR_SWAP(2) |
2217#endif
2218 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); 2216 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2219 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 2217 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2220 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 2218 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
@@ -2994,10 +2992,6 @@ int r600_irq_init(struct radeon_device *rdev)
2994 /* RPTR_REARM only works if msi's are enabled */ 2992 /* RPTR_REARM only works if msi's are enabled */
2995 if (rdev->msi_enabled) 2993 if (rdev->msi_enabled)
2996 ih_cntl |= RPTR_REARM; 2994 ih_cntl |= RPTR_REARM;
2997
2998#ifdef __BIG_ENDIAN
2999 ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
3000#endif
3001 WREG32(IH_CNTL, ih_cntl); 2995 WREG32(IH_CNTL, ih_cntl);
3002 2996
3003 /* force the active interrupt state to all disabled */ 2997 /* force the active interrupt state to all disabled */
@@ -3308,6 +3302,10 @@ int r600_irq_process(struct radeon_device *rdev)
3308 if (!rdev->ih.enabled || rdev->shutdown) 3302 if (!rdev->ih.enabled || rdev->shutdown)
3309 return IRQ_NONE; 3303 return IRQ_NONE;
3310 3304
3305 /* No MSIs, need a dummy read to flush PCI DMAs */
3306 if (!rdev->msi_enabled)
3307 RREG32(IH_RB_WPTR);
3308
3311 wptr = r600_get_ih_wptr(rdev); 3309 wptr = r600_get_ih_wptr(rdev);
3312 rptr = rdev->ih.rptr; 3310 rptr = rdev->ih.rptr;
3313 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 3311 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
@@ -3320,6 +3318,9 @@ int r600_irq_process(struct radeon_device *rdev)
3320 } 3318 }
3321 3319
3322restart_ih: 3320restart_ih:
3321 /* Order reading of wptr vs. reading of IH ring data */
3322 rmb();
3323
3323 /* display interrupts */ 3324 /* display interrupts */
3324 r600_irq_ack(rdev); 3325 r600_irq_ack(rdev);
3325 3326
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index c3ab959bdc7c..45fd592f9606 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1802,8 +1802,8 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
1802 /* Set ring buffer size */ 1802 /* Set ring buffer size */
1803#ifdef __BIG_ENDIAN 1803#ifdef __BIG_ENDIAN
1804 RADEON_WRITE(R600_CP_RB_CNTL, 1804 RADEON_WRITE(R600_CP_RB_CNTL,
1805 RADEON_BUF_SWAP_32BIT | 1805 R600_BUF_SWAP_32BIT |
1806 RADEON_RB_NO_UPDATE | 1806 R600_RB_NO_UPDATE |
1807 (dev_priv->ring.rptr_update_l2qw << 8) | 1807 (dev_priv->ring.rptr_update_l2qw << 8) |
1808 dev_priv->ring.size_l2qw); 1808 dev_priv->ring.size_l2qw);
1809#else 1809#else
@@ -1820,15 +1820,15 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
1820 1820
1821#ifdef __BIG_ENDIAN 1821#ifdef __BIG_ENDIAN
1822 RADEON_WRITE(R600_CP_RB_CNTL, 1822 RADEON_WRITE(R600_CP_RB_CNTL,
1823 RADEON_BUF_SWAP_32BIT | 1823 R600_BUF_SWAP_32BIT |
1824 RADEON_RB_NO_UPDATE | 1824 R600_RB_NO_UPDATE |
1825 RADEON_RB_RPTR_WR_ENA | 1825 R600_RB_RPTR_WR_ENA |
1826 (dev_priv->ring.rptr_update_l2qw << 8) | 1826 (dev_priv->ring.rptr_update_l2qw << 8) |
1827 dev_priv->ring.size_l2qw); 1827 dev_priv->ring.size_l2qw);
1828#else 1828#else
1829 RADEON_WRITE(R600_CP_RB_CNTL, 1829 RADEON_WRITE(R600_CP_RB_CNTL,
1830 RADEON_RB_NO_UPDATE | 1830 R600_RB_NO_UPDATE |
1831 RADEON_RB_RPTR_WR_ENA | 1831 R600_RB_RPTR_WR_ENA |
1832 (dev_priv->ring.rptr_update_l2qw << 8) | 1832 (dev_priv->ring.rptr_update_l2qw << 8) |
1833 dev_priv->ring.size_l2qw); 1833 dev_priv->ring.size_l2qw);
1834#endif 1834#endif
@@ -1851,13 +1851,8 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
1851 - ((unsigned long) dev->sg->virtual) 1851 - ((unsigned long) dev->sg->virtual)
1852 + dev_priv->gart_vm_start; 1852 + dev_priv->gart_vm_start;
1853 } 1853 }
1854 RADEON_WRITE(R600_CP_RB_RPTR_ADDR, 1854 RADEON_WRITE(R600_CP_RB_RPTR_ADDR, (rptr_addr & 0xfffffffc));
1855#ifdef __BIG_ENDIAN 1855 RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, upper_32_bits(rptr_addr));
1856 (2 << 0) |
1857#endif
1858 (rptr_addr & 0xfffffffc));
1859 RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI,
1860 upper_32_bits(rptr_addr));
1861 1856
1862#ifdef __BIG_ENDIAN 1857#ifdef __BIG_ENDIAN
1863 RADEON_WRITE(R600_CP_RB_CNTL, 1858 RADEON_WRITE(R600_CP_RB_CNTL,
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 909bda8dd550..db8ef1905d5f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1200,6 +1200,15 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
1200 } 1200 }
1201 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1201 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1202 break; 1202 break;
1203 case SX_MEMORY_EXPORT_BASE:
1204 r = r600_cs_packet_next_reloc(p, &reloc);
1205 if (r) {
1206 dev_warn(p->dev, "bad SET_CONFIG_REG "
1207 "0x%04X\n", reg);
1208 return -EINVAL;
1209 }
1210 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1211 break;
1203 default: 1212 default:
1204 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1213 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1205 return -EINVAL; 1214 return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ef0e0e016914..32807baf55e2 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -60,7 +60,7 @@
60 * are considered as fatal) 60 * are considered as fatal)
61 */ 61 */
62 62
63#include <asm/atomic.h> 63#include <linux/atomic.h>
64#include <linux/wait.h> 64#include <linux/wait.h>
65#include <linux/list.h> 65#include <linux/list.h>
66#include <linux/kref.h> 66#include <linux/kref.h>
@@ -1003,6 +1003,7 @@ struct r600_asic {
1003 unsigned tiling_npipes; 1003 unsigned tiling_npipes;
1004 unsigned tiling_group_size; 1004 unsigned tiling_group_size;
1005 unsigned tile_config; 1005 unsigned tile_config;
1006 unsigned backend_map;
1006 struct r100_gpu_lockup lockup; 1007 struct r100_gpu_lockup lockup;
1007}; 1008};
1008 1009
@@ -1028,6 +1029,7 @@ struct rv770_asic {
1028 unsigned tiling_npipes; 1029 unsigned tiling_npipes;
1029 unsigned tiling_group_size; 1030 unsigned tiling_group_size;
1030 unsigned tile_config; 1031 unsigned tile_config;
1032 unsigned backend_map;
1031 struct r100_gpu_lockup lockup; 1033 struct r100_gpu_lockup lockup;
1032}; 1034};
1033 1035
@@ -1054,6 +1056,7 @@ struct evergreen_asic {
1054 unsigned tiling_npipes; 1056 unsigned tiling_npipes;
1055 unsigned tiling_group_size; 1057 unsigned tiling_group_size;
1056 unsigned tile_config; 1058 unsigned tile_config;
1059 unsigned backend_map;
1057 struct r100_gpu_lockup lockup; 1060 struct r100_gpu_lockup lockup;
1058}; 1061};
1059 1062
@@ -1174,7 +1177,7 @@ struct radeon_device {
1174 /* Register mmio */ 1177 /* Register mmio */
1175 resource_size_t rmmio_base; 1178 resource_size_t rmmio_base;
1176 resource_size_t rmmio_size; 1179 resource_size_t rmmio_size;
1177 void *rmmio; 1180 void __iomem *rmmio;
1178 radeon_rreg_t mc_rreg; 1181 radeon_rreg_t mc_rreg;
1179 radeon_wreg_t mc_wreg; 1182 radeon_wreg_t mc_wreg;
1180 radeon_rreg_t pll_rreg; 1183 radeon_rreg_t pll_rreg;
@@ -1251,20 +1254,20 @@ int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
1251static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) 1254static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
1252{ 1255{
1253 if (reg < rdev->rmmio_size) 1256 if (reg < rdev->rmmio_size)
1254 return readl(((void __iomem *)rdev->rmmio) + reg); 1257 return readl((rdev->rmmio) + reg);
1255 else { 1258 else {
1256 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 1259 writel(reg, (rdev->rmmio) + RADEON_MM_INDEX);
1257 return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); 1260 return readl((rdev->rmmio) + RADEON_MM_DATA);
1258 } 1261 }
1259} 1262}
1260 1263
1261static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 1264static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1262{ 1265{
1263 if (reg < rdev->rmmio_size) 1266 if (reg < rdev->rmmio_size)
1264 writel(v, ((void __iomem *)rdev->rmmio) + reg); 1267 writel(v, (rdev->rmmio) + reg);
1265 else { 1268 else {
1266 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 1269 writel(reg, (rdev->rmmio) + RADEON_MM_INDEX);
1267 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); 1270 writel(v, (rdev->rmmio) + RADEON_MM_DATA);
1268 } 1271 }
1269} 1272}
1270 1273
@@ -1296,10 +1299,10 @@ static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1296/* 1299/*
1297 * Registers read & write functions. 1300 * Registers read & write functions.
1298 */ 1301 */
1299#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) 1302#define RREG8(reg) readb((rdev->rmmio) + (reg))
1300#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) 1303#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
1301#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg)) 1304#define RREG16(reg) readw((rdev->rmmio) + (reg))
1302#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg)) 1305#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
1303#define RREG32(reg) r100_mm_rreg(rdev, (reg)) 1306#define RREG32(reg) r100_mm_rreg(rdev, (reg))
1304#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) 1307#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
1305#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) 1308#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index b2449629537d..df8218bb83a6 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -625,7 +625,7 @@ static struct radeon_asic r600_asic = {
625 .fence_ring_emit = &r600_fence_ring_emit, 625 .fence_ring_emit = &r600_fence_ring_emit,
626 .cs_parse = &r600_cs_parse, 626 .cs_parse = &r600_cs_parse,
627 .copy_blit = &r600_copy_blit, 627 .copy_blit = &r600_copy_blit,
628 .copy_dma = &r600_copy_blit, 628 .copy_dma = NULL,
629 .copy = &r600_copy_blit, 629 .copy = &r600_copy_blit,
630 .get_engine_clock = &radeon_atom_get_engine_clock, 630 .get_engine_clock = &radeon_atom_get_engine_clock,
631 .set_engine_clock = &radeon_atom_set_engine_clock, 631 .set_engine_clock = &radeon_atom_set_engine_clock,
@@ -672,7 +672,7 @@ static struct radeon_asic rs780_asic = {
672 .fence_ring_emit = &r600_fence_ring_emit, 672 .fence_ring_emit = &r600_fence_ring_emit,
673 .cs_parse = &r600_cs_parse, 673 .cs_parse = &r600_cs_parse,
674 .copy_blit = &r600_copy_blit, 674 .copy_blit = &r600_copy_blit,
675 .copy_dma = &r600_copy_blit, 675 .copy_dma = NULL,
676 .copy = &r600_copy_blit, 676 .copy = &r600_copy_blit,
677 .get_engine_clock = &radeon_atom_get_engine_clock, 677 .get_engine_clock = &radeon_atom_get_engine_clock,
678 .set_engine_clock = &radeon_atom_set_engine_clock, 678 .set_engine_clock = &radeon_atom_set_engine_clock,
@@ -719,7 +719,7 @@ static struct radeon_asic rv770_asic = {
719 .fence_ring_emit = &r600_fence_ring_emit, 719 .fence_ring_emit = &r600_fence_ring_emit,
720 .cs_parse = &r600_cs_parse, 720 .cs_parse = &r600_cs_parse,
721 .copy_blit = &r600_copy_blit, 721 .copy_blit = &r600_copy_blit,
722 .copy_dma = &r600_copy_blit, 722 .copy_dma = NULL,
723 .copy = &r600_copy_blit, 723 .copy = &r600_copy_blit,
724 .get_engine_clock = &radeon_atom_get_engine_clock, 724 .get_engine_clock = &radeon_atom_get_engine_clock,
725 .set_engine_clock = &radeon_atom_set_engine_clock, 725 .set_engine_clock = &radeon_atom_set_engine_clock,
@@ -766,7 +766,7 @@ static struct radeon_asic evergreen_asic = {
766 .fence_ring_emit = &r600_fence_ring_emit, 766 .fence_ring_emit = &r600_fence_ring_emit,
767 .cs_parse = &evergreen_cs_parse, 767 .cs_parse = &evergreen_cs_parse,
768 .copy_blit = &evergreen_copy_blit, 768 .copy_blit = &evergreen_copy_blit,
769 .copy_dma = &evergreen_copy_blit, 769 .copy_dma = NULL,
770 .copy = &evergreen_copy_blit, 770 .copy = &evergreen_copy_blit,
771 .get_engine_clock = &radeon_atom_get_engine_clock, 771 .get_engine_clock = &radeon_atom_get_engine_clock,
772 .set_engine_clock = &radeon_atom_set_engine_clock, 772 .set_engine_clock = &radeon_atom_set_engine_clock,
@@ -813,7 +813,7 @@ static struct radeon_asic sumo_asic = {
813 .fence_ring_emit = &r600_fence_ring_emit, 813 .fence_ring_emit = &r600_fence_ring_emit,
814 .cs_parse = &evergreen_cs_parse, 814 .cs_parse = &evergreen_cs_parse,
815 .copy_blit = &evergreen_copy_blit, 815 .copy_blit = &evergreen_copy_blit,
816 .copy_dma = &evergreen_copy_blit, 816 .copy_dma = NULL,
817 .copy = &evergreen_copy_blit, 817 .copy = &evergreen_copy_blit,
818 .get_engine_clock = &radeon_atom_get_engine_clock, 818 .get_engine_clock = &radeon_atom_get_engine_clock,
819 .set_engine_clock = &radeon_atom_set_engine_clock, 819 .set_engine_clock = &radeon_atom_set_engine_clock,
@@ -860,7 +860,7 @@ static struct radeon_asic btc_asic = {
860 .fence_ring_emit = &r600_fence_ring_emit, 860 .fence_ring_emit = &r600_fence_ring_emit,
861 .cs_parse = &evergreen_cs_parse, 861 .cs_parse = &evergreen_cs_parse,
862 .copy_blit = &evergreen_copy_blit, 862 .copy_blit = &evergreen_copy_blit,
863 .copy_dma = &evergreen_copy_blit, 863 .copy_dma = NULL,
864 .copy = &evergreen_copy_blit, 864 .copy = &evergreen_copy_blit,
865 .get_engine_clock = &radeon_atom_get_engine_clock, 865 .get_engine_clock = &radeon_atom_get_engine_clock,
866 .set_engine_clock = &radeon_atom_set_engine_clock, 866 .set_engine_clock = &radeon_atom_set_engine_clock,
@@ -907,7 +907,7 @@ static struct radeon_asic cayman_asic = {
907 .fence_ring_emit = &r600_fence_ring_emit, 907 .fence_ring_emit = &r600_fence_ring_emit,
908 .cs_parse = &evergreen_cs_parse, 908 .cs_parse = &evergreen_cs_parse,
909 .copy_blit = &evergreen_copy_blit, 909 .copy_blit = &evergreen_copy_blit,
910 .copy_dma = &evergreen_copy_blit, 910 .copy_dma = NULL,
911 .copy = &evergreen_copy_blit, 911 .copy = &evergreen_copy_blit,
912 .get_engine_clock = &radeon_atom_get_engine_clock, 912 .get_engine_clock = &radeon_atom_get_engine_clock,
913 .set_engine_clock = &radeon_atom_set_engine_clock, 913 .set_engine_clock = &radeon_atom_set_engine_clock,
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 2d48e7a1474b..dcd0863e31ae 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -96,7 +96,7 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
96 * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device 96 * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device
97 * tree. Hopefully, ATI OF driver is kind enough to fill these 97 * tree. Hopefully, ATI OF driver is kind enough to fill these
98 */ 98 */
99static bool __devinit radeon_read_clocks_OF(struct drm_device *dev) 99static bool radeon_read_clocks_OF(struct drm_device *dev)
100{ 100{
101 struct radeon_device *rdev = dev->dev_private; 101 struct radeon_device *rdev = dev->dev_private;
102 struct device_node *dp = rdev->pdev->dev.of_node; 102 struct device_node *dp = rdev->pdev->dev.of_node;
@@ -166,7 +166,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
166 return true; 166 return true;
167} 167}
168#else 168#else
169static bool __devinit radeon_read_clocks_OF(struct drm_device *dev) 169static bool radeon_read_clocks_OF(struct drm_device *dev)
170{ 170{
171 return false; 171 return false;
172} 172}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e4594676a07c..a74217cd192f 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -779,7 +779,8 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
779 } 779 }
780 } 780 }
781 } 781 }
782 } else if (rdev->family >= CHIP_R200) { 782 } else if ((rdev->family == CHIP_R200) ||
783 (rdev->family >= CHIP_R300)) {
783 /* 0x68 */ 784 /* 0x68 */
784 i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); 785 i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
785 rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); 786 rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 75867792a4e2..045ec59478f9 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2115,7 +2115,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
2115 2115
2116 if (drm_pci_device_is_agp(dev)) 2116 if (drm_pci_device_is_agp(dev))
2117 dev_priv->flags |= RADEON_IS_AGP; 2117 dev_priv->flags |= RADEON_IS_AGP;
2118 else if (drm_pci_device_is_pcie(dev)) 2118 else if (pci_is_pcie(dev->pdev))
2119 dev_priv->flags |= RADEON_IS_PCIE; 2119 dev_priv->flags |= RADEON_IS_PCIE;
2120 else 2120 else
2121 dev_priv->flags |= RADEON_IS_PCI; 2121 dev_priv->flags |= RADEON_IS_PCI;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 292f73f0ddbd..28f4655905bc 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -282,7 +282,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
282 spin_lock_irqsave(&rdev->ddev->event_lock, flags); 282 spin_lock_irqsave(&rdev->ddev->event_lock, flags);
283 work = radeon_crtc->unpin_work; 283 work = radeon_crtc->unpin_work;
284 if (work == NULL || 284 if (work == NULL ||
285 !radeon_fence_signaled(work->fence)) { 285 (work->fence && !radeon_fence_signaled(work->fence))) {
286 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 286 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
287 return; 287 return;
288 } 288 }
@@ -348,7 +348,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
348 struct radeon_framebuffer *new_radeon_fb; 348 struct radeon_framebuffer *new_radeon_fb;
349 struct drm_gem_object *obj; 349 struct drm_gem_object *obj;
350 struct radeon_bo *rbo; 350 struct radeon_bo *rbo;
351 struct radeon_fence *fence;
352 struct radeon_unpin_work *work; 351 struct radeon_unpin_work *work;
353 unsigned long flags; 352 unsigned long flags;
354 u32 tiling_flags, pitch_pixels; 353 u32 tiling_flags, pitch_pixels;
@@ -359,16 +358,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
359 if (work == NULL) 358 if (work == NULL)
360 return -ENOMEM; 359 return -ENOMEM;
361 360
362 r = radeon_fence_create(rdev, &fence);
363 if (unlikely(r != 0)) {
364 kfree(work);
365 DRM_ERROR("flip queue: failed to create fence.\n");
366 return -ENOMEM;
367 }
368 work->event = event; 361 work->event = event;
369 work->rdev = rdev; 362 work->rdev = rdev;
370 work->crtc_id = radeon_crtc->crtc_id; 363 work->crtc_id = radeon_crtc->crtc_id;
371 work->fence = radeon_fence_ref(fence);
372 old_radeon_fb = to_radeon_framebuffer(crtc->fb); 364 old_radeon_fb = to_radeon_framebuffer(crtc->fb);
373 new_radeon_fb = to_radeon_framebuffer(fb); 365 new_radeon_fb = to_radeon_framebuffer(fb);
374 /* schedule unpin of the old buffer */ 366 /* schedule unpin of the old buffer */
@@ -377,6 +369,10 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
377 drm_gem_object_reference(obj); 369 drm_gem_object_reference(obj);
378 rbo = gem_to_radeon_bo(obj); 370 rbo = gem_to_radeon_bo(obj);
379 work->old_rbo = rbo; 371 work->old_rbo = rbo;
372 obj = new_radeon_fb->obj;
373 rbo = gem_to_radeon_bo(obj);
374 if (rbo->tbo.sync_obj)
375 work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
380 INIT_WORK(&work->work, radeon_unpin_work_func); 376 INIT_WORK(&work->work, radeon_unpin_work_func);
381 377
382 /* We borrow the event spin lock for protecting unpin_work */ 378 /* We borrow the event spin lock for protecting unpin_work */
@@ -391,9 +387,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
391 spin_unlock_irqrestore(&dev->event_lock, flags); 387 spin_unlock_irqrestore(&dev->event_lock, flags);
392 388
393 /* pin the new buffer */ 389 /* pin the new buffer */
394 obj = new_radeon_fb->obj;
395 rbo = gem_to_radeon_bo(obj);
396
397 DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", 390 DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
398 work->old_rbo, rbo); 391 work->old_rbo, rbo);
399 392
@@ -461,37 +454,18 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
461 goto pflip_cleanup1; 454 goto pflip_cleanup1;
462 } 455 }
463 456
464 /* 32 ought to cover us */
465 r = radeon_ring_lock(rdev, 32);
466 if (r) {
467 DRM_ERROR("failed to lock the ring before flip\n");
468 goto pflip_cleanup2;
469 }
470
471 /* emit the fence */
472 radeon_fence_emit(rdev, fence);
473 /* set the proper interrupt */ 457 /* set the proper interrupt */
474 radeon_pre_page_flip(rdev, radeon_crtc->crtc_id); 458 radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
475 /* fire the ring */
476 radeon_ring_unlock_commit(rdev);
477 459
478 return 0; 460 return 0;
479 461
480pflip_cleanup2:
481 drm_vblank_put(dev, radeon_crtc->crtc_id);
482
483pflip_cleanup1: 462pflip_cleanup1:
484 r = radeon_bo_reserve(rbo, false); 463 if (unlikely(radeon_bo_reserve(rbo, false) != 0)) {
485 if (unlikely(r != 0)) {
486 DRM_ERROR("failed to reserve new rbo in error path\n"); 464 DRM_ERROR("failed to reserve new rbo in error path\n");
487 goto pflip_cleanup; 465 goto pflip_cleanup;
488 } 466 }
489 r = radeon_bo_unpin(rbo); 467 if (unlikely(radeon_bo_unpin(rbo) != 0)) {
490 if (unlikely(r != 0)) {
491 radeon_bo_unreserve(rbo);
492 r = -EINVAL;
493 DRM_ERROR("failed to unpin new rbo in error path\n"); 468 DRM_ERROR("failed to unpin new rbo in error path\n");
494 goto pflip_cleanup;
495 } 469 }
496 radeon_bo_unreserve(rbo); 470 radeon_bo_unreserve(rbo);
497 471
@@ -501,7 +475,7 @@ pflip_cleanup:
501unlock_free: 475unlock_free:
502 drm_gem_object_unreference_unlocked(old_radeon_fb->obj); 476 drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
503 spin_unlock_irqrestore(&dev->event_lock, flags); 477 spin_unlock_irqrestore(&dev->event_lock, flags);
504 radeon_fence_unref(&fence); 478 radeon_fence_unref(&work->fence);
505 kfree(work); 479 kfree(work);
506 480
507 return r; 481 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 73dfbe8e5f9e..85f033f19a8a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -50,10 +50,11 @@
50 * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs 50 * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
51 * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query 51 * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
52 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query 52 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
53 * 2.10.0 - fusion 2D tiling 53 * 2.10.0 - fusion 2D tiling, initial compute support for the CS checker
54 * 2.11.0 - backend map
54 */ 55 */
55#define KMS_DRIVER_MAJOR 2 56#define KMS_DRIVER_MAJOR 2
56#define KMS_DRIVER_MINOR 10 57#define KMS_DRIVER_MINOR 11
57#define KMS_DRIVER_PATCHLEVEL 0 58#define KMS_DRIVER_PATCHLEVEL 0
58int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 59int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
59int radeon_driver_unload_kms(struct drm_device *dev); 60int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 021d2b6b556f..7fd4e3e5ad5f 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -29,7 +29,7 @@
29 * Dave Airlie 29 * Dave Airlie
30 */ 30 */
31#include <linux/seq_file.h> 31#include <linux/seq_file.h>
32#include <asm/atomic.h> 32#include <linux/atomic.h>
33#include <linux/wait.h> 33#include <linux/wait.h>
34#include <linux/list.h> 34#include <linux/list.h>
35#include <linux/kref.h> 35#include <linux/kref.h>
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index bd58af658581..be2c1224e68a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -60,7 +60,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
60 /* update BUS flag */ 60 /* update BUS flag */
61 if (drm_pci_device_is_agp(dev)) { 61 if (drm_pci_device_is_agp(dev)) {
62 flags |= RADEON_IS_AGP; 62 flags |= RADEON_IS_AGP;
63 } else if (drm_pci_device_is_pcie(dev)) { 63 } else if (pci_is_pcie(dev->pdev)) {
64 flags |= RADEON_IS_PCIE; 64 flags |= RADEON_IS_PCIE;
65 } else { 65 } else {
66 flags |= RADEON_IS_PCI; 66 flags |= RADEON_IS_PCI;
@@ -237,6 +237,19 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
237 case RADEON_INFO_FUSION_GART_WORKING: 237 case RADEON_INFO_FUSION_GART_WORKING:
238 value = 1; 238 value = 1;
239 break; 239 break;
240 case RADEON_INFO_BACKEND_MAP:
241 if (rdev->family >= CHIP_CAYMAN)
242 value = rdev->config.cayman.backend_map;
243 else if (rdev->family >= CHIP_CEDAR)
244 value = rdev->config.evergreen.backend_map;
245 else if (rdev->family >= CHIP_RV770)
246 value = rdev->config.rv770.backend_map;
247 else if (rdev->family >= CHIP_R600)
248 value = rdev->config.r600.backend_map;
249 else {
250 return -EINVAL;
251 }
252 break;
240 default: 253 default:
241 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 254 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
242 return -EINVAL; 255 return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index aaa19dc418a0..6fabe89fa6a1 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -594,6 +594,9 @@ int radeon_pm_init(struct radeon_device *rdev)
594 if (rdev->pm.default_vddc) 594 if (rdev->pm.default_vddc)
595 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 595 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
596 SET_VOLTAGE_TYPE_ASIC_VDDC); 596 SET_VOLTAGE_TYPE_ASIC_VDDC);
597 if (rdev->pm.default_vddci)
598 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
599 SET_VOLTAGE_TYPE_ASIC_VDDCI);
597 if (rdev->pm.default_sclk) 600 if (rdev->pm.default_sclk)
598 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 601 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
599 if (rdev->pm.default_mclk) 602 if (rdev->pm.default_mclk)
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index bc44a3d35ec6..b4ce86455707 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -3295,7 +3295,7 @@
3295# define RADEON_RB_BUFSZ_MASK (0x3f << 0) 3295# define RADEON_RB_BUFSZ_MASK (0x3f << 0)
3296# define RADEON_RB_BLKSZ_SHIFT 8 3296# define RADEON_RB_BLKSZ_SHIFT 8
3297# define RADEON_RB_BLKSZ_MASK (0x3f << 8) 3297# define RADEON_RB_BLKSZ_MASK (0x3f << 8)
3298# define RADEON_BUF_SWAP_32BIT (1 << 17) 3298# define RADEON_BUF_SWAP_32BIT (2 << 16)
3299# define RADEON_MAX_FETCH_SHIFT 18 3299# define RADEON_MAX_FETCH_SHIFT 18
3300# define RADEON_MAX_FETCH_MASK (0x3 << 18) 3300# define RADEON_MAX_FETCH_MASK (0x3 << 18)
3301# define RADEON_RB_NO_UPDATE (1 << 27) 3301# define RADEON_RB_NO_UPDATE (1 << 27)
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index 0aa8e85a9457..2316977eb924 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -208,6 +208,7 @@ cayman 0x9400
2080x0002834C PA_SC_VPORT_ZMAX_15 2080x0002834C PA_SC_VPORT_ZMAX_15
2090x00028350 SX_MISC 2090x00028350 SX_MISC
2100x00028354 SX_SURFACE_SYNC 2100x00028354 SX_SURFACE_SYNC
2110x0002835C SX_SCATTER_EXPORT_SIZE
2110x00028380 SQ_VTX_SEMANTIC_0 2120x00028380 SQ_VTX_SEMANTIC_0
2120x00028384 SQ_VTX_SEMANTIC_1 2130x00028384 SQ_VTX_SEMANTIC_1
2130x00028388 SQ_VTX_SEMANTIC_2 2140x00028388 SQ_VTX_SEMANTIC_2
@@ -432,6 +433,7 @@ cayman 0x9400
4320x00028700 SPI_STACK_MGMT 4330x00028700 SPI_STACK_MGMT
4330x00028704 SPI_WAVE_MGMT_1 4340x00028704 SPI_WAVE_MGMT_1
4340x00028708 SPI_WAVE_MGMT_2 4350x00028708 SPI_WAVE_MGMT_2
4360x00028720 GDS_ADDR_BASE
4350x00028724 GDS_ADDR_SIZE 4370x00028724 GDS_ADDR_SIZE
4360x00028780 CB_BLEND0_CONTROL 4380x00028780 CB_BLEND0_CONTROL
4370x00028784 CB_BLEND1_CONTROL 4390x00028784 CB_BLEND1_CONTROL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index 0e28cae7ea43..161737a28c23 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -44,6 +44,7 @@ evergreen 0x9400
440x00008E28 SQ_STATIC_THREAD_MGMT_3 440x00008E28 SQ_STATIC_THREAD_MGMT_3
450x00008E2C SQ_LDS_RESOURCE_MGMT 450x00008E2C SQ_LDS_RESOURCE_MGMT
460x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 460x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
470x00009014 SX_MEMORY_EXPORT_SIZE
470x00009100 SPI_CONFIG_CNTL 480x00009100 SPI_CONFIG_CNTL
480x0000913C SPI_CONFIG_CNTL_1 490x0000913C SPI_CONFIG_CNTL_1
490x00009508 TA_CNTL_AUX 500x00009508 TA_CNTL_AUX
@@ -442,7 +443,9 @@ evergreen 0x9400
4420x000286EC SPI_COMPUTE_NUM_THREAD_X 4430x000286EC SPI_COMPUTE_NUM_THREAD_X
4430x000286F0 SPI_COMPUTE_NUM_THREAD_Y 4440x000286F0 SPI_COMPUTE_NUM_THREAD_Y
4440x000286F4 SPI_COMPUTE_NUM_THREAD_Z 4450x000286F4 SPI_COMPUTE_NUM_THREAD_Z
4460x00028720 GDS_ADDR_BASE
4450x00028724 GDS_ADDR_SIZE 4470x00028724 GDS_ADDR_SIZE
4480x00028728 GDS_ORDERED_WAVE_PER_SE
4460x00028780 CB_BLEND0_CONTROL 4490x00028780 CB_BLEND0_CONTROL
4470x00028784 CB_BLEND1_CONTROL 4500x00028784 CB_BLEND1_CONTROL
4480x00028788 CB_BLEND2_CONTROL 4510x00028788 CB_BLEND2_CONTROL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index ea49752ee99c..0380c5c15f80 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -429,6 +429,7 @@ r600 0x9400
4290x00028438 SX_ALPHA_REF 4290x00028438 SX_ALPHA_REF
4300x00028410 SX_ALPHA_TEST_CONTROL 4300x00028410 SX_ALPHA_TEST_CONTROL
4310x00028350 SX_MISC 4310x00028350 SX_MISC
4320x00009014 SX_MEMORY_EXPORT_SIZE
4320x00009604 TC_INVALIDATE 4330x00009604 TC_INVALIDATE
4330x00009400 TD_FILTER4 4340x00009400 TD_FILTER4
4340x00009404 TD_FILTER4_1 4350x00009404 TD_FILTER4_1
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 1f5850e473cc..4b5d0e6974a8 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -530,7 +530,7 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
530 addr = addr & 0xFFFFFFFFFFFFF000ULL; 530 addr = addr & 0xFFFFFFFFFFFFF000ULL;
531 addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; 531 addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
532 addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE; 532 addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
533 writeq(addr, ((void __iomem *)ptr) + (i * 8)); 533 writeq(addr, ptr + (i * 8));
534 return 0; 534 return 0;
535} 535}
536 536
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4de51891aa6d..4720d000d440 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -778,6 +778,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
778 (cc_rb_backend_disable >> 16)); 778 (cc_rb_backend_disable >> 16));
779 779
780 rdev->config.rv770.tile_config = gb_tiling_config; 780 rdev->config.rv770.tile_config = gb_tiling_config;
781 rdev->config.rv770.backend_map = backend_map;
781 gb_tiling_config |= BACKEND_MAP(backend_map); 782 gb_tiling_config |= BACKEND_MAP(backend_map);
782 783
783 WREG32(GB_TILING_CONFIG, gb_tiling_config); 784 WREG32(GB_TILING_CONFIG, gb_tiling_config);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2e618b5ac465..56619f64b6bf 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -37,7 +37,7 @@
37#include <linux/mm.h> 37#include <linux/mm.h>
38#include <linux/file.h> 38#include <linux/file.h>
39#include <linux/module.h> 39#include <linux/module.h>
40#include <asm/atomic.h> 40#include <linux/atomic.h>
41 41
42#define TTM_ASSERT_LOCKED(param) 42#define TTM_ASSERT_LOCKED(param)
43#define TTM_DEBUG(fmt, arg...) 43#define TTM_DEBUG(fmt, arg...)
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index de41e55a944a..075daf44bce4 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -30,7 +30,7 @@
30 30
31#include "ttm/ttm_lock.h" 31#include "ttm/ttm_lock.h"
32#include "ttm/ttm_module.h" 32#include "ttm/ttm_module.h"
33#include <asm/atomic.h> 33#include <linux/atomic.h>
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/wait.h> 35#include <linux/wait.h>
36#include <linux/sched.h> 36#include <linux/sched.h>
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index ebddd443d91a..93577f2e2954 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -55,7 +55,7 @@
55#include <linux/spinlock.h> 55#include <linux/spinlock.h>
56#include <linux/slab.h> 56#include <linux/slab.h>
57#include <linux/module.h> 57#include <linux/module.h>
58#include <asm/atomic.h> 58#include <linux/atomic.h>
59 59
60struct ttm_object_file { 60struct ttm_object_file {
61 struct ttm_object_device *tdev; 61 struct ttm_object_device *tdev;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index d948575717bf..727e93daac3b 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -40,7 +40,7 @@
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/dma-mapping.h> 41#include <linux/dma-mapping.h>
42 42
43#include <asm/atomic.h> 43#include <linux/atomic.h>
44 44
45#include "ttm/ttm_bo_driver.h" 45#include "ttm/ttm_bo_driver.h"
46#include "ttm/ttm_page_alloc.h" 46#include "ttm/ttm_page_alloc.h"
@@ -355,7 +355,7 @@ restart:
355 if (nr_free) 355 if (nr_free)
356 goto restart; 356 goto restart;
357 357
358 /* Not allowed to fall tough or break because 358 /* Not allowed to fall through or break because
359 * following context is inside spinlock while we are 359 * following context is inside spinlock while we are
360 * outside here. 360 * outside here.
361 */ 361 */
@@ -556,7 +556,7 @@ out:
556} 556}
557 557
558/** 558/**
559 * Fill the given pool if there isn't enough pages and requested number of 559 * Fill the given pool if there aren't enough pages and the requested number of
560 * pages is small. 560 * pages is small.
561 */ 561 */
562static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, 562static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
@@ -576,8 +576,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
576 576
577 pool->fill_lock = true; 577 pool->fill_lock = true;
578 578
579 /* If allocation request is small and there is not enough 579 /* If allocation request is small and there are not enough
580 * pages in pool we fill the pool first */ 580 * pages in a pool we fill the pool up first. */
581 if (count < _manager->options.small 581 if (count < _manager->options.small
582 && count > pool->npages) { 582 && count > pool->npages) {
583 struct list_head new_pages; 583 struct list_head new_pages;
@@ -614,9 +614,9 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
614} 614}
615 615
616/** 616/**
617 * Cut count nubmer of pages from the pool and put them to return list 617 * Cut 'count' number of pages from the pool and put them on the return list.
618 * 618 *
619 * @return count of pages still to allocate to fill the request. 619 * @return count of pages still required to fulfill the request.
620 */ 620 */
621static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, 621static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
622 struct list_head *pages, int ttm_flags, 622 struct list_head *pages, int ttm_flags,
@@ -637,7 +637,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
637 goto out; 637 goto out;
638 } 638 }
639 /* find the last pages to include for requested number of pages. Split 639 /* find the last pages to include for requested number of pages. Split
640 * pool to begin and halves to reduce search space. */ 640 * pool to begin and halve it to reduce search space. */
641 if (count <= pool->npages/2) { 641 if (count <= pool->npages/2) {
642 i = 0; 642 i = 0;
643 list_for_each(p, &pool->list) { 643 list_for_each(p, &pool->list) {
@@ -651,7 +651,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
651 break; 651 break;
652 } 652 }
653 } 653 }
654 /* Cut count number of pages from pool */ 654 /* Cut 'count' number of pages from the pool */
655 list_cut_position(pages, &pool->list, p); 655 list_cut_position(pages, &pool->list, p);
656 pool->npages -= count; 656 pool->npages -= count;
657 count = 0; 657 count = 0;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 0598cd22edf2..0b62c3c6b7ce 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -623,7 +623,7 @@ config SENSORS_LM90
623 LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A, 623 LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A,
624 Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659, 624 Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
625 MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008, 625 MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008,
626 and Winbond/Nuvoton W83L771W/G/AWG/ASG sensor chips. 626 Winbond/Nuvoton W83L771W/G/AWG/ASG and Philips SA56004 sensor chips.
627 627
628 This driver can also be built as a module. If so, the module 628 This driver can also be built as a module. If so, the module
629 will be called lm90. 629 will be called lm90.
@@ -694,14 +694,24 @@ config SENSORS_LTC4261
694 be called ltc4261. 694 be called ltc4261.
695 695
696config SENSORS_LM95241 696config SENSORS_LM95241
697 tristate "National Semiconductor LM95241 sensor chip" 697 tristate "National Semiconductor LM95241 and compatibles"
698 depends on I2C 698 depends on I2C
699 help 699 help
700 If you say yes here you get support for LM95241 sensor chip. 700 If you say yes here you get support for LM95231 and LM95241 sensor
701 chips.
701 702
702 This driver can also be built as a module. If so, the module 703 This driver can also be built as a module. If so, the module
703 will be called lm95241. 704 will be called lm95241.
704 705
706config SENSORS_LM95245
707 tristate "National Semiconductor LM95245 sensor chip"
708 depends on I2C && EXPERIMENTAL
709 help
710 If you say yes here you get support for LM95245 sensor chip.
711
712 This driver can also be built as a module. If so, the module
713 will be called lm95245.
714
705config SENSORS_MAX1111 715config SENSORS_MAX1111
706 tristate "Maxim MAX1111 Multichannel, Serial 8-bit ADC chip" 716 tristate "Maxim MAX1111 Multichannel, Serial 8-bit ADC chip"
707 depends on SPI_MASTER 717 depends on SPI_MASTER
@@ -736,6 +746,16 @@ config SENSORS_MAX1619
736 This driver can also be built as a module. If so, the module 746 This driver can also be built as a module. If so, the module
737 will be called max1619. 747 will be called max1619.
738 748
749config SENSORS_MAX1668
750 tristate "Maxim MAX1668 and compatibles"
751 depends on I2C && EXPERIMENTAL
752 help
753 If you say yes here you get support for MAX1668, MAX1989 and
754 MAX1805 chips.
755
756 This driver can also be built as a module. If so, the module
757 will be called max1668.
758
739config SENSORS_MAX6639 759config SENSORS_MAX6639
740 tristate "Maxim MAX6639 sensor chip" 760 tristate "Maxim MAX6639 sensor chip"
741 depends on I2C && EXPERIMENTAL 761 depends on I2C && EXPERIMENTAL
@@ -767,6 +787,20 @@ config SENSORS_MAX6650
767 This driver can also be built as a module. If so, the module 787 This driver can also be built as a module. If so, the module
768 will be called max6650. 788 will be called max6650.
769 789
790config SENSORS_NTC_THERMISTOR
791 tristate "NTC thermistor support"
792 depends on EXPERIMENTAL
793 help
794 This driver supports NTC thermistors sensor reading and its
795 interpretation. The driver can also monitor the temperature and
796 send notifications about the temperature.
797
798 Currently, this driver supports
799 NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, and NCP15WL333.
800
801 This driver can also be built as a module. If so, the module
802 will be called ntc-thermistor.
803
770config SENSORS_PC87360 804config SENSORS_PC87360
771 tristate "National Semiconductor PC87360 family" 805 tristate "National Semiconductor PC87360 family"
772 select HWMON_VID 806 select HWMON_VID
@@ -807,92 +841,7 @@ config SENSORS_PCF8591
807 These devices are hard to detect and rarely found on mainstream 841 These devices are hard to detect and rarely found on mainstream
808 hardware. If unsure, say N. 842 hardware. If unsure, say N.
809 843
810config PMBUS 844source drivers/hwmon/pmbus/Kconfig
811 tristate "PMBus support"
812 depends on I2C && EXPERIMENTAL
813 default n
814 help
815 Say yes here if you want to enable PMBus support.
816
817 This driver can also be built as a module. If so, the module will
818 be called pmbus_core.
819
820if PMBUS
821
822config SENSORS_PMBUS
823 tristate "Generic PMBus devices"
824 default n
825 help
826 If you say yes here you get hardware monitoring support for generic
827 PMBus devices, including but not limited to BMR450, BMR451, BMR453,
828 BMR454, and LTC2978.
829
830 This driver can also be built as a module. If so, the module will
831 be called pmbus.
832
833config SENSORS_ADM1275
834 tristate "Analog Devices ADM1275"
835 default n
836 help
837 If you say yes here you get hardware monitoring support for Analog
838 Devices ADM1275 Hot-Swap Controller and Digital Power Monitor.
839
840 This driver can also be built as a module. If so, the module will
841 be called adm1275.
842
843config SENSORS_MAX16064
844 tristate "Maxim MAX16064"
845 default n
846 help
847 If you say yes here you get hardware monitoring support for Maxim
848 MAX16064.
849
850 This driver can also be built as a module. If so, the module will
851 be called max16064.
852
853config SENSORS_MAX34440
854 tristate "Maxim MAX34440/MAX34441"
855 default n
856 help
857 If you say yes here you get hardware monitoring support for Maxim
858 MAX34440 and MAX34441.
859
860 This driver can also be built as a module. If so, the module will
861 be called max34440.
862
863config SENSORS_MAX8688
864 tristate "Maxim MAX8688"
865 default n
866 help
867 If you say yes here you get hardware monitoring support for Maxim
868 MAX8688.
869
870 This driver can also be built as a module. If so, the module will
871 be called max8688.
872
873config SENSORS_UCD9000
874 tristate "TI UCD90120, UCD90124, UCD9090, UCD90910"
875 default n
876 help
877 If you say yes here you get hardware monitoring support for TI
878 UCD90120, UCD90124, UCD9090, UCD90910 Sequencer and System Health
879 Controllers.
880
881 This driver can also be built as a module. If so, the module will
882 be called ucd9000.
883
884config SENSORS_UCD9200
885 tristate "TI UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, UCD9248"
886 default n
887 help
888 If you say yes here you get hardware monitoring support for TI
889 UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, and UCD9248
890 Digital PWM System Controllers.
891
892 This driver can also be built as a module. If so, the module will
893 be called ucd9200.
894
895endif # PMBUS
896 845
897config SENSORS_SHT15 846config SENSORS_SHT15
898 tristate "Sensiron humidity and temperature sensors. SHT15 and compat." 847 tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index d7995a1d0784..3c9ccefea791 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -80,6 +80,7 @@ obj-$(CONFIG_SENSORS_LM90) += lm90.o
80obj-$(CONFIG_SENSORS_LM92) += lm92.o 80obj-$(CONFIG_SENSORS_LM92) += lm92.o
81obj-$(CONFIG_SENSORS_LM93) += lm93.o 81obj-$(CONFIG_SENSORS_LM93) += lm93.o
82obj-$(CONFIG_SENSORS_LM95241) += lm95241.o 82obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
83obj-$(CONFIG_SENSORS_LM95245) += lm95245.o
83obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o 84obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
84obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o 85obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
85obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o 86obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o
@@ -87,10 +88,12 @@ obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o
87obj-$(CONFIG_SENSORS_MAX1111) += max1111.o 88obj-$(CONFIG_SENSORS_MAX1111) += max1111.o
88obj-$(CONFIG_SENSORS_MAX16065) += max16065.o 89obj-$(CONFIG_SENSORS_MAX16065) += max16065.o
89obj-$(CONFIG_SENSORS_MAX1619) += max1619.o 90obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
91obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
90obj-$(CONFIG_SENSORS_MAX6639) += max6639.o 92obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
91obj-$(CONFIG_SENSORS_MAX6642) += max6642.o 93obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
92obj-$(CONFIG_SENSORS_MAX6650) += max6650.o 94obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
93obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o 95obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
96obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
94obj-$(CONFIG_SENSORS_PC87360) += pc87360.o 97obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
95obj-$(CONFIG_SENSORS_PC87427) += pc87427.o 98obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
96obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o 99obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
@@ -121,15 +124,7 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
121obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o 124obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
122obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o 125obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
123 126
124# PMBus drivers 127obj-$(CONFIG_PMBUS) += pmbus/
125obj-$(CONFIG_PMBUS) += pmbus_core.o
126obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
127obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
128obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
129obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
130obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
131obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
132obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
133 128
134ccflags-$(CONFIG_HWMON_DEBUG_CHIP) := -DDEBUG 129ccflags-$(CONFIG_HWMON_DEBUG_CHIP) := -DDEBUG
135 130
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 0070d5476dd0..59d83e83da7f 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -44,7 +44,9 @@
44#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ 44#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
45#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */ 45#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */
46#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ 46#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
47#define MAX_ATTRS 5 /* Maximum no of per-core attrs */ 47#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
48#define MAX_THRESH_ATTRS 3 /* Maximum no of Threshold attrs */
49#define TOTAL_ATTRS (MAX_CORE_ATTRS + MAX_THRESH_ATTRS)
48#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) 50#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
49 51
50#ifdef CONFIG_SMP 52#ifdef CONFIG_SMP
@@ -67,6 +69,9 @@
67 * This value is passed as "id" field to rdmsr/wrmsr functions. 69 * This value is passed as "id" field to rdmsr/wrmsr functions.
68 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS, 70 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
69 * from where the temperature values should be read. 71 * from where the temperature values should be read.
72 * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT,
73 * from where the thresholds are read.
74 * @attr_size: Total number of pre-core attrs displayed in the sysfs.
70 * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data. 75 * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
71 * Otherwise, temp_data holds coretemp data. 76 * Otherwise, temp_data holds coretemp data.
72 * @valid: If this is 1, the current temperature is valid. 77 * @valid: If this is 1, the current temperature is valid.
@@ -74,15 +79,18 @@
74struct temp_data { 79struct temp_data {
75 int temp; 80 int temp;
76 int ttarget; 81 int ttarget;
82 int tmin;
77 int tjmax; 83 int tjmax;
78 unsigned long last_updated; 84 unsigned long last_updated;
79 unsigned int cpu; 85 unsigned int cpu;
80 u32 cpu_core_id; 86 u32 cpu_core_id;
81 u32 status_reg; 87 u32 status_reg;
88 u32 intrpt_reg;
89 int attr_size;
82 bool is_pkg_data; 90 bool is_pkg_data;
83 bool valid; 91 bool valid;
84 struct sensor_device_attribute sd_attrs[MAX_ATTRS]; 92 struct sensor_device_attribute sd_attrs[TOTAL_ATTRS];
85 char attr_name[MAX_ATTRS][CORETEMP_NAME_LENGTH]; 93 char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH];
86 struct mutex update_lock; 94 struct mutex update_lock;
87}; 95};
88 96
@@ -135,6 +143,19 @@ static ssize_t show_crit_alarm(struct device *dev,
135 return sprintf(buf, "%d\n", (eax >> 5) & 1); 143 return sprintf(buf, "%d\n", (eax >> 5) & 1);
136} 144}
137 145
146static ssize_t show_max_alarm(struct device *dev,
147 struct device_attribute *devattr, char *buf)
148{
149 u32 eax, edx;
150 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
151 struct platform_data *pdata = dev_get_drvdata(dev);
152 struct temp_data *tdata = pdata->core_data[attr->index];
153
154 rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
155
156 return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1));
157}
158
138static ssize_t show_tjmax(struct device *dev, 159static ssize_t show_tjmax(struct device *dev,
139 struct device_attribute *devattr, char *buf) 160 struct device_attribute *devattr, char *buf)
140{ 161{
@@ -153,6 +174,83 @@ static ssize_t show_ttarget(struct device *dev,
153 return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget); 174 return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
154} 175}
155 176
177static ssize_t store_ttarget(struct device *dev,
178 struct device_attribute *devattr,
179 const char *buf, size_t count)
180{
181 struct platform_data *pdata = dev_get_drvdata(dev);
182 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
183 struct temp_data *tdata = pdata->core_data[attr->index];
184 u32 eax, edx;
185 unsigned long val;
186 int diff;
187
188 if (strict_strtoul(buf, 10, &val))
189 return -EINVAL;
190
191 /*
192 * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms
193 * of milli degree celsius. Hence don't accept val > (127 * 1000)
194 */
195 if (val > tdata->tjmax || val > 127000)
196 return -EINVAL;
197
198 diff = (tdata->tjmax - val) / 1000;
199
200 mutex_lock(&tdata->update_lock);
201 rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
202 eax = (eax & ~THERM_MASK_THRESHOLD1) |
203 (diff << THERM_SHIFT_THRESHOLD1);
204 wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
205 tdata->ttarget = val;
206 mutex_unlock(&tdata->update_lock);
207
208 return count;
209}
210
211static ssize_t show_tmin(struct device *dev,
212 struct device_attribute *devattr, char *buf)
213{
214 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
215 struct platform_data *pdata = dev_get_drvdata(dev);
216
217 return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin);
218}
219
220static ssize_t store_tmin(struct device *dev,
221 struct device_attribute *devattr,
222 const char *buf, size_t count)
223{
224 struct platform_data *pdata = dev_get_drvdata(dev);
225 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
226 struct temp_data *tdata = pdata->core_data[attr->index];
227 u32 eax, edx;
228 unsigned long val;
229 int diff;
230
231 if (strict_strtoul(buf, 10, &val))
232 return -EINVAL;
233
234 /*
235 * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms
236 * of milli degree celsius. Hence don't accept val > (127 * 1000)
237 */
238 if (val > tdata->tjmax || val > 127000)
239 return -EINVAL;
240
241 diff = (tdata->tjmax - val) / 1000;
242
243 mutex_lock(&tdata->update_lock);
244 rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
245 eax = (eax & ~THERM_MASK_THRESHOLD0) |
246 (diff << THERM_SHIFT_THRESHOLD0);
247 wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
248 tdata->tmin = val;
249 mutex_unlock(&tdata->update_lock);
250
251 return count;
252}
253
156static ssize_t show_temp(struct device *dev, 254static ssize_t show_temp(struct device *dev,
157 struct device_attribute *devattr, char *buf) 255 struct device_attribute *devattr, char *buf)
158{ 256{
@@ -344,23 +442,31 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
344 int attr_no) 442 int attr_no)
345{ 443{
346 int err, i; 444 int err, i;
347 static ssize_t (*rd_ptr[MAX_ATTRS]) (struct device *dev, 445 static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev,
348 struct device_attribute *devattr, char *buf) = { 446 struct device_attribute *devattr, char *buf) = {
349 show_label, show_crit_alarm, show_ttarget, 447 show_label, show_crit_alarm, show_temp, show_tjmax,
350 show_temp, show_tjmax }; 448 show_max_alarm, show_ttarget, show_tmin };
351 static const char *names[MAX_ATTRS] = { 449 static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev,
450 struct device_attribute *devattr, const char *buf,
451 size_t count) = { NULL, NULL, NULL, NULL, NULL,
452 store_ttarget, store_tmin };
453 static const char *names[TOTAL_ATTRS] = {
352 "temp%d_label", "temp%d_crit_alarm", 454 "temp%d_label", "temp%d_crit_alarm",
353 "temp%d_max", "temp%d_input", 455 "temp%d_input", "temp%d_crit",
354 "temp%d_crit" }; 456 "temp%d_max_alarm", "temp%d_max",
457 "temp%d_max_hyst" };
355 458
356 for (i = 0; i < MAX_ATTRS; i++) { 459 for (i = 0; i < tdata->attr_size; i++) {
357 snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], 460 snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
358 attr_no); 461 attr_no);
359 sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); 462 sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
360 tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; 463 tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
361 tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; 464 tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
465 if (rw_ptr[i]) {
466 tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR;
467 tdata->sd_attrs[i].dev_attr.store = rw_ptr[i];
468 }
362 tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; 469 tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
363 tdata->sd_attrs[i].dev_attr.store = NULL;
364 tdata->sd_attrs[i].index = attr_no; 470 tdata->sd_attrs[i].index = attr_no;
365 err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr); 471 err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
366 if (err) 472 if (err)
@@ -374,38 +480,6 @@ exit_free:
374 return err; 480 return err;
375} 481}
376 482
377static void update_ttarget(__u8 cpu_model, struct temp_data *tdata,
378 struct device *dev)
379{
380 int err;
381 u32 eax, edx;
382
383 /*
384 * Initialize ttarget value. Eventually this will be
385 * initialized with the value from MSR_IA32_THERM_INTERRUPT
386 * register. If IA32_TEMPERATURE_TARGET is supported, this
387 * value will be over written below.
388 * To Do: Patch to initialize ttarget from MSR_IA32_THERM_INTERRUPT
389 */
390 tdata->ttarget = tdata->tjmax - 20000;
391
392 /*
393 * Read the still undocumented IA32_TEMPERATURE_TARGET. It exists
394 * on older CPUs but not in this register,
395 * Atoms don't have it either.
396 */
397 if (cpu_model > 0xe && cpu_model != 0x1c) {
398 err = rdmsr_safe_on_cpu(tdata->cpu,
399 MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
400 if (err) {
401 dev_warn(dev,
402 "Unable to read IA32_TEMPERATURE_TARGET MSR\n");
403 } else {
404 tdata->ttarget = tdata->tjmax -
405 ((eax >> 8) & 0xff) * 1000;
406 }
407 }
408}
409 483
410static int __devinit chk_ucode_version(struct platform_device *pdev) 484static int __devinit chk_ucode_version(struct platform_device *pdev)
411{ 485{
@@ -464,9 +538,12 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
464 538
465 tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS : 539 tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
466 MSR_IA32_THERM_STATUS; 540 MSR_IA32_THERM_STATUS;
541 tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT :
542 MSR_IA32_THERM_INTERRUPT;
467 tdata->is_pkg_data = pkg_flag; 543 tdata->is_pkg_data = pkg_flag;
468 tdata->cpu = cpu; 544 tdata->cpu = cpu;
469 tdata->cpu_core_id = TO_CORE_ID(cpu); 545 tdata->cpu_core_id = TO_CORE_ID(cpu);
546 tdata->attr_size = MAX_CORE_ATTRS;
470 mutex_init(&tdata->update_lock); 547 mutex_init(&tdata->update_lock);
471 return tdata; 548 return tdata;
472} 549}
@@ -516,7 +593,17 @@ static int create_core_data(struct platform_data *pdata,
516 else 593 else
517 tdata->tjmax = get_tjmax(c, cpu, &pdev->dev); 594 tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
518 595
519 update_ttarget(c->x86_model, tdata, &pdev->dev); 596 /*
597 * Test if we can access the intrpt register. If so, increase the
598 * 'size' enough to have ttarget/tmin/max_alarm interfaces.
599 * Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT
600 */
601 err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);
602 if (!err) {
603 tdata->attr_size += MAX_THRESH_ATTRS;
604 tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
605 }
606
520 pdata->core_data[attr_no] = tdata; 607 pdata->core_data[attr_no] = tdata;
521 608
522 /* Create sysfs interfaces */ 609 /* Create sysfs interfaces */
@@ -553,7 +640,7 @@ static void coretemp_remove_core(struct platform_data *pdata,
553 struct temp_data *tdata = pdata->core_data[indx]; 640 struct temp_data *tdata = pdata->core_data[indx];
554 641
555 /* Remove the sysfs attributes */ 642 /* Remove the sysfs attributes */
556 for (i = 0; i < MAX_ATTRS; i++) 643 for (i = 0; i < tdata->attr_size; i++)
557 device_remove_file(dev, &tdata->sd_attrs[i].dev_attr); 644 device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
558 645
559 kfree(pdata->core_data[indx]); 646 kfree(pdata->core_data[indx]);
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 2f94f9504804..90ddb8774210 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -54,6 +54,9 @@
54 * and extended mode. They are mostly compatible with LM90 except for a data 54 * and extended mode. They are mostly compatible with LM90 except for a data
55 * format difference for the temperature value registers. 55 * format difference for the temperature value registers.
56 * 56 *
57 * This driver also supports the SA56004 from Philips. This device is
58 * pin-compatible with the LM86, the ED/EDP parts are also address-compatible.
59 *
57 * Since the LM90 was the first chipset supported by this driver, most 60 * Since the LM90 was the first chipset supported by this driver, most
58 * comments will refer to this chipset, but are actually general and 61 * comments will refer to this chipset, but are actually general and
59 * concern all supported chipsets, unless mentioned otherwise. 62 * concern all supported chipsets, unless mentioned otherwise.
@@ -96,13 +99,15 @@
96 * MAX6659 can have address 0x4c, 0x4d or 0x4e. 99 * MAX6659 can have address 0x4c, 0x4d or 0x4e.
97 * MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 100 * MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
98 * 0x4c, 0x4d or 0x4e. 101 * 0x4c, 0x4d or 0x4e.
102 * SA56004 can have address 0x48 through 0x4F.
99 */ 103 */
100 104
101static const unsigned short normal_i2c[] = { 105static const unsigned short normal_i2c[] = {
102 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; 106 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x48, 0x49, 0x4a, 0x4b, 0x4c,
107 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
103 108
104enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680, 109enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
105 max6646, w83l771, max6696 }; 110 max6646, w83l771, max6696, sa56004 };
106 111
107/* 112/*
108 * The LM90 registers 113 * The LM90 registers
@@ -152,6 +157,10 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
152#define MAX6659_REG_R_LOCAL_EMERG 0x17 157#define MAX6659_REG_R_LOCAL_EMERG 0x17
153#define MAX6659_REG_W_LOCAL_EMERG 0x17 158#define MAX6659_REG_W_LOCAL_EMERG 0x17
154 159
160/* SA56004 registers */
161
162#define SA56004_REG_R_LOCAL_TEMPL 0x22
163
155#define LM90_DEF_CONVRATE_RVAL 6 /* Def conversion rate register value */ 164#define LM90_DEF_CONVRATE_RVAL 6 /* Def conversion rate register value */
156#define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */ 165#define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */
157 166
@@ -161,7 +170,6 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
161#define LM90_FLAG_ADT7461_EXT (1 << 0) /* ADT7461 extended mode */ 170#define LM90_FLAG_ADT7461_EXT (1 << 0) /* ADT7461 extended mode */
162/* Device features */ 171/* Device features */
163#define LM90_HAVE_OFFSET (1 << 1) /* temperature offset register */ 172#define LM90_HAVE_OFFSET (1 << 1) /* temperature offset register */
164#define LM90_HAVE_LOCAL_EXT (1 << 2) /* extended local temperature */
165#define LM90_HAVE_REM_LIMIT_EXT (1 << 3) /* extended remote limit */ 173#define LM90_HAVE_REM_LIMIT_EXT (1 << 3) /* extended remote limit */
166#define LM90_HAVE_EMERGENCY (1 << 4) /* 3rd upper (emergency) limit */ 174#define LM90_HAVE_EMERGENCY (1 << 4) /* 3rd upper (emergency) limit */
167#define LM90_HAVE_EMERGENCY_ALARM (1 << 5)/* emergency alarm */ 175#define LM90_HAVE_EMERGENCY_ALARM (1 << 5)/* emergency alarm */
@@ -192,6 +200,7 @@ static const struct i2c_device_id lm90_id[] = {
192 { "max6696", max6696 }, 200 { "max6696", max6696 },
193 { "nct1008", adt7461 }, 201 { "nct1008", adt7461 },
194 { "w83l771", w83l771 }, 202 { "w83l771", w83l771 },
203 { "sa56004", sa56004 },
195 { } 204 { }
196}; 205};
197MODULE_DEVICE_TABLE(i2c, lm90_id); 206MODULE_DEVICE_TABLE(i2c, lm90_id);
@@ -204,6 +213,7 @@ struct lm90_params {
204 u16 alert_alarms; /* Which alarm bits trigger ALERT# */ 213 u16 alert_alarms; /* Which alarm bits trigger ALERT# */
205 /* Upper 8 bits for max6695/96 */ 214 /* Upper 8 bits for max6695/96 */
206 u8 max_convrate; /* Maximum conversion rate register value */ 215 u8 max_convrate; /* Maximum conversion rate register value */
216 u8 reg_local_ext; /* Extended local temp register (optional) */
207}; 217};
208 218
209static const struct lm90_params lm90_params[] = { 219static const struct lm90_params lm90_params[] = {
@@ -235,19 +245,20 @@ static const struct lm90_params lm90_params[] = {
235 .max_convrate = 9, 245 .max_convrate = 9,
236 }, 246 },
237 [max6646] = { 247 [max6646] = {
238 .flags = LM90_HAVE_LOCAL_EXT,
239 .alert_alarms = 0x7c, 248 .alert_alarms = 0x7c,
240 .max_convrate = 6, 249 .max_convrate = 6,
250 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
241 }, 251 },
242 [max6657] = { 252 [max6657] = {
243 .flags = LM90_HAVE_LOCAL_EXT,
244 .alert_alarms = 0x7c, 253 .alert_alarms = 0x7c,
245 .max_convrate = 8, 254 .max_convrate = 8,
255 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
246 }, 256 },
247 [max6659] = { 257 [max6659] = {
248 .flags = LM90_HAVE_LOCAL_EXT | LM90_HAVE_EMERGENCY, 258 .flags = LM90_HAVE_EMERGENCY,
249 .alert_alarms = 0x7c, 259 .alert_alarms = 0x7c,
250 .max_convrate = 8, 260 .max_convrate = 8,
261 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
251 }, 262 },
252 [max6680] = { 263 [max6680] = {
253 .flags = LM90_HAVE_OFFSET, 264 .flags = LM90_HAVE_OFFSET,
@@ -255,16 +266,23 @@ static const struct lm90_params lm90_params[] = {
255 .max_convrate = 7, 266 .max_convrate = 7,
256 }, 267 },
257 [max6696] = { 268 [max6696] = {
258 .flags = LM90_HAVE_LOCAL_EXT | LM90_HAVE_EMERGENCY 269 .flags = LM90_HAVE_EMERGENCY
259 | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3, 270 | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
260 .alert_alarms = 0x187c, 271 .alert_alarms = 0x187c,
261 .max_convrate = 6, 272 .max_convrate = 6,
273 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
262 }, 274 },
263 [w83l771] = { 275 [w83l771] = {
264 .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT, 276 .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
265 .alert_alarms = 0x7c, 277 .alert_alarms = 0x7c,
266 .max_convrate = 8, 278 .max_convrate = 8,
267 }, 279 },
280 [sa56004] = {
281 .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
282 .alert_alarms = 0x7b,
283 .max_convrate = 9,
284 .reg_local_ext = SA56004_REG_R_LOCAL_TEMPL,
285 },
268}; 286};
269 287
270/* 288/*
@@ -286,6 +304,7 @@ struct lm90_data {
286 u16 alert_alarms; /* Which alarm bits trigger ALERT# */ 304 u16 alert_alarms; /* Which alarm bits trigger ALERT# */
287 /* Upper 8 bits for max6695/96 */ 305 /* Upper 8 bits for max6695/96 */
288 u8 max_convrate; /* Maximum conversion rate */ 306 u8 max_convrate; /* Maximum conversion rate */
307 u8 reg_local_ext; /* local extension register offset */
289 308
290 /* registers values */ 309 /* registers values */
291 s8 temp8[8]; /* 0: local low limit 310 s8 temp8[8]; /* 0: local low limit
@@ -452,9 +471,9 @@ static struct lm90_data *lm90_update_device(struct device *dev)
452 lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, &data->temp8[3]); 471 lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, &data->temp8[3]);
453 lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst); 472 lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst);
454 473
455 if (data->flags & LM90_HAVE_LOCAL_EXT) { 474 if (data->reg_local_ext) {
456 lm90_read16(client, LM90_REG_R_LOCAL_TEMP, 475 lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
457 MAX6657_REG_R_LOCAL_TEMPL, 476 data->reg_local_ext,
458 &data->temp11[4]); 477 &data->temp11[4]);
459 } else { 478 } else {
460 if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP, 479 if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP,
@@ -1092,7 +1111,7 @@ static int lm90_detect(struct i2c_client *new_client,
1092 struct i2c_adapter *adapter = new_client->adapter; 1111 struct i2c_adapter *adapter = new_client->adapter;
1093 int address = new_client->addr; 1112 int address = new_client->addr;
1094 const char *name = NULL; 1113 const char *name = NULL;
1095 int man_id, chip_id, reg_config1, reg_convrate; 1114 int man_id, chip_id, reg_config1, reg_config2, reg_convrate;
1096 1115
1097 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 1116 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
1098 return -ENODEV; 1117 return -ENODEV;
@@ -1108,15 +1127,16 @@ static int lm90_detect(struct i2c_client *new_client,
1108 LM90_REG_R_CONVRATE)) < 0) 1127 LM90_REG_R_CONVRATE)) < 0)
1109 return -ENODEV; 1128 return -ENODEV;
1110 1129
1111 if ((address == 0x4C || address == 0x4D) 1130 if (man_id == 0x01 || man_id == 0x5C || man_id == 0x41) {
1112 && man_id == 0x01) { /* National Semiconductor */
1113 int reg_config2;
1114
1115 reg_config2 = i2c_smbus_read_byte_data(new_client, 1131 reg_config2 = i2c_smbus_read_byte_data(new_client,
1116 LM90_REG_R_CONFIG2); 1132 LM90_REG_R_CONFIG2);
1117 if (reg_config2 < 0) 1133 if (reg_config2 < 0)
1118 return -ENODEV; 1134 return -ENODEV;
1135 } else
1136 reg_config2 = 0; /* Make compiler happy */
1119 1137
1138 if ((address == 0x4C || address == 0x4D)
1139 && man_id == 0x01) { /* National Semiconductor */
1120 if ((reg_config1 & 0x2A) == 0x00 1140 if ((reg_config1 & 0x2A) == 0x00
1121 && (reg_config2 & 0xF8) == 0x00 1141 && (reg_config2 & 0xF8) == 0x00
1122 && reg_convrate <= 0x09) { 1142 && reg_convrate <= 0x09) {
@@ -1245,13 +1265,6 @@ static int lm90_detect(struct i2c_client *new_client,
1245 } else 1265 } else
1246 if (address == 0x4C 1266 if (address == 0x4C
1247 && man_id == 0x5C) { /* Winbond/Nuvoton */ 1267 && man_id == 0x5C) { /* Winbond/Nuvoton */
1248 int reg_config2;
1249
1250 reg_config2 = i2c_smbus_read_byte_data(new_client,
1251 LM90_REG_R_CONFIG2);
1252 if (reg_config2 < 0)
1253 return -ENODEV;
1254
1255 if ((reg_config1 & 0x2A) == 0x00 1268 if ((reg_config1 & 0x2A) == 0x00
1256 && (reg_config2 & 0xF8) == 0x00) { 1269 && (reg_config2 & 0xF8) == 0x00) {
1257 if (chip_id == 0x01 /* W83L771W/G */ 1270 if (chip_id == 0x01 /* W83L771W/G */
@@ -1263,6 +1276,15 @@ static int lm90_detect(struct i2c_client *new_client,
1263 name = "w83l771"; 1276 name = "w83l771";
1264 } 1277 }
1265 } 1278 }
1279 } else
1280 if (address >= 0x48 && address <= 0x4F
1281 && man_id == 0xA1) { /* NXP Semiconductor/Philips */
1282 if (chip_id == 0x00
1283 && (reg_config1 & 0x2A) == 0x00
1284 && (reg_config2 & 0xFE) == 0x00
1285 && reg_convrate <= 0x09) {
1286 name = "sa56004";
1287 }
1266 } 1288 }
1267 1289
1268 if (!name) { /* identification failed */ 1290 if (!name) { /* identification failed */
@@ -1368,6 +1390,7 @@ static int lm90_probe(struct i2c_client *new_client,
1368 1390
1369 /* Set chip capabilities */ 1391 /* Set chip capabilities */
1370 data->flags = lm90_params[data->kind].flags; 1392 data->flags = lm90_params[data->kind].flags;
1393 data->reg_local_ext = lm90_params[data->kind].reg_local_ext;
1371 1394
1372 /* Set maximum conversion rate */ 1395 /* Set maximum conversion rate */
1373 data->max_convrate = lm90_params[data->kind].max_convrate; 1396 data->max_convrate = lm90_params[data->kind].max_convrate;
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index d3b464b74ced..513901d592a9 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -74,8 +74,9 @@ static const unsigned short normal_i2c[] = {
74#define TT_OFF 0 74#define TT_OFF 0
75#define TT_ON 1 75#define TT_ON 1
76#define TT_MASK 7 76#define TT_MASK 7
77#define MANUFACTURER_ID 0x01 77#define NATSEMI_MAN_ID 0x01
78#define DEFAULT_REVISION 0xA4 78#define LM95231_CHIP_ID 0xA1
79#define LM95241_CHIP_ID 0xA4
79 80
80static const u8 lm95241_reg_address[] = { 81static const u8 lm95241_reg_address[] = {
81 LM95241_REG_R_LOCAL_TEMPH, 82 LM95241_REG_R_LOCAL_TEMPH,
@@ -338,20 +339,25 @@ static int lm95241_detect(struct i2c_client *new_client,
338 struct i2c_board_info *info) 339 struct i2c_board_info *info)
339{ 340{
340 struct i2c_adapter *adapter = new_client->adapter; 341 struct i2c_adapter *adapter = new_client->adapter;
341 int address = new_client->addr;
342 const char *name; 342 const char *name;
343 int mfg_id, chip_id;
343 344
344 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 345 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
345 return -ENODEV; 346 return -ENODEV;
346 347
347 if ((i2c_smbus_read_byte_data(new_client, LM95241_REG_R_MAN_ID) 348 mfg_id = i2c_smbus_read_byte_data(new_client, LM95241_REG_R_MAN_ID);
348 == MANUFACTURER_ID) 349 if (mfg_id != NATSEMI_MAN_ID)
349 && (i2c_smbus_read_byte_data(new_client, LM95241_REG_R_CHIP_ID) 350 return -ENODEV;
350 == DEFAULT_REVISION)) { 351
351 name = DEVNAME; 352 chip_id = i2c_smbus_read_byte_data(new_client, LM95241_REG_R_CHIP_ID);
352 } else { 353 switch (chip_id) {
353 dev_dbg(&adapter->dev, "LM95241 detection failed at 0x%02x\n", 354 case LM95231_CHIP_ID:
354 address); 355 name = "lm95231";
356 break;
357 case LM95241_CHIP_ID:
358 name = "lm95241";
359 break;
360 default:
355 return -ENODEV; 361 return -ENODEV;
356 } 362 }
357 363
@@ -431,7 +437,8 @@ static int lm95241_remove(struct i2c_client *client)
431 437
432/* Driver data (common to all clients) */ 438/* Driver data (common to all clients) */
433static const struct i2c_device_id lm95241_id[] = { 439static const struct i2c_device_id lm95241_id[] = {
434 { DEVNAME, 0 }, 440 { "lm95231", 0 },
441 { "lm95241", 0 },
435 { } 442 { }
436}; 443};
437MODULE_DEVICE_TABLE(i2c, lm95241_id); 444MODULE_DEVICE_TABLE(i2c, lm95241_id);
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
new file mode 100644
index 000000000000..dce9e68241e6
--- /dev/null
+++ b/drivers/hwmon/lm95245.c
@@ -0,0 +1,543 @@
1/*
2 * Copyright (C) 2011 Alexander Stein <alexander.stein@systec-electronic.com>
3 *
4 * The LM95245 is a sensor chip made by National Semiconductors.
5 * It reports up to two temperatures (its own plus an external one).
6 * Complete datasheet can be obtained from National's website at:
7 * http://www.national.com/ds.cgi/LM/LM95245.pdf
8 *
9 * This driver is based on lm95241.c
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/slab.h>
29#include <linux/jiffies.h>
30#include <linux/i2c.h>
31#include <linux/hwmon.h>
32#include <linux/hwmon-sysfs.h>
33#include <linux/err.h>
34#include <linux/mutex.h>
35#include <linux/sysfs.h>
36
37#define DEVNAME "lm95245"
38
39static const unsigned short normal_i2c[] = {
40 0x18, 0x19, 0x29, 0x4c, 0x4d, I2C_CLIENT_END };
41
42/* LM95245 registers */
43/* general registers */
44#define LM95245_REG_RW_CONFIG1 0x03
45#define LM95245_REG_RW_CONVERS_RATE 0x04
46#define LM95245_REG_W_ONE_SHOT 0x0F
47
48/* diode configuration */
49#define LM95245_REG_RW_CONFIG2 0xBF
50#define LM95245_REG_RW_REMOTE_OFFH 0x11
51#define LM95245_REG_RW_REMOTE_OFFL 0x12
52
53/* status registers */
54#define LM95245_REG_R_STATUS1 0x02
55#define LM95245_REG_R_STATUS2 0x33
56
57/* limit registers */
58#define LM95245_REG_RW_REMOTE_OS_LIMIT 0x07
59#define LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT 0x20
60#define LM95245_REG_RW_REMOTE_TCRIT_LIMIT 0x19
61#define LM95245_REG_RW_COMMON_HYSTERESIS 0x21
62
63/* temperature signed */
64#define LM95245_REG_R_LOCAL_TEMPH_S 0x00
65#define LM95245_REG_R_LOCAL_TEMPL_S 0x30
66#define LM95245_REG_R_REMOTE_TEMPH_S 0x01
67#define LM95245_REG_R_REMOTE_TEMPL_S 0x10
68/* temperature unsigned */
69#define LM95245_REG_R_REMOTE_TEMPH_U 0x31
70#define LM95245_REG_R_REMOTE_TEMPL_U 0x32
71
72/* id registers */
73#define LM95245_REG_R_MAN_ID 0xFE
74#define LM95245_REG_R_CHIP_ID 0xFF
75
76/* LM95245 specific bitfields */
77#define CFG_STOP 0x40
78#define CFG_REMOTE_TCRIT_MASK 0x10
79#define CFG_REMOTE_OS_MASK 0x08
80#define CFG_LOCAL_TCRIT_MASK 0x04
81#define CFG_LOCAL_OS_MASK 0x02
82
83#define CFG2_OS_A0 0x40
84#define CFG2_DIODE_FAULT_OS 0x20
85#define CFG2_DIODE_FAULT_TCRIT 0x10
86#define CFG2_REMOTE_TT 0x08
87#define CFG2_REMOTE_FILTER_DIS 0x00
88#define CFG2_REMOTE_FILTER_EN 0x06
89
90/* conversation rate in ms */
91#define RATE_CR0063 0x00
92#define RATE_CR0364 0x01
93#define RATE_CR1000 0x02
94#define RATE_CR2500 0x03
95
96#define STATUS1_DIODE_FAULT 0x04
97#define STATUS1_RTCRIT 0x02
98#define STATUS1_LOC 0x01
99
100#define MANUFACTURER_ID 0x01
101#define DEFAULT_REVISION 0xB3
102
103static const u8 lm95245_reg_address[] = {
104 LM95245_REG_R_LOCAL_TEMPH_S,
105 LM95245_REG_R_LOCAL_TEMPL_S,
106 LM95245_REG_R_REMOTE_TEMPH_S,
107 LM95245_REG_R_REMOTE_TEMPL_S,
108 LM95245_REG_R_REMOTE_TEMPH_U,
109 LM95245_REG_R_REMOTE_TEMPL_U,
110 LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT,
111 LM95245_REG_RW_REMOTE_TCRIT_LIMIT,
112 LM95245_REG_RW_COMMON_HYSTERESIS,
113 LM95245_REG_R_STATUS1,
114};
115
116/* Client data (each client gets its own) */
117struct lm95245_data {
118 struct device *hwmon_dev;
119 struct mutex update_lock;
120 unsigned long last_updated; /* in jiffies */
121 unsigned long interval; /* in msecs */
122 bool valid; /* zero until following fields are valid */
123 /* registers values */
124 u8 regs[ARRAY_SIZE(lm95245_reg_address)];
125 u8 config1, config2;
126};
127
128/* Conversions */
129static int temp_from_reg_unsigned(u8 val_h, u8 val_l)
130{
131 return val_h * 1000 + val_l * 1000 / 256;
132}
133
134static int temp_from_reg_signed(u8 val_h, u8 val_l)
135{
136 if (val_h & 0x80)
137 return (val_h - 0x100) * 1000;
138 return temp_from_reg_unsigned(val_h, val_l);
139}
140
141static struct lm95245_data *lm95245_update_device(struct device *dev)
142{
143 struct i2c_client *client = to_i2c_client(dev);
144 struct lm95245_data *data = i2c_get_clientdata(client);
145
146 mutex_lock(&data->update_lock);
147
148 if (time_after(jiffies, data->last_updated
149 + msecs_to_jiffies(data->interval)) || !data->valid) {
150 int i;
151
152 dev_dbg(&client->dev, "Updating lm95245 data.\n");
153 for (i = 0; i < ARRAY_SIZE(lm95245_reg_address); i++)
154 data->regs[i]
155 = i2c_smbus_read_byte_data(client,
156 lm95245_reg_address[i]);
157 data->last_updated = jiffies;
158 data->valid = 1;
159 }
160
161 mutex_unlock(&data->update_lock);
162
163 return data;
164}
165
166static unsigned long lm95245_read_conversion_rate(struct i2c_client *client)
167{
168 int rate;
169 unsigned long interval;
170
171 rate = i2c_smbus_read_byte_data(client, LM95245_REG_RW_CONVERS_RATE);
172
173 switch (rate) {
174 case RATE_CR0063:
175 interval = 63;
176 break;
177 case RATE_CR0364:
178 interval = 364;
179 break;
180 case RATE_CR1000:
181 interval = 1000;
182 break;
183 case RATE_CR2500:
184 default:
185 interval = 2500;
186 break;
187 }
188
189 return interval;
190}
191
192static unsigned long lm95245_set_conversion_rate(struct i2c_client *client,
193 unsigned long interval)
194{
195 int rate;
196
197 if (interval <= 63) {
198 interval = 63;
199 rate = RATE_CR0063;
200 } else if (interval <= 364) {
201 interval = 364;
202 rate = RATE_CR0364;
203 } else if (interval <= 1000) {
204 interval = 1000;
205 rate = RATE_CR1000;
206 } else {
207 interval = 2500;
208 rate = RATE_CR2500;
209 }
210
211 i2c_smbus_write_byte_data(client, LM95245_REG_RW_CONVERS_RATE, rate);
212
213 return interval;
214}
215
216/* Sysfs stuff */
/*
 * Show a measured temperature in millidegrees Celsius.
 *
 * attr->index selects the first of a register pair in data->regs:
 * index 0 is the local sensor, index 2 the remote sensor.  The remote
 * sensor provides both a signed pair (regs[2]/regs[3]) and an unsigned
 * pair (regs[4]/regs[5]); the unsigned pair is used when the signed
 * MSB has no sign bit set.  (Assumes lm95245_reg_address orders the
 * registers MSB-before-LSB, signed-before-unsigned — TODO confirm
 * against the register table above.)
 */
static ssize_t show_input(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct lm95245_data *data = lm95245_update_device(dev);
	int temp;
	int index = to_sensor_dev_attr(attr)->index;

	/*
	 * Index 0 (Local temp) is always signed
	 * Index 2 (Remote temp) has both signed and unsigned data
	 * use signed calculation for remote if signed bit is set
	 */
	if (index == 0 || data->regs[index] & 0x80)
		temp = temp_from_reg_signed(data->regs[index],
			data->regs[index + 1]);
	else
		temp = temp_from_reg_unsigned(data->regs[index + 2],
			data->regs[index + 3]);

	return snprintf(buf, PAGE_SIZE - 1, "%d\n", temp);
}
238
239static ssize_t show_limit(struct device *dev, struct device_attribute *attr,
240 char *buf)
241{
242 struct lm95245_data *data = lm95245_update_device(dev);
243 int index = to_sensor_dev_attr(attr)->index;
244
245 return snprintf(buf, PAGE_SIZE - 1, "%d\n",
246 data->regs[index] * 1000);
247}
248
249static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
250 const char *buf, size_t count)
251{
252 struct i2c_client *client = to_i2c_client(dev);
253 struct lm95245_data *data = i2c_get_clientdata(client);
254 int index = to_sensor_dev_attr(attr)->index;
255 unsigned long val;
256
257 if (strict_strtoul(buf, 10, &val) < 0)
258 return -EINVAL;
259
260 val /= 1000;
261
262 val = SENSORS_LIMIT(val, 0, (index == 6 ? 127 : 255));
263
264 mutex_lock(&data->update_lock);
265
266 data->valid = 0;
267
268 i2c_smbus_write_byte_data(client, lm95245_reg_address[index], val);
269
270 mutex_unlock(&data->update_lock);
271
272 return count;
273}
274
275static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
276 const char *buf, size_t count)
277{
278 struct i2c_client *client = to_i2c_client(dev);
279 struct lm95245_data *data = i2c_get_clientdata(client);
280 unsigned long val;
281
282 if (strict_strtoul(buf, 10, &val) < 0)
283 return -EINVAL;
284
285 val /= 1000;
286
287 val = SENSORS_LIMIT(val, 0, 31);
288
289 mutex_lock(&data->update_lock);
290
291 data->valid = 0;
292
293 /* shared crit hysteresis */
294 i2c_smbus_write_byte_data(client, LM95245_REG_RW_COMMON_HYSTERESIS,
295 val);
296
297 mutex_unlock(&data->update_lock);
298
299 return count;
300}
301
302static ssize_t show_type(struct device *dev, struct device_attribute *attr,
303 char *buf)
304{
305 struct i2c_client *client = to_i2c_client(dev);
306 struct lm95245_data *data = i2c_get_clientdata(client);
307
308 return snprintf(buf, PAGE_SIZE - 1,
309 data->config2 & CFG2_REMOTE_TT ? "1\n" : "2\n");
310}
311
312static ssize_t set_type(struct device *dev, struct device_attribute *attr,
313 const char *buf, size_t count)
314{
315 struct i2c_client *client = to_i2c_client(dev);
316 struct lm95245_data *data = i2c_get_clientdata(client);
317 unsigned long val;
318
319 if (strict_strtoul(buf, 10, &val) < 0)
320 return -EINVAL;
321 if (val != 1 && val != 2)
322 return -EINVAL;
323
324 mutex_lock(&data->update_lock);
325
326 if (val == 1)
327 data->config2 |= CFG2_REMOTE_TT;
328 else
329 data->config2 &= ~CFG2_REMOTE_TT;
330
331 data->valid = 0;
332
333 i2c_smbus_write_byte_data(client, LM95245_REG_RW_CONFIG2,
334 data->config2);
335
336 mutex_unlock(&data->update_lock);
337
338 return count;
339}
340
/*
 * Show an alarm/fault flag as 0 or 1.  Unlike the other attributes,
 * attr->index here carries a STATUS1_* bit mask (not a register
 * index) that is ANDed against the cached status byte in
 * data->regs[9].  (regs[9] is presumably the STATUS1 register slot in
 * lm95245_reg_address — TODO confirm against the register table.)
 */
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct lm95245_data *data = lm95245_update_device(dev);
	int index = to_sensor_dev_attr(attr)->index;

	return snprintf(buf, PAGE_SIZE - 1, "%d\n",
			!!(data->regs[9] & index));
}
350
351static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
352 char *buf)
353{
354 struct lm95245_data *data = lm95245_update_device(dev);
355
356 return snprintf(buf, PAGE_SIZE - 1, "%lu\n", data->interval);
357}
358
359static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
360 const char *buf, size_t count)
361{
362 struct i2c_client *client = to_i2c_client(dev);
363 struct lm95245_data *data = i2c_get_clientdata(client);
364 unsigned long val;
365
366 if (strict_strtoul(buf, 10, &val) < 0)
367 return -EINVAL;
368
369 mutex_lock(&data->update_lock);
370
371 data->interval = lm95245_set_conversion_rate(client, val);
372
373 mutex_unlock(&data->update_lock);
374
375 return count;
376}
377
/*
 * Sysfs attributes.  The index argument selects an entry in
 * lm95245_reg_address for the input/limit attributes, and carries a
 * STATUS1_* bit mask for the alarm/fault attributes.  Note both
 * *_crit_hyst attributes read/write the same shared register (index 8).
 */

/* local sensor (temp1) */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_limit,
		set_limit, 6);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_limit,
		set_crit_hyst, 8);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL,
		STATUS1_LOC);

/* remote sensor (temp2) */
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 2);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_limit,
		set_limit, 7);
static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_limit,
		set_crit_hyst, 8);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL,
		STATUS1_RTCRIT);
static SENSOR_DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type,
		set_type, 0);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL,
		STATUS1_DIODE_FAULT);

static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
		set_interval);

static struct attribute *lm95245_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp2_crit.dev_attr.attr,
	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_type.dev_attr.attr,
	&sensor_dev_attr_temp2_fault.dev_attr.attr,
	&dev_attr_update_interval.attr,
	NULL
};

static const struct attribute_group lm95245_group = {
	.attrs = lm95245_attributes,
};
419
420/* Return 0 if detection is successful, -ENODEV otherwise */
421static int lm95245_detect(struct i2c_client *new_client,
422 struct i2c_board_info *info)
423{
424 struct i2c_adapter *adapter = new_client->adapter;
425
426 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
427 return -ENODEV;
428
429 if (i2c_smbus_read_byte_data(new_client, LM95245_REG_R_MAN_ID)
430 != MANUFACTURER_ID
431 || i2c_smbus_read_byte_data(new_client, LM95245_REG_R_CHIP_ID)
432 != DEFAULT_REVISION)
433 return -ENODEV;
434
435 strlcpy(info->type, DEVNAME, I2C_NAME_SIZE);
436 return 0;
437}
438
/*
 * One-time chip setup at probe time: cache the current conversion rate
 * and the two configuration registers, and wake the chip from standby
 * if the CFG_STOP bit is set.
 */
static void lm95245_init_client(struct i2c_client *client)
{
	struct lm95245_data *data = i2c_get_clientdata(client);

	data->valid = 0;
	data->interval = lm95245_read_conversion_rate(client);

	data->config1 = i2c_smbus_read_byte_data(client,
		LM95245_REG_RW_CONFIG1);
	data->config2 = i2c_smbus_read_byte_data(client,
		LM95245_REG_RW_CONFIG2);

	if (data->config1 & CFG_STOP) {
		/* Clear the standby bit */
		data->config1 &= ~CFG_STOP;
		i2c_smbus_write_byte_data(client, LM95245_REG_RW_CONFIG1,
			data->config1);
	}
}
458
459static int lm95245_probe(struct i2c_client *new_client,
460 const struct i2c_device_id *id)
461{
462 struct lm95245_data *data;
463 int err;
464
465 data = kzalloc(sizeof(struct lm95245_data), GFP_KERNEL);
466 if (!data) {
467 err = -ENOMEM;
468 goto exit;
469 }
470
471 i2c_set_clientdata(new_client, data);
472 mutex_init(&data->update_lock);
473
474 /* Initialize the LM95245 chip */
475 lm95245_init_client(new_client);
476
477 /* Register sysfs hooks */
478 err = sysfs_create_group(&new_client->dev.kobj, &lm95245_group);
479 if (err)
480 goto exit_free;
481
482 data->hwmon_dev = hwmon_device_register(&new_client->dev);
483 if (IS_ERR(data->hwmon_dev)) {
484 err = PTR_ERR(data->hwmon_dev);
485 goto exit_remove_files;
486 }
487
488 return 0;
489
490exit_remove_files:
491 sysfs_remove_group(&new_client->dev.kobj, &lm95245_group);
492exit_free:
493 kfree(data);
494exit:
495 return err;
496}
497
498static int lm95245_remove(struct i2c_client *client)
499{
500 struct lm95245_data *data = i2c_get_clientdata(client);
501
502 hwmon_device_unregister(data->hwmon_dev);
503 sysfs_remove_group(&client->dev.kobj, &lm95245_group);
504
505 kfree(data);
506 return 0;
507}
508
/* Driver data (common to all clients) */
static const struct i2c_device_id lm95245_id[] = {
	{ DEVNAME, 0 },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(i2c, lm95245_id);

/* I2C driver glue: probe/remove plus autodetection on hwmon adapters */
static struct i2c_driver lm95245_driver = {
	.class		= I2C_CLASS_HWMON,
	.driver = {
		.name	= DEVNAME,
	},
	.probe		= lm95245_probe,
	.remove		= lm95245_remove,
	.id_table	= lm95245_id,
	.detect		= lm95245_detect,
	.address_list	= normal_i2c,
};
527
/* Module entry point: register the I2C driver with the core. */
static int __init sensors_lm95245_init(void)
{
	return i2c_add_driver(&lm95245_driver);
}

/* Module exit point: unregister the I2C driver. */
static void __exit sensors_lm95245_exit(void)
{
	i2c_del_driver(&lm95245_driver);
}

MODULE_AUTHOR("Alexander Stein <alexander.stein@systec-electronic.com>");
MODULE_DESCRIPTION("LM95245 sensor driver");
MODULE_LICENSE("GPL");

module_init(sensors_lm95245_init);
module_exit(sensors_lm95245_exit);
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
new file mode 100644
index 000000000000..20d1b2ddffb6
--- /dev/null
+++ b/drivers/hwmon/max1668.c
@@ -0,0 +1,502 @@
1/*
2 Copyright (c) 2011 David George <david.george@ska.ac.za>
3
4 based on adm1021.c
5 some credit to Christoph Scheurer, but largely a rewrite
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/slab.h>
25#include <linux/jiffies.h>
26#include <linux/i2c.h>
27#include <linux/hwmon.h>
28#include <linux/hwmon-sysfs.h>
29#include <linux/err.h>
30#include <linux/mutex.h>
31
/* Addresses to scan: all slave addresses the MAX1668 family can strap to */
static unsigned short max1668_addr_list[] = {
	0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
35
/* max1668 registers */

/* temperature value registers: 0 = local, 1..4 = remote channels */
#define MAX1668_REG_TEMP(nr)	(nr)
#define MAX1668_REG_STAT1	0x05
#define MAX1668_REG_STAT2	0x06
#define MAX1668_REG_MAN_ID	0xfe
#define MAX1668_REG_DEV_ID	0xff

/* limits */

/* Note: the chip uses different addresses for writing and reading the
 * per-channel limits; each channel occupies a stride of 2 registers. */
/* write high limits */
#define MAX1668_REG_LIMH_WR(nr)	(0x13 + 2 * (nr))
/* write low limits */
#define MAX1668_REG_LIML_WR(nr)	(0x14 + 2 * (nr))
/* read high limits */
#define MAX1668_REG_LIMH_RD(nr)	(0x08 + 2 * (nr))
/* read low limits */
#define MAX1668_REG_LIML_RD(nr)	(0x09 + 2 * (nr))

/* manufacturer and device ID Constants */
#define MAN_ID_MAXIM		0x4d
#define DEV_ID_MAX1668		0x3
#define DEV_ID_MAX1805		0x5
#define DEV_ID_MAX1989		0xb
60
61/* read only mode module parameter */
62static int read_only;
63module_param(read_only, bool, 0);
64MODULE_PARM_DESC(read_only, "Don't set any values, read only mode");
65
/* Supported chip variants; MAX1805 exposes fewer remote channels. */
enum chips { max1668, max1805, max1989 };

/* Per-client state, cached under update_lock. */
struct max1668_data {
	struct device *hwmon_dev;	/* registered hwmon class device */
	enum chips type;		/* which variant we are driving */

	struct mutex update_lock;
	char valid;		/* !=0 if following fields are valid */
	unsigned long last_updated;	/* In jiffies */

	/* 1x local and 4x remote */
	s8 temp_max[5];		/* high limits, whole degC */
	s8 temp_min[5];		/* low limits, whole degC */
	s8 temp[5];		/* measured temperatures, whole degC */
	u16 alarms;		/* STAT1 in high byte, STAT2 in low byte */
};
82
/*
 * Refresh the register cache if it is stale (older than 1.5 s) or
 * invalid.  Returns the per-client data on success, or an ERR_PTR
 * propagating the first failed SMBus read.  Serialized against the
 * sysfs store paths by update_lock.
 */
static struct max1668_data *max1668_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct max1668_data *data = i2c_get_clientdata(client);
	struct max1668_data *ret = data;
	s32 val;
	int i;

	mutex_lock(&data->update_lock);

	/* cache still fresh: return it unchanged */
	if (data->valid && !time_after(jiffies,
			data->last_updated + HZ + HZ / 2))
		goto abort;

	/* read temperature plus high/low limits for all five channels */
	for (i = 0; i < 5; i++) {
		val = i2c_smbus_read_byte_data(client, MAX1668_REG_TEMP(i));
		if (unlikely(val < 0)) {
			ret = ERR_PTR(val);
			goto abort;
		}
		data->temp[i] = (s8) val;

		val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIMH_RD(i));
		if (unlikely(val < 0)) {
			ret = ERR_PTR(val);
			goto abort;
		}
		data->temp_max[i] = (s8) val;

		val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIML_RD(i));
		if (unlikely(val < 0)) {
			ret = ERR_PTR(val);
			goto abort;
		}
		data->temp_min[i] = (s8) val;
	}

	/* combine the two status registers: STAT1 high byte, STAT2 low */
	val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT1);
	if (unlikely(val < 0)) {
		ret = ERR_PTR(val);
		goto abort;
	}
	data->alarms = val << 8;

	val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT2);
	if (unlikely(val < 0)) {
		ret = ERR_PTR(val);
		goto abort;
	}
	data->alarms |= val;

	data->last_updated = jiffies;
	data->valid = 1;
abort:
	mutex_unlock(&data->update_lock);

	return ret;
}
141
142static ssize_t show_temp(struct device *dev,
143 struct device_attribute *devattr, char *buf)
144{
145 int index = to_sensor_dev_attr(devattr)->index;
146 struct max1668_data *data = max1668_update_device(dev);
147
148 if (IS_ERR(data))
149 return PTR_ERR(data);
150
151 return sprintf(buf, "%d\n", data->temp[index] * 1000);
152}
153
154static ssize_t show_temp_max(struct device *dev,
155 struct device_attribute *devattr, char *buf)
156{
157 int index = to_sensor_dev_attr(devattr)->index;
158 struct max1668_data *data = max1668_update_device(dev);
159
160 if (IS_ERR(data))
161 return PTR_ERR(data);
162
163 return sprintf(buf, "%d\n", data->temp_max[index] * 1000);
164}
165
166static ssize_t show_temp_min(struct device *dev,
167 struct device_attribute *devattr, char *buf)
168{
169 int index = to_sensor_dev_attr(devattr)->index;
170 struct max1668_data *data = max1668_update_device(dev);
171
172 if (IS_ERR(data))
173 return PTR_ERR(data);
174
175 return sprintf(buf, "%d\n", data->temp_min[index] * 1000);
176}
177
178static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
179 char *buf)
180{
181 int index = to_sensor_dev_attr(attr)->index;
182 struct max1668_data *data = max1668_update_device(dev);
183
184 if (IS_ERR(data))
185 return PTR_ERR(data);
186
187 return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1);
188}
189
190static ssize_t show_fault(struct device *dev,
191 struct device_attribute *devattr, char *buf)
192{
193 int index = to_sensor_dev_attr(devattr)->index;
194 struct max1668_data *data = max1668_update_device(dev);
195
196 if (IS_ERR(data))
197 return PTR_ERR(data);
198
199 return sprintf(buf, "%u\n",
200 (data->alarms & (1 << 12)) && data->temp[index] == 127);
201}
202
203static ssize_t set_temp_max(struct device *dev,
204 struct device_attribute *devattr,
205 const char *buf, size_t count)
206{
207 int index = to_sensor_dev_attr(devattr)->index;
208 struct i2c_client *client = to_i2c_client(dev);
209 struct max1668_data *data = i2c_get_clientdata(client);
210 long temp;
211 int ret;
212
213 ret = kstrtol(buf, 10, &temp);
214 if (ret < 0)
215 return ret;
216
217 mutex_lock(&data->update_lock);
218 data->temp_max[index] = SENSORS_LIMIT(temp/1000, -128, 127);
219 if (i2c_smbus_write_byte_data(client,
220 MAX1668_REG_LIMH_WR(index),
221 data->temp_max[index]))
222 count = -EIO;
223 mutex_unlock(&data->update_lock);
224
225 return count;
226}
227
228static ssize_t set_temp_min(struct device *dev,
229 struct device_attribute *devattr,
230 const char *buf, size_t count)
231{
232 int index = to_sensor_dev_attr(devattr)->index;
233 struct i2c_client *client = to_i2c_client(dev);
234 struct max1668_data *data = i2c_get_clientdata(client);
235 long temp;
236 int ret;
237
238 ret = kstrtol(buf, 10, &temp);
239 if (ret < 0)
240 return ret;
241
242 mutex_lock(&data->update_lock);
243 data->temp_min[index] = SENSORS_LIMIT(temp/1000, -128, 127);
244 if (i2c_smbus_write_byte_data(client,
245 MAX1668_REG_LIML_WR(index),
246 data->temp_max[index]))
247 count = -EIO;
248 mutex_unlock(&data->update_lock);
249
250 return count;
251}
252
/*
 * Sysfs attributes.  The limit attributes are declared read-only here;
 * S_IWUSR is added at registration time by the is_visible callback
 * (max1668_attribute_mode) unless the read_only parameter is set.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max,
				set_temp_max, 0);
static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO, show_temp_min,
				set_temp_min, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO, show_temp_max,
				set_temp_max, 1);
static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO, show_temp_min,
				set_temp_min, 1);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO, show_temp_max,
				set_temp_max, 2);
static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO, show_temp_min,
				set_temp_min, 2);
static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO, show_temp_max,
				set_temp_max, 3);
static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO, show_temp_min,
				set_temp_min, 3);
static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4);
static SENSOR_DEVICE_ATTR(temp5_max, S_IRUGO, show_temp_max,
				set_temp_max, 4);
static SENSOR_DEVICE_ATTR(temp5_min, S_IRUGO, show_temp_min,
				set_temp_min, 4);

/* alarm attributes: index is the bit position in data->alarms */
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 14);
static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 13);
static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 7);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_alarm, NULL, 0);

/* fault attributes: index is the remote channel number */
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_fault, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_fault, NULL, 2);
static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_fault, NULL, 3);
static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_fault, NULL, 4);
294
/* Attributes common to MAX1668, MAX1989 and MAX1805 */
static struct attribute *max1668_attribute_common[] = {
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	&sensor_dev_attr_temp1_min.dev_attr.attr,
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp2_max.dev_attr.attr,
	&sensor_dev_attr_temp2_min.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp3_max.dev_attr.attr,
	&sensor_dev_attr_temp3_min.dev_attr.attr,
	&sensor_dev_attr_temp3_input.dev_attr.attr,

	&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
	&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
	&sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
	&sensor_dev_attr_temp3_min_alarm.dev_attr.attr,

	&sensor_dev_attr_temp2_fault.dev_attr.attr,
	&sensor_dev_attr_temp3_fault.dev_attr.attr,
	NULL	/* sentinel */
};

/* Attributes not present on MAX1805 (channels 4 and 5) */
static struct attribute *max1668_attribute_unique[] = {
	&sensor_dev_attr_temp4_max.dev_attr.attr,
	&sensor_dev_attr_temp4_min.dev_attr.attr,
	&sensor_dev_attr_temp4_input.dev_attr.attr,
	&sensor_dev_attr_temp5_max.dev_attr.attr,
	&sensor_dev_attr_temp5_min.dev_attr.attr,
	&sensor_dev_attr_temp5_input.dev_attr.attr,

	&sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
	&sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
	&sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
	&sensor_dev_attr_temp5_min_alarm.dev_attr.attr,

	&sensor_dev_attr_temp4_fault.dev_attr.attr,
	&sensor_dev_attr_temp5_fault.dev_attr.attr,
	NULL	/* sentinel */
};
337
338static mode_t max1668_attribute_mode(struct kobject *kobj,
339 struct attribute *attr, int index)
340{
341 int ret = S_IRUGO;
342 if (read_only)
343 return ret;
344 if (attr == &sensor_dev_attr_temp1_max.dev_attr.attr ||
345 attr == &sensor_dev_attr_temp2_max.dev_attr.attr ||
346 attr == &sensor_dev_attr_temp3_max.dev_attr.attr ||
347 attr == &sensor_dev_attr_temp4_max.dev_attr.attr ||
348 attr == &sensor_dev_attr_temp5_max.dev_attr.attr ||
349 attr == &sensor_dev_attr_temp1_min.dev_attr.attr ||
350 attr == &sensor_dev_attr_temp2_min.dev_attr.attr ||
351 attr == &sensor_dev_attr_temp3_min.dev_attr.attr ||
352 attr == &sensor_dev_attr_temp4_min.dev_attr.attr ||
353 attr == &sensor_dev_attr_temp5_min.dev_attr.attr)
354 ret |= S_IWUSR;
355 return ret;
356}
357
/* Group registered for every supported chip variant. */
static const struct attribute_group max1668_group_common = {
	.attrs = max1668_attribute_common,
	.is_visible = max1668_attribute_mode
};

/* Extra group registered only for MAX1668 and MAX1989 (not MAX1805). */
static const struct attribute_group max1668_group_unique = {
	.attrs = max1668_attribute_unique,
	.is_visible = max1668_attribute_mode
};
367
368/* Return 0 if detection is successful, -ENODEV otherwise */
369static int max1668_detect(struct i2c_client *client,
370 struct i2c_board_info *info)
371{
372 struct i2c_adapter *adapter = client->adapter;
373 const char *type_name;
374 int man_id, dev_id;
375
376 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
377 return -ENODEV;
378
379 /* Check for unsupported part */
380 man_id = i2c_smbus_read_byte_data(client, MAX1668_REG_MAN_ID);
381 if (man_id != MAN_ID_MAXIM)
382 return -ENODEV;
383
384 dev_id = i2c_smbus_read_byte_data(client, MAX1668_REG_DEV_ID);
385 if (dev_id < 0)
386 return -ENODEV;
387
388 type_name = NULL;
389 if (dev_id == DEV_ID_MAX1668)
390 type_name = "max1668";
391 else if (dev_id == DEV_ID_MAX1805)
392 type_name = "max1805";
393 else if (dev_id == DEV_ID_MAX1989)
394 type_name = "max1989";
395
396 if (!type_name)
397 return -ENODEV;
398
399 strlcpy(info->type, type_name, I2C_NAME_SIZE);
400
401 return 0;
402}
403
404static int max1668_probe(struct i2c_client *client,
405 const struct i2c_device_id *id)
406{
407 struct i2c_adapter *adapter = client->adapter;
408 struct max1668_data *data;
409 int err;
410
411 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
412 return -ENODEV;
413
414 data = kzalloc(sizeof(struct max1668_data), GFP_KERNEL);
415 if (!data)
416 return -ENOMEM;
417
418 i2c_set_clientdata(client, data);
419 data->type = id->driver_data;
420 mutex_init(&data->update_lock);
421
422 /* Register sysfs hooks */
423 err = sysfs_create_group(&client->dev.kobj, &max1668_group_common);
424 if (err)
425 goto error_free;
426
427 if (data->type == max1668 || data->type == max1989) {
428 err = sysfs_create_group(&client->dev.kobj,
429 &max1668_group_unique);
430 if (err)
431 goto error_sysrem0;
432 }
433
434 data->hwmon_dev = hwmon_device_register(&client->dev);
435 if (IS_ERR(data->hwmon_dev)) {
436 err = PTR_ERR(data->hwmon_dev);
437 goto error_sysrem1;
438 }
439
440 return 0;
441
442error_sysrem1:
443 if (data->type == max1668 || data->type == max1989)
444 sysfs_remove_group(&client->dev.kobj, &max1668_group_unique);
445error_sysrem0:
446 sysfs_remove_group(&client->dev.kobj, &max1668_group_common);
447error_free:
448 kfree(data);
449 return err;
450}
451
452static int max1668_remove(struct i2c_client *client)
453{
454 struct max1668_data *data = i2c_get_clientdata(client);
455
456 hwmon_device_unregister(data->hwmon_dev);
457 if (data->type == max1668 || data->type == max1989)
458 sysfs_remove_group(&client->dev.kobj, &max1668_group_unique);
459
460 sysfs_remove_group(&client->dev.kobj, &max1668_group_common);
461
462 kfree(data);
463 return 0;
464}
465
/* Device ID table; driver_data carries the enum chips variant. */
static const struct i2c_device_id max1668_id[] = {
	{ "max1668", max1668 },
	{ "max1805", max1805 },
	{ "max1989", max1989 },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(i2c, max1668_id);

/* This is the driver that will be inserted */
static struct i2c_driver max1668_driver = {
	.class = I2C_CLASS_HWMON,
	.driver = {
		  .name	= "max1668",
		  },
	.probe = max1668_probe,
	.remove	= max1668_remove,
	.id_table = max1668_id,
	.detect	= max1668_detect,
	.address_list = max1668_addr_list,
};
486
487static int __init sensors_max1668_init(void)
488{
489 return i2c_add_driver(&max1668_driver);
490}
491
492static void __exit sensors_max1668_exit(void)
493{
494 i2c_del_driver(&max1668_driver);
495}
496
497MODULE_AUTHOR("David George <david.george@ska.ac.za>");
498MODULE_DESCRIPTION("MAX1668 remote temperature sensor driver");
499MODULE_LICENSE("GPL");
500
501module_init(sensors_max1668_init)
502module_exit(sensors_max1668_exit)
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
new file mode 100644
index 000000000000..d7926f4336b5
--- /dev/null
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -0,0 +1,453 @@
1/*
2 * ntc_thermistor.c - NTC Thermistors
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 * MyungJoo Ham <myungjoo.ham@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/pm_runtime.h>
26#include <linux/math64.h>
27#include <linux/platform_device.h>
28#include <linux/err.h>
29
30#include <linux/platform_data/ntc_thermistor.h>
31
32#include <linux/hwmon.h>
33#include <linux/hwmon-sysfs.h>
34
/* One calibration point: temperature (degC) and resistance (ohm). */
struct ntc_compensation {
	int		temp_C;
	unsigned int	ohm;
};

/*
 * A compensation table should be sorted by the values of .ohm
 * in descending order.
 * The following compensation tables are from the specification of Murata NTC
 * Thermistors Datasheet
 *
 * The tables are only referenced from this driver, so give them
 * internal linkage (static) to keep the kernel's global namespace
 * clean.
 */
static const struct ntc_compensation ncpXXwb473[] = {
	{ .temp_C	= -40, .ohm	= 1747920 },
	{ .temp_C	= -35, .ohm	= 1245428 },
	{ .temp_C	= -30, .ohm	= 898485 },
	{ .temp_C	= -25, .ohm	= 655802 },
	{ .temp_C	= -20, .ohm	= 483954 },
	{ .temp_C	= -15, .ohm	= 360850 },
	{ .temp_C	= -10, .ohm	= 271697 },
	{ .temp_C	= -5, .ohm	= 206463 },
	{ .temp_C	= 0, .ohm	= 158214 },
	{ .temp_C	= 5, .ohm	= 122259 },
	{ .temp_C	= 10, .ohm	= 95227 },
	{ .temp_C	= 15, .ohm	= 74730 },
	{ .temp_C	= 20, .ohm	= 59065 },
	{ .temp_C	= 25, .ohm	= 47000 },
	{ .temp_C	= 30, .ohm	= 37643 },
	{ .temp_C	= 35, .ohm	= 30334 },
	{ .temp_C	= 40, .ohm	= 24591 },
	{ .temp_C	= 45, .ohm	= 20048 },
	{ .temp_C	= 50, .ohm	= 16433 },
	{ .temp_C	= 55, .ohm	= 13539 },
	{ .temp_C	= 60, .ohm	= 11209 },
	{ .temp_C	= 65, .ohm	= 9328 },
	{ .temp_C	= 70, .ohm	= 7798 },
	{ .temp_C	= 75, .ohm	= 6544 },
	{ .temp_C	= 80, .ohm	= 5518 },
	{ .temp_C	= 85, .ohm	= 4674 },
	{ .temp_C	= 90, .ohm	= 3972 },
	{ .temp_C	= 95, .ohm	= 3388 },
	{ .temp_C	= 100, .ohm	= 2902 },
	{ .temp_C	= 105, .ohm	= 2494 },
	{ .temp_C	= 110, .ohm	= 2150 },
	{ .temp_C	= 115, .ohm	= 1860 },
	{ .temp_C	= 120, .ohm	= 1615 },
	{ .temp_C	= 125, .ohm	= 1406 },
};
static const struct ntc_compensation ncpXXwl333[] = {
	{ .temp_C	= -40, .ohm	= 1610154 },
	{ .temp_C	= -35, .ohm	= 1130850 },
	{ .temp_C	= -30, .ohm	= 802609 },
	{ .temp_C	= -25, .ohm	= 575385 },
	{ .temp_C	= -20, .ohm	= 416464 },
	{ .temp_C	= -15, .ohm	= 304219 },
	{ .temp_C	= -10, .ohm	= 224193 },
	{ .temp_C	= -5, .ohm	= 166623 },
	{ .temp_C	= 0, .ohm	= 124850 },
	{ .temp_C	= 5, .ohm	= 94287 },
	{ .temp_C	= 10, .ohm	= 71747 },
	{ .temp_C	= 15, .ohm	= 54996 },
	{ .temp_C	= 20, .ohm	= 42455 },
	{ .temp_C	= 25, .ohm	= 33000 },
	{ .temp_C	= 30, .ohm	= 25822 },
	{ .temp_C	= 35, .ohm	= 20335 },
	{ .temp_C	= 40, .ohm	= 16115 },
	{ .temp_C	= 45, .ohm	= 12849 },
	{ .temp_C	= 50, .ohm	= 10306 },
	{ .temp_C	= 55, .ohm	= 8314 },
	{ .temp_C	= 60, .ohm	= 6746 },
	{ .temp_C	= 65, .ohm	= 5503 },
	{ .temp_C	= 70, .ohm	= 4513 },
	{ .temp_C	= 75, .ohm	= 3721 },
	{ .temp_C	= 80, .ohm	= 3084 },
	{ .temp_C	= 85, .ohm	= 2569 },
	{ .temp_C	= 90, .ohm	= 2151 },
	{ .temp_C	= 95, .ohm	= 1809 },
	{ .temp_C	= 100, .ohm	= 1529 },
	{ .temp_C	= 105, .ohm	= 1299 },
	{ .temp_C	= 110, .ohm	= 1108 },
	{ .temp_C	= 115, .ohm	= 949 },
	{ .temp_C	= 120, .ohm	= 817 },
	{ .temp_C	= 125, .ohm	= 707 },
};
118
/* Per-device state for one NTC thermistor instance. */
struct ntc_data {
	struct device *hwmon_dev;	/* registered hwmon class device */
	struct ntc_thermistor_platform_data *pdata;	/* wiring + readers */
	const struct ntc_compensation *comp;	/* calibration table */
	struct device *dev;		/* platform device, for logging */
	int n_comp;			/* entries in comp[] */
	char name[PLATFORM_NAME_SIZE];	/* reported via the name attr */
};
127
128static inline u64 div64_u64_safe(u64 dividend, u64 divisor)
129{
130 if (divisor == 0 && dividend == 0)
131 return 0;
132 if (divisor == 0)
133 return UINT_MAX;
134 return div64_u64(dividend, divisor);
135}
136
/*
 * Derive the thermistor resistance from a measured voltage (uV) using
 * the voltage-divider equations for the configured wiring.  Degenerate
 * readings (0 mV or >= the pullup voltage) saturate to 0 or UINT_MAX
 * depending on whether the thermistor sits on the positive or ground
 * side.  All math is done in millivolts, so sub-mV precision is
 * deliberately dropped.
 */
static unsigned int get_ohm_of_thermistor(struct ntc_data *data,
		unsigned int uV)
{
	struct ntc_thermistor_platform_data *pdata = data->pdata;
	u64 mV = uV / 1000;
	u64 pmV = pdata->pullup_uV / 1000;
	u64 N, puO, pdO;
	puO = pdata->pullup_ohm;
	pdO = pdata->pulldown_ohm;

	/* 0 mV: thermistor is either fully open or fully shorted,
	 * depending on which side of the divider it occupies */
	if (mV == 0) {
		if (pdata->connect == NTC_CONNECTED_POSITIVE)
			return UINT_MAX;
		return 0;
	}
	if (mV >= pmV)
		return (pdata->connect == NTC_CONNECTED_POSITIVE) ?
			0 : UINT_MAX;

	/* special-case a missing pullup/pulldown resistor, then the
	 * general two-resistor divider formulas */
	if (pdata->connect == NTC_CONNECTED_POSITIVE && puO == 0)
		N = div64_u64_safe(pdO * (pmV - mV), mV);
	else if (pdata->connect == NTC_CONNECTED_GROUND && pdO == 0)
		N = div64_u64_safe(puO * mV, pmV - mV);
	else if (pdata->connect == NTC_CONNECTED_POSITIVE)
		N = div64_u64_safe(pdO * puO * (pmV - mV),
				puO * mV - pdO * (pmV - mV));
	else
		N = div64_u64_safe(pdO * puO * mV, pdO * (pmV - mV) - puO * mV);

	return (unsigned int) N;
}
168
/*
 * Binary-search the compensation table (sorted by .ohm, descending)
 * for the pair of adjacent entries bracketing the given resistance.
 * On success returns 0 with comp[*i_low].ohm <= ohm <= comp[*i_high].ohm
 * and *i_high == *i_low - 1.  When ohm falls outside the table,
 * returns -EINVAL with the nearest valid index in *i_low or *i_high
 * and -1 in the other, so the caller can clamp to the table edge.
 */
static int lookup_comp(struct ntc_data *data,
		unsigned int ohm, int *i_low, int *i_high)
{
	int start, end, mid = -1;

	/* Do a binary search on compensation table */
	start = 0;
	end = data->n_comp;

	while (end > start) {
		mid = start + (end - start) / 2;
		/* table is descending: smaller ohm means look earlier */
		if (data->comp[mid].ohm < ohm)
			end = mid;
		else if (data->comp[mid].ohm > ohm)
			start = mid + 1;
		else
			break;
	}

	/* mid landed on the first entry: ohm may exceed the table max */
	if (mid == 0) {
		if (data->comp[mid].ohm > ohm) {
			*i_high = mid;
			*i_low = mid + 1;
			return 0;
		} else {
			*i_low = mid;
			*i_high = -1;
			return -EINVAL;
		}
	}
	/* mid landed on the last entry: ohm may be below the table min */
	if (mid == (data->n_comp - 1)) {
		if (data->comp[mid].ohm <= ohm) {
			*i_low = mid;
			*i_high = mid - 1;
			return 0;
		} else {
			*i_low = -1;
			*i_high = mid;
			return -EINVAL;
		}
	}

	/* interior hit: pick the bracketing pair around mid */
	if (data->comp[mid].ohm <= ohm) {
		*i_low = mid;
		*i_high = mid - 1;
	}
	if (data->comp[mid].ohm > ohm) {
		*i_low = mid + 1;
		*i_high = mid;
	}

	return 0;
}
222
/*
 * Convert a resistance to a temperature in millidegrees Celsius by
 * linear interpolation between the two bracketing table entries.  When
 * the resistance lies outside the table, the nearest table endpoint is
 * returned instead; a nonzero return means no conversion was possible.
 */
static int get_temp_mC(struct ntc_data *data, unsigned int ohm, int *temp)
{
	int low, high;
	int ret;

	ret = lookup_comp(data, ohm, &low, &high);
	if (ret) {
		/* Unable to use linear approximation */
		if (low != -1)
			*temp = data->comp[low].temp_C * 1000;
		else if (high != -1)
			*temp = data->comp[high].temp_C * 1000;
		else
			return ret;
	} else {
		/* interpolate between comp[low] and comp[high] */
		*temp = data->comp[low].temp_C * 1000 +
			((data->comp[high].temp_C - data->comp[low].temp_C) *
			 1000 * ((int)ohm - (int)data->comp[low].ohm)) /
			((int)data->comp[high].ohm - (int)data->comp[low].ohm);
	}

	return 0;
}
246
247static int ntc_thermistor_read(struct ntc_data *data, int *temp)
248{
249 int ret;
250 int read_ohm, read_uV;
251 unsigned int ohm = 0;
252
253 if (data->pdata->read_ohm) {
254 read_ohm = data->pdata->read_ohm();
255 if (read_ohm < 0)
256 return read_ohm;
257 ohm = (unsigned int)read_ohm;
258 }
259
260 if (data->pdata->read_uV) {
261 read_uV = data->pdata->read_uV();
262 if (read_uV < 0)
263 return read_uV;
264 ohm = get_ohm_of_thermistor(data, (unsigned int)read_uV);
265 }
266
267 ret = get_temp_mC(data, ohm, temp);
268 if (ret) {
269 dev_dbg(data->dev, "Sensor reading function not available.\n");
270 return ret;
271 }
272
273 return 0;
274}
275
/* sysfs "name" attribute: report the device's registered name. */
static ssize_t ntc_show_name(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ntc_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", data->name);
}
283
/* sysfs "temp1_type" attribute: hwmon sensor type 4 (thermistor). */
static ssize_t ntc_show_type(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "4\n");
}
289
290static ssize_t ntc_show_temp(struct device *dev,
291 struct device_attribute *attr, char *buf)
292{
293 struct ntc_data *data = dev_get_drvdata(dev);
294 int temp, ret;
295
296 ret = ntc_thermistor_read(data, &temp);
297 if (ret)
298 return ret;
299 return sprintf(buf, "%d\n", temp);
300}
301
/* hwmon-standard sysfs attributes: sensor type, temperature, device name. */
static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO, ntc_show_type, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ntc_show_temp, NULL, 0);
static DEVICE_ATTR(name, S_IRUGO, ntc_show_name, NULL);

static struct attribute *ntc_attributes[] = {
	&dev_attr_name.attr,
	&sensor_dev_attr_temp1_type.dev_attr.attr,
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL,	/* sentinel */
};

static const struct attribute_group ntc_attr_group = {
	.attrs = ntc_attributes,
};
316
317static int __devinit ntc_thermistor_probe(struct platform_device *pdev)
318{
319 struct ntc_data *data;
320 struct ntc_thermistor_platform_data *pdata = pdev->dev.platform_data;
321 int ret = 0;
322
323 if (!pdata) {
324 dev_err(&pdev->dev, "No platform init data supplied.\n");
325 return -ENODEV;
326 }
327
328 /* Either one of the two is required. */
329 if (!pdata->read_uV && !pdata->read_ohm) {
330 dev_err(&pdev->dev, "Both read_uV and read_ohm missing."
331 "Need either one of the two.\n");
332 return -EINVAL;
333 }
334
335 if (pdata->read_uV && pdata->read_ohm) {
336 dev_warn(&pdev->dev, "Only one of read_uV and read_ohm "
337 "is needed; ignoring read_uV.\n");
338 pdata->read_uV = NULL;
339 }
340
341 if (pdata->read_uV && (pdata->pullup_uV == 0 ||
342 (pdata->pullup_ohm == 0 && pdata->connect ==
343 NTC_CONNECTED_GROUND) ||
344 (pdata->pulldown_ohm == 0 && pdata->connect ==
345 NTC_CONNECTED_POSITIVE) ||
346 (pdata->connect != NTC_CONNECTED_POSITIVE &&
347 pdata->connect != NTC_CONNECTED_GROUND))) {
348 dev_err(&pdev->dev, "Required data to use read_uV not "
349 "supplied.\n");
350 return -EINVAL;
351 }
352
353 data = kzalloc(sizeof(struct ntc_data), GFP_KERNEL);
354 if (!data)
355 return -ENOMEM;
356
357 data->dev = &pdev->dev;
358 data->pdata = pdata;
359 strncpy(data->name, pdev->id_entry->name, PLATFORM_NAME_SIZE);
360
361 switch (pdev->id_entry->driver_data) {
362 case TYPE_NCPXXWB473:
363 data->comp = ncpXXwb473;
364 data->n_comp = ARRAY_SIZE(ncpXXwb473);
365 break;
366 case TYPE_NCPXXWL333:
367 data->comp = ncpXXwl333;
368 data->n_comp = ARRAY_SIZE(ncpXXwl333);
369 break;
370 default:
371 dev_err(&pdev->dev, "Unknown device type: %lu(%s)\n",
372 pdev->id_entry->driver_data,
373 pdev->id_entry->name);
374 ret = -EINVAL;
375 goto err;
376 }
377
378 platform_set_drvdata(pdev, data);
379
380 ret = sysfs_create_group(&data->dev->kobj, &ntc_attr_group);
381 if (ret) {
382 dev_err(data->dev, "unable to create sysfs files\n");
383 goto err;
384 }
385
386 data->hwmon_dev = hwmon_device_register(data->dev);
387 if (IS_ERR_OR_NULL(data->hwmon_dev)) {
388 dev_err(data->dev, "unable to register as hwmon device.\n");
389 ret = -EINVAL;
390 goto err_after_sysfs;
391 }
392
393 dev_info(&pdev->dev, "Thermistor %s:%d (type: %s/%lu) successfully probed.\n",
394 pdev->name, pdev->id, pdev->id_entry->name,
395 pdev->id_entry->driver_data);
396 return 0;
397err_after_sysfs:
398 sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
399err:
400 kfree(data);
401 return ret;
402}
403
/* Remove: tear down in reverse order of probe, then free driver state. */
static int __devexit ntc_thermistor_remove(struct platform_device *pdev)
{
	struct ntc_data *data = platform_get_drvdata(pdev);

	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
	platform_set_drvdata(pdev, NULL);

	kfree(data);

	return 0;
}
416
/*
 * Supported chips.  All ncpXXwb473 package variants share one compensation
 * table (TYPE_NCPXXWB473); the ncp15wl333 uses its own (TYPE_NCPXXWL333).
 */
static const struct platform_device_id ntc_thermistor_id[] = {
	{ "ncp15wb473", TYPE_NCPXXWB473 },
	{ "ncp18wb473", TYPE_NCPXXWB473 },
	{ "ncp21wb473", TYPE_NCPXXWB473 },
	{ "ncp03wb473", TYPE_NCPXXWB473 },
	{ "ncp15wl333", TYPE_NCPXXWL333 },
	{ },
};
425
/* Platform driver glue; matching is done via ntc_thermistor_id above. */
static struct platform_driver ntc_thermistor_driver = {
	.driver = {
		.name = "ntc-thermistor",
		.owner = THIS_MODULE,
	},
	.probe = ntc_thermistor_probe,
	.remove = __devexit_p(ntc_thermistor_remove),
	.id_table = ntc_thermistor_id,
};
435
/* Module entry point: register the platform driver. */
static int __init ntc_thermistor_init(void)
{
	return platform_driver_register(&ntc_thermistor_driver);
}

module_init(ntc_thermistor_init);

/* Module exit point: unregister the platform driver. */
static void __exit ntc_thermistor_cleanup(void)
{
	platform_driver_unregister(&ntc_thermistor_driver);
}

module_exit(ntc_thermistor_cleanup);

MODULE_DESCRIPTION("NTC Thermistor Driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ntc-thermistor");
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
new file mode 100644
index 000000000000..c9237b9dcff2
--- /dev/null
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -0,0 +1,100 @@
1#
2# PMBus chip drivers configuration
3#
4
5menuconfig PMBUS
6 tristate "PMBus support"
7 depends on I2C && EXPERIMENTAL
8 default n
9 help
10 Say yes here if you want to enable PMBus support.
11
12 This driver can also be built as a module. If so, the module will
13 be called pmbus_core.
14
15if PMBUS
16
17config SENSORS_PMBUS
18 tristate "Generic PMBus devices"
19 default y
20 help
21 If you say yes here you get hardware monitoring support for generic
22 PMBus devices, including but not limited to ADP4000, BMR450, BMR451,
23 BMR453, BMR454, LTC2978, NCP4200, and NCP4208.
24
25 This driver can also be built as a module. If so, the module will
26 be called pmbus.
27
28config SENSORS_ADM1275
29 tristate "Analog Devices ADM1275"
30 default n
31 help
32 If you say yes here you get hardware monitoring support for Analog
33 Devices ADM1275 Hot-Swap Controller and Digital Power Monitor.
34
35 This driver can also be built as a module. If so, the module will
36 be called adm1275.
37
38config SENSORS_LM25066
39 tristate "National Semiconductor LM25066 and compatibles"
40 default n
41 help
42 If you say yes here you get hardware monitoring support for National
43 Semiconductor LM25066, LM5064, and LM5066.
44
45 This driver can also be built as a module. If so, the module will
46 be called lm25066.
47
48config SENSORS_MAX16064
49 tristate "Maxim MAX16064"
50 default n
51 help
52 If you say yes here you get hardware monitoring support for Maxim
53 MAX16064.
54
55 This driver can also be built as a module. If so, the module will
56 be called max16064.
57
58config SENSORS_MAX34440
59 tristate "Maxim MAX34440/MAX34441"
60 default n
61 help
62 If you say yes here you get hardware monitoring support for Maxim
63 MAX34440 and MAX34441.
64
65 This driver can also be built as a module. If so, the module will
66 be called max34440.
67
68config SENSORS_MAX8688
69 tristate "Maxim MAX8688"
70 default n
71 help
72 If you say yes here you get hardware monitoring support for Maxim
73 MAX8688.
74
75 This driver can also be built as a module. If so, the module will
76 be called max8688.
77
78config SENSORS_UCD9000
79 tristate "TI UCD90120, UCD90124, UCD9090, UCD90910"
80 default n
81 help
82 If you say yes here you get hardware monitoring support for TI
83 UCD90120, UCD90124, UCD9090, UCD90910 Sequencer and System Health
84 Controllers.
85
86 This driver can also be built as a module. If so, the module will
87 be called ucd9000.
88
89config SENSORS_UCD9200
90 tristate "TI UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, UCD9248"
91 default n
92 help
93 If you say yes here you get hardware monitoring support for TI
94 UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, and UCD9248
95 Digital PWM System Controllers.
96
97 This driver can also be built as a module. If so, the module will
98 be called ucd9200.
99
100endif # PMBUS
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
new file mode 100644
index 000000000000..623eedb1ed9a
--- /dev/null
+++ b/drivers/hwmon/pmbus/Makefile
@@ -0,0 +1,13 @@
1#
2# Makefile for PMBus chip drivers.
3#
4
5obj-$(CONFIG_PMBUS) += pmbus_core.o
6obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
7obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
8obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
9obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
10obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
11obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
12obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
13obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
diff --git a/drivers/hwmon/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 8bc1bd663721..c936e2782309 100644
--- a/drivers/hwmon/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -23,11 +23,68 @@
23#include <linux/i2c.h> 23#include <linux/i2c.h>
24#include "pmbus.h" 24#include "pmbus.h"
25 25
26#define ADM1275_PEAK_IOUT 0xd0
27#define ADM1275_PEAK_VIN 0xd1
28#define ADM1275_PEAK_VOUT 0xd2
26#define ADM1275_PMON_CONFIG 0xd4 29#define ADM1275_PMON_CONFIG 0xd4
27 30
28#define ADM1275_VIN_VOUT_SELECT (1 << 6) 31#define ADM1275_VIN_VOUT_SELECT (1 << 6)
29#define ADM1275_VRANGE (1 << 5) 32#define ADM1275_VRANGE (1 << 5)
30 33
34static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
35{
36 int ret;
37
38 if (page)
39 return -EINVAL;
40
41 switch (reg) {
42 case PMBUS_VIRT_READ_IOUT_MAX:
43 ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_IOUT);
44 break;
45 case PMBUS_VIRT_READ_VOUT_MAX:
46 ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VOUT);
47 break;
48 case PMBUS_VIRT_READ_VIN_MAX:
49 ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VIN);
50 break;
51 case PMBUS_VIRT_RESET_IOUT_HISTORY:
52 case PMBUS_VIRT_RESET_VOUT_HISTORY:
53 case PMBUS_VIRT_RESET_VIN_HISTORY:
54 ret = 0;
55 break;
56 default:
57 ret = -ENODATA;
58 break;
59 }
60 return ret;
61}
62
63static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
64 u16 word)
65{
66 int ret;
67
68 if (page)
69 return -EINVAL;
70
71 switch (reg) {
72 case PMBUS_VIRT_RESET_IOUT_HISTORY:
73 ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_IOUT, 0);
74 break;
75 case PMBUS_VIRT_RESET_VOUT_HISTORY:
76 ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_VOUT, 0);
77 break;
78 case PMBUS_VIRT_RESET_VIN_HISTORY:
79 ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_VIN, 0);
80 break;
81 default:
82 ret = -ENODATA;
83 break;
84 }
85 return ret;
86}
87
31static int adm1275_probe(struct i2c_client *client, 88static int adm1275_probe(struct i2c_client *client,
32 const struct i2c_device_id *id) 89 const struct i2c_device_id *id)
33{ 90{
@@ -50,14 +107,17 @@ static int adm1275_probe(struct i2c_client *client,
50 } 107 }
51 108
52 info->pages = 1; 109 info->pages = 1;
53 info->direct[PSC_VOLTAGE_IN] = true; 110 info->format[PSC_VOLTAGE_IN] = direct;
54 info->direct[PSC_VOLTAGE_OUT] = true; 111 info->format[PSC_VOLTAGE_OUT] = direct;
55 info->direct[PSC_CURRENT_OUT] = true; 112 info->format[PSC_CURRENT_OUT] = direct;
56 info->m[PSC_CURRENT_OUT] = 807; 113 info->m[PSC_CURRENT_OUT] = 807;
57 info->b[PSC_CURRENT_OUT] = 20475; 114 info->b[PSC_CURRENT_OUT] = 20475;
58 info->R[PSC_CURRENT_OUT] = -1; 115 info->R[PSC_CURRENT_OUT] = -1;
59 info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT; 116 info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
60 117
118 info->read_word_data = adm1275_read_word_data;
119 info->write_word_data = adm1275_write_word_data;
120
61 if (config & ADM1275_VRANGE) { 121 if (config & ADM1275_VRANGE) {
62 info->m[PSC_VOLTAGE_IN] = 19199; 122 info->m[PSC_VOLTAGE_IN] = 19199;
63 info->b[PSC_VOLTAGE_IN] = 0; 123 info->b[PSC_VOLTAGE_IN] = 0;
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
new file mode 100644
index 000000000000..d4bc114572de
--- /dev/null
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -0,0 +1,340 @@
1/*
2 * Hardware monitoring driver for LM25066 / LM5064 / LM5066
3 *
4 * Copyright (c) 2011 Ericsson AB.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/err.h>
25#include <linux/slab.h>
26#include <linux/i2c.h>
27#include "pmbus.h"
28
29enum chips { lm25066, lm5064, lm5066 };
30
31#define LM25066_READ_VAUX 0xd0
32#define LM25066_MFR_READ_IIN 0xd1
33#define LM25066_MFR_READ_PIN 0xd2
34#define LM25066_MFR_IIN_OC_WARN_LIMIT 0xd3
35#define LM25066_MFR_PIN_OP_WARN_LIMIT 0xd4
36#define LM25066_READ_PIN_PEAK 0xd5
37#define LM25066_CLEAR_PIN_PEAK 0xd6
38#define LM25066_DEVICE_SETUP 0xd9
39#define LM25066_READ_AVG_VIN 0xdc
40#define LM25066_READ_AVG_VOUT 0xdd
41#define LM25066_READ_AVG_IIN 0xde
42#define LM25066_READ_AVG_PIN 0xdf
43
44#define LM25066_DEV_SETUP_CL (1 << 4) /* Current limit */
45
/*
 * Per-instance state: the chip variant (enum chips) plus the
 * pmbus_driver_info handed to the pmbus core.  The info member is embedded
 * so the instance can be recovered from the core's info pointer with
 * container_of() (see to_lm25066_data below).
 */
struct lm25066_data {
	int id;			/* chip variant: lm25066 / lm5064 / lm5066 */
	struct pmbus_driver_info info;
};

#define to_lm25066_data(x) container_of(x, struct lm25066_data, info)
52
/*
 * Chip-specific word-read hook for the pmbus core.
 *
 * Page 1 exists only to expose the auxiliary voltage: READ_VOUT on page 1
 * is serviced from the LM25066_READ_VAUX register and rescaled so it can
 * share the page-0 VOUT coefficients.  On page 0, manufacturer registers
 * back the standard input-current/power commands and the virtual
 * average/peak registers.  Returns -ENODATA for registers the pmbus core
 * should handle itself.
 */
static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
{
	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
	const struct lm25066_data *data = to_lm25066_data(info);
	int ret;

	if (page > 1)
		return -EINVAL;

	/* Map READ_VAUX into READ_VOUT register on page 1 */
	if (page == 1) {
		switch (reg) {
		case PMBUS_READ_VOUT:
			ret = pmbus_read_word_data(client, 0,
						   LM25066_READ_VAUX);
			if (ret < 0)
				break;
			/* Adjust returned value to match VOUT coefficients */
			switch (data->id) {
			case lm25066:
				/* VOUT: 4.54 mV VAUX: 283.2 uV LSB */
				ret = DIV_ROUND_CLOSEST(ret * 2832, 45400);
				break;
			case lm5064:
				/* VOUT: 4.53 mV VAUX: 700 uV LSB */
				ret = DIV_ROUND_CLOSEST(ret * 70, 453);
				break;
			case lm5066:
				/* VOUT: 2.18 mV VAUX: 725 uV LSB */
				ret = DIV_ROUND_CLOSEST(ret * 725, 2180);
				break;
			}
			break;
		default:
			/* No other valid registers on page 1 */
			ret = -EINVAL;
			break;
		}
		goto done;
	}

	switch (reg) {
	case PMBUS_READ_IIN:
		ret = pmbus_read_word_data(client, 0, LM25066_MFR_READ_IIN);
		break;
	case PMBUS_READ_PIN:
		ret = pmbus_read_word_data(client, 0, LM25066_MFR_READ_PIN);
		break;
	case PMBUS_IIN_OC_WARN_LIMIT:
		ret = pmbus_read_word_data(client, 0,
					   LM25066_MFR_IIN_OC_WARN_LIMIT);
		break;
	case PMBUS_PIN_OP_WARN_LIMIT:
		ret = pmbus_read_word_data(client, 0,
					   LM25066_MFR_PIN_OP_WARN_LIMIT);
		break;
	case PMBUS_VIRT_READ_VIN_AVG:
		ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_VIN);
		break;
	case PMBUS_VIRT_READ_VOUT_AVG:
		ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_VOUT);
		break;
	case PMBUS_VIRT_READ_IIN_AVG:
		ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_IIN);
		break;
	case PMBUS_VIRT_READ_PIN_AVG:
		ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_PIN);
		break;
	case PMBUS_VIRT_READ_PIN_MAX:
		ret = pmbus_read_word_data(client, 0, LM25066_READ_PIN_PEAK);
		break;
	case PMBUS_VIRT_RESET_PIN_HISTORY:
		/* The actual reset is done in the write path; reads as 0. */
		ret = 0;
		break;
	default:
		ret = -ENODATA;
		break;
	}
done:
	return ret;
}
134
135static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
136 u16 word)
137{
138 int ret;
139
140 if (page > 1)
141 return -EINVAL;
142
143 switch (reg) {
144 case PMBUS_IIN_OC_WARN_LIMIT:
145 ret = pmbus_write_word_data(client, 0,
146 LM25066_MFR_IIN_OC_WARN_LIMIT,
147 word);
148 break;
149 case PMBUS_PIN_OP_WARN_LIMIT:
150 ret = pmbus_write_word_data(client, 0,
151 LM25066_MFR_PIN_OP_WARN_LIMIT,
152 word);
153 break;
154 case PMBUS_VIRT_RESET_PIN_HISTORY:
155 ret = pmbus_write_byte(client, 0, LM25066_CLEAR_PIN_PEAK);
156 break;
157 default:
158 ret = -ENODATA;
159 break;
160 }
161 return ret;
162}
163
/*
 * Probe: read the DEVICE_SETUP register to learn the configured current
 * limit, fill in the direct-format coefficients for the detected chip
 * variant, and hand the pmbus_driver_info to the pmbus core.
 *
 * Current and power coefficients depend on both the chip variant and the
 * LM25066_DEV_SETUP_CL (current limit) bit; the per-variant m/b/R values
 * come from the respective datasheets.
 */
static int lm25066_probe(struct i2c_client *client,
			  const struct i2c_device_id *id)
{
	int config;
	int ret;
	struct lm25066_data *data;
	struct pmbus_driver_info *info;

	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_READ_BYTE_DATA))
		return -ENODEV;

	data = kzalloc(sizeof(struct lm25066_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	config = i2c_smbus_read_byte_data(client, LM25066_DEVICE_SETUP);
	if (config < 0) {
		ret = config;
		goto err_mem;
	}

	data->id = id->driver_data;
	info = &data->info;

	/* Page 1 only carries the auxiliary voltage (mapped to VOUT). */
	info->pages = 2;
	info->format[PSC_VOLTAGE_IN] = direct;
	info->format[PSC_VOLTAGE_OUT] = direct;
	info->format[PSC_CURRENT_IN] = direct;
	info->format[PSC_TEMPERATURE] = direct;
	info->format[PSC_POWER] = direct;

	info->m[PSC_TEMPERATURE] = 16;
	info->b[PSC_TEMPERATURE] = 0;
	info->R[PSC_TEMPERATURE] = 0;

	info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT
	  | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_PIN | PMBUS_HAVE_IIN
	  | PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
	info->func[1] = PMBUS_HAVE_VOUT;

	info->read_word_data = lm25066_read_word_data;
	info->write_word_data = lm25066_write_word_data;

	/* Per-variant coefficients; CL bit selects the current-limit range. */
	switch (id->driver_data) {
	case lm25066:
		info->m[PSC_VOLTAGE_IN] = 22070;
		info->b[PSC_VOLTAGE_IN] = 0;
		info->R[PSC_VOLTAGE_IN] = -2;
		info->m[PSC_VOLTAGE_OUT] = 22070;
		info->b[PSC_VOLTAGE_OUT] = 0;
		info->R[PSC_VOLTAGE_OUT] = -2;

		if (config & LM25066_DEV_SETUP_CL) {
			info->m[PSC_CURRENT_IN] = 6852;
			info->b[PSC_CURRENT_IN] = 0;
			info->R[PSC_CURRENT_IN] = -2;
			info->m[PSC_POWER] = 369;
			info->b[PSC_POWER] = 0;
			info->R[PSC_POWER] = -2;
		} else {
			info->m[PSC_CURRENT_IN] = 13661;
			info->b[PSC_CURRENT_IN] = 0;
			info->R[PSC_CURRENT_IN] = -2;
			info->m[PSC_POWER] = 736;
			info->b[PSC_POWER] = 0;
			info->R[PSC_POWER] = -2;
		}
		break;
	case lm5064:
		info->m[PSC_VOLTAGE_IN] = 22075;
		info->b[PSC_VOLTAGE_IN] = 0;
		info->R[PSC_VOLTAGE_IN] = -2;
		info->m[PSC_VOLTAGE_OUT] = 22075;
		info->b[PSC_VOLTAGE_OUT] = 0;
		info->R[PSC_VOLTAGE_OUT] = -2;

		if (config & LM25066_DEV_SETUP_CL) {
			info->m[PSC_CURRENT_IN] = 6713;
			info->b[PSC_CURRENT_IN] = 0;
			info->R[PSC_CURRENT_IN] = -2;
			info->m[PSC_POWER] = 3619;
			info->b[PSC_POWER] = 0;
			info->R[PSC_POWER] = -3;
		} else {
			info->m[PSC_CURRENT_IN] = 13426;
			info->b[PSC_CURRENT_IN] = 0;
			info->R[PSC_CURRENT_IN] = -2;
			info->m[PSC_POWER] = 7238;
			info->b[PSC_POWER] = 0;
			info->R[PSC_POWER] = -3;
		}
		break;
	case lm5066:
		info->m[PSC_VOLTAGE_IN] = 4587;
		info->b[PSC_VOLTAGE_IN] = 0;
		info->R[PSC_VOLTAGE_IN] = -2;
		info->m[PSC_VOLTAGE_OUT] = 4587;
		info->b[PSC_VOLTAGE_OUT] = 0;
		info->R[PSC_VOLTAGE_OUT] = -2;

		if (config & LM25066_DEV_SETUP_CL) {
			info->m[PSC_CURRENT_IN] = 10753;
			info->b[PSC_CURRENT_IN] = 0;
			info->R[PSC_CURRENT_IN] = -2;
			info->m[PSC_POWER] = 1204;
			info->b[PSC_POWER] = 0;
			info->R[PSC_POWER] = -3;
		} else {
			info->m[PSC_CURRENT_IN] = 5405;
			info->b[PSC_CURRENT_IN] = 0;
			info->R[PSC_CURRENT_IN] = -2;
			info->m[PSC_POWER] = 605;
			info->b[PSC_POWER] = 0;
			info->R[PSC_POWER] = -3;
		}
		break;
	default:
		ret = -ENODEV;
		goto err_mem;
	}

	ret = pmbus_do_probe(client, id, info);
	if (ret)
		goto err_mem;
	return 0;

err_mem:
	kfree(data);
	return ret;
}
295
/*
 * Remove: detach from the pmbus core, then free the per-instance data.
 * The info pointer is fetched before pmbus_do_remove() so the enclosing
 * lm25066_data can still be recovered and freed afterwards.
 */
static int lm25066_remove(struct i2c_client *client)
{
	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
	const struct lm25066_data *data = to_lm25066_data(info);
	int ret = pmbus_do_remove(client);

	kfree(data);
	return ret;
}
306
/* Supported chip variants; driver_data selects the coefficient set. */
static const struct i2c_device_id lm25066_id[] = {
	{"lm25066", lm25066},
	{"lm5064", lm5064},
	{"lm5066", lm5066},
	{ }
};

MODULE_DEVICE_TABLE(i2c, lm25066_id);

/* This is the driver that will be inserted */
static struct i2c_driver lm25066_driver = {
	.driver = {
		   .name = "lm25066",
		   },
	.probe = lm25066_probe,
	.remove = lm25066_remove,
	.id_table = lm25066_id,
};

/* Module entry point: register the I2C driver. */
static int __init lm25066_init(void)
{
	return i2c_add_driver(&lm25066_driver);
}

/* Module exit point: unregister the I2C driver. */
static void __exit lm25066_exit(void)
{
	i2c_del_driver(&lm25066_driver);
}

MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for LM25066/LM5064/LM5066");
MODULE_LICENSE("GPL");
module_init(lm25066_init);
module_exit(lm25066_exit);
diff --git a/drivers/hwmon/max16064.c b/drivers/hwmon/pmbus/max16064.c
index 1d6d717060d3..e50b296e8db4 100644
--- a/drivers/hwmon/max16064.c
+++ b/drivers/hwmon/pmbus/max16064.c
@@ -25,11 +25,60 @@
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include "pmbus.h" 26#include "pmbus.h"
27 27
28#define MAX16064_MFR_VOUT_PEAK 0xd4
29#define MAX16064_MFR_TEMPERATURE_PEAK 0xd6
30
31static int max16064_read_word_data(struct i2c_client *client, int page, int reg)
32{
33 int ret;
34
35 switch (reg) {
36 case PMBUS_VIRT_READ_VOUT_MAX:
37 ret = pmbus_read_word_data(client, page,
38 MAX16064_MFR_VOUT_PEAK);
39 break;
40 case PMBUS_VIRT_READ_TEMP_MAX:
41 ret = pmbus_read_word_data(client, page,
42 MAX16064_MFR_TEMPERATURE_PEAK);
43 break;
44 case PMBUS_VIRT_RESET_VOUT_HISTORY:
45 case PMBUS_VIRT_RESET_TEMP_HISTORY:
46 ret = 0;
47 break;
48 default:
49 ret = -ENODATA;
50 break;
51 }
52 return ret;
53}
54
55static int max16064_write_word_data(struct i2c_client *client, int page,
56 int reg, u16 word)
57{
58 int ret;
59
60 switch (reg) {
61 case PMBUS_VIRT_RESET_VOUT_HISTORY:
62 ret = pmbus_write_word_data(client, page,
63 MAX16064_MFR_VOUT_PEAK, 0);
64 break;
65 case PMBUS_VIRT_RESET_TEMP_HISTORY:
66 ret = pmbus_write_word_data(client, page,
67 MAX16064_MFR_TEMPERATURE_PEAK,
68 0xffff);
69 break;
70 default:
71 ret = -ENODATA;
72 break;
73 }
74 return ret;
75}
76
28static struct pmbus_driver_info max16064_info = { 77static struct pmbus_driver_info max16064_info = {
29 .pages = 4, 78 .pages = 4,
30 .direct[PSC_VOLTAGE_IN] = true, 79 .format[PSC_VOLTAGE_IN] = direct,
31 .direct[PSC_VOLTAGE_OUT] = true, 80 .format[PSC_VOLTAGE_OUT] = direct,
32 .direct[PSC_TEMPERATURE] = true, 81 .format[PSC_TEMPERATURE] = direct,
33 .m[PSC_VOLTAGE_IN] = 19995, 82 .m[PSC_VOLTAGE_IN] = 19995,
34 .b[PSC_VOLTAGE_IN] = 0, 83 .b[PSC_VOLTAGE_IN] = 0,
35 .R[PSC_VOLTAGE_IN] = -1, 84 .R[PSC_VOLTAGE_IN] = -1,
@@ -44,6 +93,8 @@ static struct pmbus_driver_info max16064_info = {
44 .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT, 93 .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
45 .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT, 94 .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
46 .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT, 95 .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
96 .read_word_data = max16064_read_word_data,
97 .write_word_data = max16064_write_word_data,
47}; 98};
48 99
49static int max16064_probe(struct i2c_client *client, 100static int max16064_probe(struct i2c_client *client,
diff --git a/drivers/hwmon/max34440.c b/drivers/hwmon/pmbus/max34440.c
index db11e1a175b2..fda621d2e458 100644
--- a/drivers/hwmon/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -27,11 +27,70 @@
27 27
28enum chips { max34440, max34441 }; 28enum chips { max34440, max34441 };
29 29
30#define MAX34440_MFR_VOUT_PEAK 0xd4
31#define MAX34440_MFR_IOUT_PEAK 0xd5
32#define MAX34440_MFR_TEMPERATURE_PEAK 0xd6
33
30#define MAX34440_STATUS_OC_WARN (1 << 0) 34#define MAX34440_STATUS_OC_WARN (1 << 0)
31#define MAX34440_STATUS_OC_FAULT (1 << 1) 35#define MAX34440_STATUS_OC_FAULT (1 << 1)
32#define MAX34440_STATUS_OT_FAULT (1 << 5) 36#define MAX34440_STATUS_OT_FAULT (1 << 5)
33#define MAX34440_STATUS_OT_WARN (1 << 6) 37#define MAX34440_STATUS_OT_WARN (1 << 6)
34 38
39static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
40{
41 int ret;
42
43 switch (reg) {
44 case PMBUS_VIRT_READ_VOUT_MAX:
45 ret = pmbus_read_word_data(client, page,
46 MAX34440_MFR_VOUT_PEAK);
47 break;
48 case PMBUS_VIRT_READ_IOUT_MAX:
49 ret = pmbus_read_word_data(client, page,
50 MAX34440_MFR_IOUT_PEAK);
51 break;
52 case PMBUS_VIRT_READ_TEMP_MAX:
53 ret = pmbus_read_word_data(client, page,
54 MAX34440_MFR_TEMPERATURE_PEAK);
55 break;
56 case PMBUS_VIRT_RESET_VOUT_HISTORY:
57 case PMBUS_VIRT_RESET_IOUT_HISTORY:
58 case PMBUS_VIRT_RESET_TEMP_HISTORY:
59 ret = 0;
60 break;
61 default:
62 ret = -ENODATA;
63 break;
64 }
65 return ret;
66}
67
68static int max34440_write_word_data(struct i2c_client *client, int page,
69 int reg, u16 word)
70{
71 int ret;
72
73 switch (reg) {
74 case PMBUS_VIRT_RESET_VOUT_HISTORY:
75 ret = pmbus_write_word_data(client, page,
76 MAX34440_MFR_VOUT_PEAK, 0);
77 break;
78 case PMBUS_VIRT_RESET_IOUT_HISTORY:
79 ret = pmbus_write_word_data(client, page,
80 MAX34440_MFR_IOUT_PEAK, 0);
81 break;
82 case PMBUS_VIRT_RESET_TEMP_HISTORY:
83 ret = pmbus_write_word_data(client, page,
84 MAX34440_MFR_TEMPERATURE_PEAK,
85 0xffff);
86 break;
87 default:
88 ret = -ENODATA;
89 break;
90 }
91 return ret;
92}
93
35static int max34440_read_byte_data(struct i2c_client *client, int page, int reg) 94static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
36{ 95{
37 int ret; 96 int ret;
@@ -72,10 +131,10 @@ static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
72static struct pmbus_driver_info max34440_info[] = { 131static struct pmbus_driver_info max34440_info[] = {
73 [max34440] = { 132 [max34440] = {
74 .pages = 14, 133 .pages = 14,
75 .direct[PSC_VOLTAGE_IN] = true, 134 .format[PSC_VOLTAGE_IN] = direct,
76 .direct[PSC_VOLTAGE_OUT] = true, 135 .format[PSC_VOLTAGE_OUT] = direct,
77 .direct[PSC_TEMPERATURE] = true, 136 .format[PSC_TEMPERATURE] = direct,
78 .direct[PSC_CURRENT_OUT] = true, 137 .format[PSC_CURRENT_OUT] = direct,
79 .m[PSC_VOLTAGE_IN] = 1, 138 .m[PSC_VOLTAGE_IN] = 1,
80 .b[PSC_VOLTAGE_IN] = 0, 139 .b[PSC_VOLTAGE_IN] = 0,
81 .R[PSC_VOLTAGE_IN] = 3, /* R = 0 in datasheet reflects mV */ 140 .R[PSC_VOLTAGE_IN] = 3, /* R = 0 in datasheet reflects mV */
@@ -109,14 +168,16 @@ static struct pmbus_driver_info max34440_info[] = {
109 .func[12] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, 168 .func[12] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
110 .func[13] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, 169 .func[13] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
111 .read_byte_data = max34440_read_byte_data, 170 .read_byte_data = max34440_read_byte_data,
171 .read_word_data = max34440_read_word_data,
172 .write_word_data = max34440_write_word_data,
112 }, 173 },
113 [max34441] = { 174 [max34441] = {
114 .pages = 12, 175 .pages = 12,
115 .direct[PSC_VOLTAGE_IN] = true, 176 .format[PSC_VOLTAGE_IN] = direct,
116 .direct[PSC_VOLTAGE_OUT] = true, 177 .format[PSC_VOLTAGE_OUT] = direct,
117 .direct[PSC_TEMPERATURE] = true, 178 .format[PSC_TEMPERATURE] = direct,
118 .direct[PSC_CURRENT_OUT] = true, 179 .format[PSC_CURRENT_OUT] = direct,
119 .direct[PSC_FAN] = true, 180 .format[PSC_FAN] = direct,
120 .m[PSC_VOLTAGE_IN] = 1, 181 .m[PSC_VOLTAGE_IN] = 1,
121 .b[PSC_VOLTAGE_IN] = 0, 182 .b[PSC_VOLTAGE_IN] = 0,
122 .R[PSC_VOLTAGE_IN] = 3, 183 .R[PSC_VOLTAGE_IN] = 3,
@@ -150,6 +211,8 @@ static struct pmbus_driver_info max34440_info[] = {
150 .func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, 211 .func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
151 .func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, 212 .func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
152 .read_byte_data = max34440_read_byte_data, 213 .read_byte_data = max34440_read_byte_data,
214 .read_word_data = max34440_read_word_data,
215 .write_word_data = max34440_write_word_data,
153 }, 216 },
154}; 217};
155 218
diff --git a/drivers/hwmon/max8688.c b/drivers/hwmon/pmbus/max8688.c
index 7fb93f4e9f21..c3e72f1a3cfb 100644
--- a/drivers/hwmon/max8688.c
+++ b/drivers/hwmon/pmbus/max8688.c
@@ -25,6 +25,9 @@
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include "pmbus.h" 26#include "pmbus.h"
27 27
28#define MAX8688_MFR_VOUT_PEAK 0xd4
29#define MAX8688_MFR_IOUT_PEAK 0xd5
30#define MAX8688_MFR_TEMPERATURE_PEAK 0xd6
28#define MAX8688_MFG_STATUS 0xd8 31#define MAX8688_MFG_STATUS 0xd8
29 32
30#define MAX8688_STATUS_OC_FAULT (1 << 4) 33#define MAX8688_STATUS_OC_FAULT (1 << 4)
@@ -37,6 +40,62 @@
37#define MAX8688_STATUS_OT_FAULT (1 << 13) 40#define MAX8688_STATUS_OT_FAULT (1 << 13)
38#define MAX8688_STATUS_OT_WARNING (1 << 14) 41#define MAX8688_STATUS_OT_WARNING (1 << 14)
39 42
43static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
44{
45 int ret;
46
47 if (page)
48 return -EINVAL;
49
50 switch (reg) {
51 case PMBUS_VIRT_READ_VOUT_MAX:
52 ret = pmbus_read_word_data(client, 0, MAX8688_MFR_VOUT_PEAK);
53 break;
54 case PMBUS_VIRT_READ_IOUT_MAX:
55 ret = pmbus_read_word_data(client, 0, MAX8688_MFR_IOUT_PEAK);
56 break;
57 case PMBUS_VIRT_READ_TEMP_MAX:
58 ret = pmbus_read_word_data(client, 0,
59 MAX8688_MFR_TEMPERATURE_PEAK);
60 break;
61 case PMBUS_VIRT_RESET_VOUT_HISTORY:
62 case PMBUS_VIRT_RESET_IOUT_HISTORY:
63 case PMBUS_VIRT_RESET_TEMP_HISTORY:
64 ret = 0;
65 break;
66 default:
67 ret = -ENODATA;
68 break;
69 }
70 return ret;
71}
72
73static int max8688_write_word_data(struct i2c_client *client, int page, int reg,
74 u16 word)
75{
76 int ret;
77
78 switch (reg) {
79 case PMBUS_VIRT_RESET_VOUT_HISTORY:
80 ret = pmbus_write_word_data(client, 0, MAX8688_MFR_VOUT_PEAK,
81 0);
82 break;
83 case PMBUS_VIRT_RESET_IOUT_HISTORY:
84 ret = pmbus_write_word_data(client, 0, MAX8688_MFR_IOUT_PEAK,
85 0);
86 break;
87 case PMBUS_VIRT_RESET_TEMP_HISTORY:
88 ret = pmbus_write_word_data(client, 0,
89 MAX8688_MFR_TEMPERATURE_PEAK,
90 0xffff);
91 break;
92 default:
93 ret = -ENODATA;
94 break;
95 }
96 return ret;
97}
98
40static int max8688_read_byte_data(struct i2c_client *client, int page, int reg) 99static int max8688_read_byte_data(struct i2c_client *client, int page, int reg)
41{ 100{
42 int ret = 0; 101 int ret = 0;
@@ -91,10 +150,10 @@ static int max8688_read_byte_data(struct i2c_client *client, int page, int reg)
91 150
92static struct pmbus_driver_info max8688_info = { 151static struct pmbus_driver_info max8688_info = {
93 .pages = 1, 152 .pages = 1,
94 .direct[PSC_VOLTAGE_IN] = true, 153 .format[PSC_VOLTAGE_IN] = direct,
95 .direct[PSC_VOLTAGE_OUT] = true, 154 .format[PSC_VOLTAGE_OUT] = direct,
96 .direct[PSC_TEMPERATURE] = true, 155 .format[PSC_TEMPERATURE] = direct,
97 .direct[PSC_CURRENT_OUT] = true, 156 .format[PSC_CURRENT_OUT] = direct,
98 .m[PSC_VOLTAGE_IN] = 19995, 157 .m[PSC_VOLTAGE_IN] = 19995,
99 .b[PSC_VOLTAGE_IN] = 0, 158 .b[PSC_VOLTAGE_IN] = 0,
100 .R[PSC_VOLTAGE_IN] = -1, 159 .R[PSC_VOLTAGE_IN] = -1,
@@ -111,6 +170,8 @@ static struct pmbus_driver_info max8688_info = {
111 | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT 170 | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT
112 | PMBUS_HAVE_STATUS_TEMP, 171 | PMBUS_HAVE_STATUS_TEMP,
113 .read_byte_data = max8688_read_byte_data, 172 .read_byte_data = max8688_read_byte_data,
173 .read_word_data = max8688_read_word_data,
174 .write_word_data = max8688_write_word_data,
114}; 175};
115 176
116static int max8688_probe(struct i2c_client *client, 177static int max8688_probe(struct i2c_client *client,
diff --git a/drivers/hwmon/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 9b1f0c37ef77..73de9f1f3194 100644
--- a/drivers/hwmon/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -96,6 +96,8 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
96static int pmbus_identify(struct i2c_client *client, 96static int pmbus_identify(struct i2c_client *client,
97 struct pmbus_driver_info *info) 97 struct pmbus_driver_info *info)
98{ 98{
99 int ret = 0;
100
99 if (!info->pages) { 101 if (!info->pages) {
100 /* 102 /*
101 * Check if the PAGE command is supported. If it is, 103 * Check if the PAGE command is supported. If it is,
@@ -117,6 +119,27 @@ static int pmbus_identify(struct i2c_client *client,
117 } 119 }
118 } 120 }
119 121
122 if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
123 int vout_mode;
124
125 vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
126 if (vout_mode >= 0 && vout_mode != 0xff) {
127 switch (vout_mode >> 5) {
128 case 0:
129 break;
130 case 1:
131 info->format[PSC_VOLTAGE_OUT] = vid;
132 break;
133 case 2:
134 info->format[PSC_VOLTAGE_OUT] = direct;
135 break;
136 default:
137 ret = -ENODEV;
138 goto abort;
139 }
140 }
141 }
142
120 /* 143 /*
121 * We should check if the COEFFICIENTS register is supported. 144 * We should check if the COEFFICIENTS register is supported.
122 * If it is, and the chip is configured for direct mode, we can read 145 * If it is, and the chip is configured for direct mode, we can read
@@ -125,13 +148,18 @@ static int pmbus_identify(struct i2c_client *client,
125 * 148 *
126 * To do this, we will need access to a chip which actually supports the 149 * To do this, we will need access to a chip which actually supports the
127 * COEFFICIENTS command, since the command is too complex to implement 150 * COEFFICIENTS command, since the command is too complex to implement
128 * without testing it. 151 * without testing it. Until then, abort if a chip configured for direct
152 * mode was detected.
129 */ 153 */
154 if (info->format[PSC_VOLTAGE_OUT] == direct) {
155 ret = -ENODEV;
156 goto abort;
157 }
130 158
131 /* Try to find sensor groups */ 159 /* Try to find sensor groups */
132 pmbus_find_sensor_groups(client, info); 160 pmbus_find_sensor_groups(client, info);
133 161abort:
134 return 0; 162 return ret;
135} 163}
136 164
137static int pmbus_probe(struct i2c_client *client, 165static int pmbus_probe(struct i2c_client *client,
@@ -172,11 +200,14 @@ static int pmbus_remove(struct i2c_client *client)
172 * Use driver_data to set the number of pages supported by the chip. 200 * Use driver_data to set the number of pages supported by the chip.
173 */ 201 */
174static const struct i2c_device_id pmbus_id[] = { 202static const struct i2c_device_id pmbus_id[] = {
203 {"adp4000", 1},
175 {"bmr450", 1}, 204 {"bmr450", 1},
176 {"bmr451", 1}, 205 {"bmr451", 1},
177 {"bmr453", 1}, 206 {"bmr453", 1},
178 {"bmr454", 1}, 207 {"bmr454", 1},
179 {"ltc2978", 8}, 208 {"ltc2978", 8},
209 {"ncp4200", 1},
210 {"ncp4208", 1},
180 {"pmbus", 0}, 211 {"pmbus", 0},
181 {} 212 {}
182}; 213};
diff --git a/drivers/hwmon/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 50647ab7235a..0808d986d75b 100644
--- a/drivers/hwmon/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -126,6 +126,42 @@
126#define PMBUS_MFR_SERIAL 0x9E 126#define PMBUS_MFR_SERIAL 0x9E
127 127
128/* 128/*
129 * Virtual registers.
130 * Useful to support attributes which are not supported by standard PMBus
131 * registers but exist as manufacturer specific registers on individual chips.
132 * Must be mapped to real registers in device specific code.
133 *
134 * Semantics:
135 * Virtual registers are all word size.
136 * READ registers are read-only; writes are either ignored or return an error.
137 * RESET registers are read/write. Reading returns zero (used for detection),
138 * writing any value causes the associated history to be reset.
139 */
140#define PMBUS_VIRT_BASE 0x100
141#define PMBUS_VIRT_READ_TEMP_MIN (PMBUS_VIRT_BASE + 0)
142#define PMBUS_VIRT_READ_TEMP_MAX (PMBUS_VIRT_BASE + 1)
143#define PMBUS_VIRT_RESET_TEMP_HISTORY (PMBUS_VIRT_BASE + 2)
144#define PMBUS_VIRT_READ_VIN_AVG (PMBUS_VIRT_BASE + 3)
145#define PMBUS_VIRT_READ_VIN_MIN (PMBUS_VIRT_BASE + 4)
146#define PMBUS_VIRT_READ_VIN_MAX (PMBUS_VIRT_BASE + 5)
147#define PMBUS_VIRT_RESET_VIN_HISTORY (PMBUS_VIRT_BASE + 6)
148#define PMBUS_VIRT_READ_IIN_AVG (PMBUS_VIRT_BASE + 7)
149#define PMBUS_VIRT_READ_IIN_MIN (PMBUS_VIRT_BASE + 8)
150#define PMBUS_VIRT_READ_IIN_MAX (PMBUS_VIRT_BASE + 9)
151#define PMBUS_VIRT_RESET_IIN_HISTORY (PMBUS_VIRT_BASE + 10)
152#define PMBUS_VIRT_READ_PIN_AVG (PMBUS_VIRT_BASE + 11)
153#define PMBUS_VIRT_READ_PIN_MAX (PMBUS_VIRT_BASE + 12)
154#define PMBUS_VIRT_RESET_PIN_HISTORY (PMBUS_VIRT_BASE + 13)
155#define PMBUS_VIRT_READ_VOUT_AVG (PMBUS_VIRT_BASE + 14)
156#define PMBUS_VIRT_READ_VOUT_MIN (PMBUS_VIRT_BASE + 15)
157#define PMBUS_VIRT_READ_VOUT_MAX (PMBUS_VIRT_BASE + 16)
158#define PMBUS_VIRT_RESET_VOUT_HISTORY (PMBUS_VIRT_BASE + 17)
159#define PMBUS_VIRT_READ_IOUT_AVG (PMBUS_VIRT_BASE + 18)
160#define PMBUS_VIRT_READ_IOUT_MIN (PMBUS_VIRT_BASE + 19)
161#define PMBUS_VIRT_READ_IOUT_MAX (PMBUS_VIRT_BASE + 20)
162#define PMBUS_VIRT_RESET_IOUT_HISTORY (PMBUS_VIRT_BASE + 21)
163
164/*
129 * CAPABILITY 165 * CAPABILITY
130 */ 166 */
131#define PB_CAPABILITY_SMBALERT (1<<4) 167#define PB_CAPABILITY_SMBALERT (1<<4)
@@ -266,11 +302,11 @@ enum pmbus_sensor_classes {
266#define PMBUS_HAVE_STATUS_FAN12 (1 << 16) 302#define PMBUS_HAVE_STATUS_FAN12 (1 << 16)
267#define PMBUS_HAVE_STATUS_FAN34 (1 << 17) 303#define PMBUS_HAVE_STATUS_FAN34 (1 << 17)
268 304
305enum pmbus_data_format { linear = 0, direct, vid };
306
269struct pmbus_driver_info { 307struct pmbus_driver_info {
270 int pages; /* Total number of pages */ 308 int pages; /* Total number of pages */
271 bool direct[PSC_NUM_CLASSES]; 309 enum pmbus_data_format format[PSC_NUM_CLASSES];
272 /* true if device uses direct data format
273 for the given sensor class */
274 /* 310 /*
275 * Support one set of coefficients for each sensor type 311 * Support one set of coefficients for each sensor type
276 * Used for chips providing data in direct mode. 312 * Used for chips providing data in direct mode.
@@ -286,6 +322,9 @@ struct pmbus_driver_info {
286 * necessary. 322 * necessary.
287 */ 323 */
288 int (*read_byte_data)(struct i2c_client *client, int page, int reg); 324 int (*read_byte_data)(struct i2c_client *client, int page, int reg);
325 int (*read_word_data)(struct i2c_client *client, int page, int reg);
326 int (*write_word_data)(struct i2c_client *client, int page, int reg,
327 u16 word);
289 /* 328 /*
290 * The identify function determines supported PMBus functionality. 329 * The identify function determines supported PMBus functionality.
291 * This function is only necessary if a chip driver supports multiple 330 * This function is only necessary if a chip driver supports multiple
@@ -299,6 +338,9 @@ struct pmbus_driver_info {
299 338
300int pmbus_set_page(struct i2c_client *client, u8 page); 339int pmbus_set_page(struct i2c_client *client, u8 page);
301int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg); 340int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
341int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word);
342int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
343int pmbus_write_byte(struct i2c_client *client, int page, u8 value);
302void pmbus_clear_faults(struct i2c_client *client); 344void pmbus_clear_faults(struct i2c_client *client);
303bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg); 345bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg);
304bool pmbus_check_word_register(struct i2c_client *client, int page, int reg); 346bool pmbus_check_word_register(struct i2c_client *client, int page, int reg);
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 8e31a8e2c746..5c1b6cf31701 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -33,14 +33,18 @@
33/* 33/*
34 * Constants needed to determine number of sensors, booleans, and labels. 34 * Constants needed to determine number of sensors, booleans, and labels.
35 */ 35 */
36#define PMBUS_MAX_INPUT_SENSORS 11 /* 6*volt, 3*curr, 2*power */ 36#define PMBUS_MAX_INPUT_SENSORS 22 /* 10*volt, 7*curr, 5*power */
37#define PMBUS_VOUT_SENSORS_PER_PAGE 5 /* input, min, max, lcrit, 37#define PMBUS_VOUT_SENSORS_PER_PAGE 9 /* input, min, max, lcrit,
38 crit */ 38 crit, lowest, highest, avg,
39#define PMBUS_IOUT_SENSORS_PER_PAGE 4 /* input, min, max, crit */ 39 reset */
40#define PMBUS_IOUT_SENSORS_PER_PAGE 8 /* input, min, max, crit,
41 lowest, highest, avg,
42 reset */
40#define PMBUS_POUT_SENSORS_PER_PAGE 4 /* input, cap, max, crit */ 43#define PMBUS_POUT_SENSORS_PER_PAGE 4 /* input, cap, max, crit */
41#define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */ 44#define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */
42#define PMBUS_MAX_SENSORS_PER_TEMP 5 /* input, min, max, lcrit, 45#define PMBUS_MAX_SENSORS_PER_TEMP 8 /* input, min, max, lcrit,
43 crit */ 46 crit, lowest, highest,
47 reset */
44 48
45#define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm, 49#define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm,
46 lcrit_alarm, crit_alarm; 50 lcrit_alarm, crit_alarm;
@@ -74,11 +78,13 @@
74#define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES) 78#define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
75#define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1) 79#define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1)
76 80
81#define PMBUS_NAME_SIZE 24
82
77struct pmbus_sensor { 83struct pmbus_sensor {
78 char name[I2C_NAME_SIZE]; /* sysfs sensor name */ 84 char name[PMBUS_NAME_SIZE]; /* sysfs sensor name */
79 struct sensor_device_attribute attribute; 85 struct sensor_device_attribute attribute;
80 u8 page; /* page number */ 86 u8 page; /* page number */
81 u8 reg; /* register */ 87 u16 reg; /* register */
82 enum pmbus_sensor_classes class; /* sensor class */ 88 enum pmbus_sensor_classes class; /* sensor class */
83 bool update; /* runtime sensor update needed */ 89 bool update; /* runtime sensor update needed */
84 int data; /* Sensor data. 90 int data; /* Sensor data.
@@ -86,14 +92,14 @@ struct pmbus_sensor {
86}; 92};
87 93
88struct pmbus_boolean { 94struct pmbus_boolean {
89 char name[I2C_NAME_SIZE]; /* sysfs boolean name */ 95 char name[PMBUS_NAME_SIZE]; /* sysfs boolean name */
90 struct sensor_device_attribute attribute; 96 struct sensor_device_attribute attribute;
91}; 97};
92 98
93struct pmbus_label { 99struct pmbus_label {
94 char name[I2C_NAME_SIZE]; /* sysfs label name */ 100 char name[PMBUS_NAME_SIZE]; /* sysfs label name */
95 struct sensor_device_attribute attribute; 101 struct sensor_device_attribute attribute;
96 char label[I2C_NAME_SIZE]; /* label */ 102 char label[PMBUS_NAME_SIZE]; /* label */
97}; 103};
98 104
99struct pmbus_data { 105struct pmbus_data {
@@ -162,19 +168,21 @@ int pmbus_set_page(struct i2c_client *client, u8 page)
162} 168}
163EXPORT_SYMBOL_GPL(pmbus_set_page); 169EXPORT_SYMBOL_GPL(pmbus_set_page);
164 170
165static int pmbus_write_byte(struct i2c_client *client, u8 page, u8 value) 171int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
166{ 172{
167 int rv; 173 int rv;
168 174
169 rv = pmbus_set_page(client, page); 175 if (page >= 0) {
170 if (rv < 0) 176 rv = pmbus_set_page(client, page);
171 return rv; 177 if (rv < 0)
178 return rv;
179 }
172 180
173 return i2c_smbus_write_byte(client, value); 181 return i2c_smbus_write_byte(client, value);
174} 182}
183EXPORT_SYMBOL_GPL(pmbus_write_byte);
175 184
176static int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, 185int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word)
177 u16 word)
178{ 186{
179 int rv; 187 int rv;
180 188
@@ -184,6 +192,28 @@ static int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg,
184 192
185 return i2c_smbus_write_word_data(client, reg, word); 193 return i2c_smbus_write_word_data(client, reg, word);
186} 194}
195EXPORT_SYMBOL_GPL(pmbus_write_word_data);
196
197/*
198 * _pmbus_write_word_data() is similar to pmbus_write_word_data(), but checks if
199 * a device specific mapping function exists and calls it if necessary.
200 */
201static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
202 u16 word)
203{
204 struct pmbus_data *data = i2c_get_clientdata(client);
205 const struct pmbus_driver_info *info = data->info;
206 int status;
207
208 if (info->write_word_data) {
209 status = info->write_word_data(client, page, reg, word);
210 if (status != -ENODATA)
211 return status;
212 }
213 if (reg >= PMBUS_VIRT_BASE)
214 return -EINVAL;
215 return pmbus_write_word_data(client, page, reg, word);
216}
187 217
188int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg) 218int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
189{ 219{
@@ -197,16 +227,57 @@ int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
197} 227}
198EXPORT_SYMBOL_GPL(pmbus_read_word_data); 228EXPORT_SYMBOL_GPL(pmbus_read_word_data);
199 229
200static int pmbus_read_byte_data(struct i2c_client *client, u8 page, u8 reg) 230/*
231 * _pmbus_read_word_data() is similar to pmbus_read_word_data(), but checks if
232 * a device specific mapping function exists and calls it if necessary.
233 */
234static int _pmbus_read_word_data(struct i2c_client *client, int page, int reg)
235{
236 struct pmbus_data *data = i2c_get_clientdata(client);
237 const struct pmbus_driver_info *info = data->info;
238 int status;
239
240 if (info->read_word_data) {
241 status = info->read_word_data(client, page, reg);
242 if (status != -ENODATA)
243 return status;
244 }
245 if (reg >= PMBUS_VIRT_BASE)
246 return -EINVAL;
247 return pmbus_read_word_data(client, page, reg);
248}
249
250int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
201{ 251{
202 int rv; 252 int rv;
203 253
204 rv = pmbus_set_page(client, page); 254 if (page >= 0) {
205 if (rv < 0) 255 rv = pmbus_set_page(client, page);
206 return rv; 256 if (rv < 0)
257 return rv;
258 }
207 259
208 return i2c_smbus_read_byte_data(client, reg); 260 return i2c_smbus_read_byte_data(client, reg);
209} 261}
262EXPORT_SYMBOL_GPL(pmbus_read_byte_data);
263
264/*
265 * _pmbus_read_byte_data() is similar to pmbus_read_byte_data(), but checks if
266 * a device specific mapping function exists and calls it if necessary.
267 */
268static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
269{
270 struct pmbus_data *data = i2c_get_clientdata(client);
271 const struct pmbus_driver_info *info = data->info;
272 int status;
273
274 if (info->read_byte_data) {
275 status = info->read_byte_data(client, page, reg);
276 if (status != -ENODATA)
277 return status;
278 }
279 return pmbus_read_byte_data(client, page, reg);
280}
210 281
211static void pmbus_clear_fault_page(struct i2c_client *client, int page) 282static void pmbus_clear_fault_page(struct i2c_client *client, int page)
212{ 283{
@@ -223,13 +294,13 @@ void pmbus_clear_faults(struct i2c_client *client)
223} 294}
224EXPORT_SYMBOL_GPL(pmbus_clear_faults); 295EXPORT_SYMBOL_GPL(pmbus_clear_faults);
225 296
226static int pmbus_check_status_cml(struct i2c_client *client, int page) 297static int pmbus_check_status_cml(struct i2c_client *client)
227{ 298{
228 int status, status2; 299 int status, status2;
229 300
230 status = pmbus_read_byte_data(client, page, PMBUS_STATUS_BYTE); 301 status = pmbus_read_byte_data(client, -1, PMBUS_STATUS_BYTE);
231 if (status < 0 || (status & PB_STATUS_CML)) { 302 if (status < 0 || (status & PB_STATUS_CML)) {
232 status2 = pmbus_read_byte_data(client, page, PMBUS_STATUS_CML); 303 status2 = pmbus_read_byte_data(client, -1, PMBUS_STATUS_CML);
233 if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND)) 304 if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND))
234 return -EINVAL; 305 return -EINVAL;
235 } 306 }
@@ -241,10 +312,10 @@ bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
241 int rv; 312 int rv;
242 struct pmbus_data *data = i2c_get_clientdata(client); 313 struct pmbus_data *data = i2c_get_clientdata(client);
243 314
244 rv = pmbus_read_byte_data(client, page, reg); 315 rv = _pmbus_read_byte_data(client, page, reg);
245 if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK)) 316 if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
246 rv = pmbus_check_status_cml(client, page); 317 rv = pmbus_check_status_cml(client);
247 pmbus_clear_fault_page(client, page); 318 pmbus_clear_fault_page(client, -1);
248 return rv >= 0; 319 return rv >= 0;
249} 320}
250EXPORT_SYMBOL_GPL(pmbus_check_byte_register); 321EXPORT_SYMBOL_GPL(pmbus_check_byte_register);
@@ -254,10 +325,10 @@ bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
254 int rv; 325 int rv;
255 struct pmbus_data *data = i2c_get_clientdata(client); 326 struct pmbus_data *data = i2c_get_clientdata(client);
256 327
257 rv = pmbus_read_word_data(client, page, reg); 328 rv = _pmbus_read_word_data(client, page, reg);
258 if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK)) 329 if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
259 rv = pmbus_check_status_cml(client, page); 330 rv = pmbus_check_status_cml(client);
260 pmbus_clear_fault_page(client, page); 331 pmbus_clear_fault_page(client, -1);
261 return rv >= 0; 332 return rv >= 0;
262} 333}
263EXPORT_SYMBOL_GPL(pmbus_check_word_register); 334EXPORT_SYMBOL_GPL(pmbus_check_word_register);
@@ -270,24 +341,6 @@ const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
270} 341}
271EXPORT_SYMBOL_GPL(pmbus_get_driver_info); 342EXPORT_SYMBOL_GPL(pmbus_get_driver_info);
272 343
273/*
274 * _pmbus_read_byte_data() is similar to pmbus_read_byte_data(), but checks if
275 * a device specific mapping funcion exists and calls it if necessary.
276 */
277static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
278{
279 struct pmbus_data *data = i2c_get_clientdata(client);
280 const struct pmbus_driver_info *info = data->info;
281 int status;
282
283 if (info->read_byte_data) {
284 status = info->read_byte_data(client, page, reg);
285 if (status != -ENODATA)
286 return status;
287 }
288 return pmbus_read_byte_data(client, page, reg);
289}
290
291static struct pmbus_data *pmbus_update_device(struct device *dev) 344static struct pmbus_data *pmbus_update_device(struct device *dev)
292{ 345{
293 struct i2c_client *client = to_i2c_client(dev); 346 struct i2c_client *client = to_i2c_client(dev);
@@ -347,8 +400,9 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
347 400
348 if (!data->valid || sensor->update) 401 if (!data->valid || sensor->update)
349 sensor->data 402 sensor->data
350 = pmbus_read_word_data(client, sensor->page, 403 = _pmbus_read_word_data(client,
351 sensor->reg); 404 sensor->page,
405 sensor->reg);
352 } 406 }
353 pmbus_clear_faults(client); 407 pmbus_clear_faults(client);
354 data->last_updated = jiffies; 408 data->last_updated = jiffies;
@@ -443,15 +497,37 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
443 return (val - b) / m; 497 return (val - b) / m;
444} 498}
445 499
500/*
501 * Convert VID sensor values to milli- or micro-units
502 * depending on sensor type.
503 * We currently only support VR11.
504 */
505static long pmbus_reg2data_vid(struct pmbus_data *data,
506 struct pmbus_sensor *sensor)
507{
508 long val = sensor->data;
509
510 if (val < 0x02 || val > 0xb2)
511 return 0;
512 return DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100);
513}
514
446static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor) 515static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
447{ 516{
448 long val; 517 long val;
449 518
450 if (data->info->direct[sensor->class]) 519 switch (data->info->format[sensor->class]) {
520 case direct:
451 val = pmbus_reg2data_direct(data, sensor); 521 val = pmbus_reg2data_direct(data, sensor);
452 else 522 break;
523 case vid:
524 val = pmbus_reg2data_vid(data, sensor);
525 break;
526 case linear:
527 default:
453 val = pmbus_reg2data_linear(data, sensor); 528 val = pmbus_reg2data_linear(data, sensor);
454 529 break;
530 }
455 return val; 531 return val;
456} 532}
457 533
@@ -561,16 +637,31 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
561 return val; 637 return val;
562} 638}
563 639
640static u16 pmbus_data2reg_vid(struct pmbus_data *data,
641 enum pmbus_sensor_classes class, long val)
642{
643 val = SENSORS_LIMIT(val, 500, 1600);
644
645 return 2 + DIV_ROUND_CLOSEST((1600 - val) * 100, 625);
646}
647
564static u16 pmbus_data2reg(struct pmbus_data *data, 648static u16 pmbus_data2reg(struct pmbus_data *data,
565 enum pmbus_sensor_classes class, long val) 649 enum pmbus_sensor_classes class, long val)
566{ 650{
567 u16 regval; 651 u16 regval;
568 652
569 if (data->info->direct[class]) 653 switch (data->info->format[class]) {
654 case direct:
570 regval = pmbus_data2reg_direct(data, class, val); 655 regval = pmbus_data2reg_direct(data, class, val);
571 else 656 break;
657 case vid:
658 regval = pmbus_data2reg_vid(data, class, val);
659 break;
660 case linear:
661 default:
572 regval = pmbus_data2reg_linear(data, class, val); 662 regval = pmbus_data2reg_linear(data, class, val);
573 663 break;
664 }
574 return regval; 665 return regval;
575} 666}
576 667
@@ -682,7 +773,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
682 773
683 mutex_lock(&data->update_lock); 774 mutex_lock(&data->update_lock);
684 regval = pmbus_data2reg(data, sensor->class, val); 775 regval = pmbus_data2reg(data, sensor->class, val);
685 ret = pmbus_write_word_data(client, sensor->page, sensor->reg, regval); 776 ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
686 if (ret < 0) 777 if (ret < 0)
687 rv = ret; 778 rv = ret;
688 else 779 else
@@ -867,7 +958,8 @@ static void pmbus_find_max_attr(struct i2c_client *client,
867 * and its associated alarm attribute. 958 * and its associated alarm attribute.
868 */ 959 */
869struct pmbus_limit_attr { 960struct pmbus_limit_attr {
870 u8 reg; /* Limit register */ 961 u16 reg; /* Limit register */
962 bool update; /* True if register needs updates */
871 const char *attr; /* Attribute name */ 963 const char *attr; /* Attribute name */
872 const char *alarm; /* Alarm attribute name */ 964 const char *alarm; /* Alarm attribute name */
873 u32 sbit; /* Alarm attribute status bit */ 965 u32 sbit; /* Alarm attribute status bit */
@@ -912,9 +1004,10 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,
912 if (pmbus_check_word_register(client, page, l->reg)) { 1004 if (pmbus_check_word_register(client, page, l->reg)) {
913 cindex = data->num_sensors; 1005 cindex = data->num_sensors;
914 pmbus_add_sensor(data, name, l->attr, index, page, 1006 pmbus_add_sensor(data, name, l->attr, index, page,
915 l->reg, attr->class, attr->update, 1007 l->reg, attr->class,
1008 attr->update || l->update,
916 false); 1009 false);
917 if (info->func[page] & attr->sfunc) { 1010 if (l->sbit && (info->func[page] & attr->sfunc)) {
918 if (attr->compare) { 1011 if (attr->compare) {
919 pmbus_add_boolean_cmp(data, name, 1012 pmbus_add_boolean_cmp(data, name,
920 l->alarm, index, 1013 l->alarm, index,
@@ -953,9 +1046,11 @@ static void pmbus_add_sensor_attrs_one(struct i2c_client *client,
953 index, page, cbase, attr); 1046 index, page, cbase, attr);
954 /* 1047 /*
955 * Add generic alarm attribute only if there are no individual 1048 * Add generic alarm attribute only if there are no individual
956 * alarm attributes, and if there is a global alarm bit. 1049 * alarm attributes, if there is a global alarm bit, and if
1050 * the generic status register for this page is accessible.
957 */ 1051 */
958 if (!have_alarm && attr->gbit) 1052 if (!have_alarm && attr->gbit &&
1053 pmbus_check_byte_register(client, page, PMBUS_STATUS_BYTE))
959 pmbus_add_boolean_reg(data, name, "alarm", index, 1054 pmbus_add_boolean_reg(data, name, "alarm", index,
960 PB_STATUS_BASE + page, 1055 PB_STATUS_BASE + page,
961 attr->gbit); 1056 attr->gbit);
@@ -1008,6 +1103,21 @@ static const struct pmbus_limit_attr vin_limit_attrs[] = {
1008 .attr = "crit", 1103 .attr = "crit",
1009 .alarm = "crit_alarm", 1104 .alarm = "crit_alarm",
1010 .sbit = PB_VOLTAGE_OV_FAULT, 1105 .sbit = PB_VOLTAGE_OV_FAULT,
1106 }, {
1107 .reg = PMBUS_VIRT_READ_VIN_AVG,
1108 .update = true,
1109 .attr = "average",
1110 }, {
1111 .reg = PMBUS_VIRT_READ_VIN_MIN,
1112 .update = true,
1113 .attr = "lowest",
1114 }, {
1115 .reg = PMBUS_VIRT_READ_VIN_MAX,
1116 .update = true,
1117 .attr = "highest",
1118 }, {
1119 .reg = PMBUS_VIRT_RESET_VIN_HISTORY,
1120 .attr = "reset_history",
1011 }, 1121 },
1012}; 1122};
1013 1123
@@ -1032,6 +1142,21 @@ static const struct pmbus_limit_attr vout_limit_attrs[] = {
1032 .attr = "crit", 1142 .attr = "crit",
1033 .alarm = "crit_alarm", 1143 .alarm = "crit_alarm",
1034 .sbit = PB_VOLTAGE_OV_FAULT, 1144 .sbit = PB_VOLTAGE_OV_FAULT,
1145 }, {
1146 .reg = PMBUS_VIRT_READ_VOUT_AVG,
1147 .update = true,
1148 .attr = "average",
1149 }, {
1150 .reg = PMBUS_VIRT_READ_VOUT_MIN,
1151 .update = true,
1152 .attr = "lowest",
1153 }, {
1154 .reg = PMBUS_VIRT_READ_VOUT_MAX,
1155 .update = true,
1156 .attr = "highest",
1157 }, {
1158 .reg = PMBUS_VIRT_RESET_VOUT_HISTORY,
1159 .attr = "reset_history",
1035 } 1160 }
1036}; 1161};
1037 1162
@@ -1078,6 +1203,21 @@ static const struct pmbus_limit_attr iin_limit_attrs[] = {
1078 .attr = "crit", 1203 .attr = "crit",
1079 .alarm = "crit_alarm", 1204 .alarm = "crit_alarm",
1080 .sbit = PB_IIN_OC_FAULT, 1205 .sbit = PB_IIN_OC_FAULT,
1206 }, {
1207 .reg = PMBUS_VIRT_READ_IIN_AVG,
1208 .update = true,
1209 .attr = "average",
1210 }, {
1211 .reg = PMBUS_VIRT_READ_IIN_MIN,
1212 .update = true,
1213 .attr = "lowest",
1214 }, {
1215 .reg = PMBUS_VIRT_READ_IIN_MAX,
1216 .update = true,
1217 .attr = "highest",
1218 }, {
1219 .reg = PMBUS_VIRT_RESET_IIN_HISTORY,
1220 .attr = "reset_history",
1081 } 1221 }
1082}; 1222};
1083 1223
@@ -1097,6 +1237,21 @@ static const struct pmbus_limit_attr iout_limit_attrs[] = {
1097 .attr = "crit", 1237 .attr = "crit",
1098 .alarm = "crit_alarm", 1238 .alarm = "crit_alarm",
1099 .sbit = PB_IOUT_OC_FAULT, 1239 .sbit = PB_IOUT_OC_FAULT,
1240 }, {
1241 .reg = PMBUS_VIRT_READ_IOUT_AVG,
1242 .update = true,
1243 .attr = "average",
1244 }, {
1245 .reg = PMBUS_VIRT_READ_IOUT_MIN,
1246 .update = true,
1247 .attr = "lowest",
1248 }, {
1249 .reg = PMBUS_VIRT_READ_IOUT_MAX,
1250 .update = true,
1251 .attr = "highest",
1252 }, {
1253 .reg = PMBUS_VIRT_RESET_IOUT_HISTORY,
1254 .attr = "reset_history",
1100 } 1255 }
1101}; 1256};
1102 1257
@@ -1132,6 +1287,17 @@ static const struct pmbus_limit_attr pin_limit_attrs[] = {
1132 .attr = "max", 1287 .attr = "max",
1133 .alarm = "alarm", 1288 .alarm = "alarm",
1134 .sbit = PB_PIN_OP_WARNING, 1289 .sbit = PB_PIN_OP_WARNING,
1290 }, {
1291 .reg = PMBUS_VIRT_READ_PIN_AVG,
1292 .update = true,
1293 .attr = "average",
1294 }, {
1295 .reg = PMBUS_VIRT_READ_PIN_MAX,
1296 .update = true,
1297 .attr = "input_highest",
1298 }, {
1299 .reg = PMBUS_VIRT_RESET_PIN_HISTORY,
1300 .attr = "reset_history",
1135 } 1301 }
1136}; 1302};
1137 1303
@@ -1200,6 +1366,39 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = {
1200 .attr = "crit", 1366 .attr = "crit",
1201 .alarm = "crit_alarm", 1367 .alarm = "crit_alarm",
1202 .sbit = PB_TEMP_OT_FAULT, 1368 .sbit = PB_TEMP_OT_FAULT,
1369 }, {
1370 .reg = PMBUS_VIRT_READ_TEMP_MIN,
1371 .attr = "lowest",
1372 }, {
1373 .reg = PMBUS_VIRT_READ_TEMP_MAX,
1374 .attr = "highest",
1375 }, {
1376 .reg = PMBUS_VIRT_RESET_TEMP_HISTORY,
1377 .attr = "reset_history",
1378 }
1379};
1380
1381static const struct pmbus_limit_attr temp_limit_attrs23[] = {
1382 {
1383 .reg = PMBUS_UT_WARN_LIMIT,
1384 .attr = "min",
1385 .alarm = "min_alarm",
1386 .sbit = PB_TEMP_UT_WARNING,
1387 }, {
1388 .reg = PMBUS_UT_FAULT_LIMIT,
1389 .attr = "lcrit",
1390 .alarm = "lcrit_alarm",
1391 .sbit = PB_TEMP_UT_FAULT,
1392 }, {
1393 .reg = PMBUS_OT_WARN_LIMIT,
1394 .attr = "max",
1395 .alarm = "max_alarm",
1396 .sbit = PB_TEMP_OT_WARNING,
1397 }, {
1398 .reg = PMBUS_OT_FAULT_LIMIT,
1399 .attr = "crit",
1400 .alarm = "crit_alarm",
1401 .sbit = PB_TEMP_OT_FAULT,
1203 } 1402 }
1204}; 1403};
1205 1404
@@ -1226,8 +1425,8 @@ static const struct pmbus_sensor_attr temp_attributes[] = {
1226 .sfunc = PMBUS_HAVE_STATUS_TEMP, 1425 .sfunc = PMBUS_HAVE_STATUS_TEMP,
1227 .sbase = PB_STATUS_TEMP_BASE, 1426 .sbase = PB_STATUS_TEMP_BASE,
1228 .gbit = PB_STATUS_TEMPERATURE, 1427 .gbit = PB_STATUS_TEMPERATURE,
1229 .limit = temp_limit_attrs, 1428 .limit = temp_limit_attrs23,
1230 .nlimit = ARRAY_SIZE(temp_limit_attrs), 1429 .nlimit = ARRAY_SIZE(temp_limit_attrs23),
1231 }, { 1430 }, {
1232 .reg = PMBUS_READ_TEMPERATURE_3, 1431 .reg = PMBUS_READ_TEMPERATURE_3,
1233 .class = PSC_TEMPERATURE, 1432 .class = PSC_TEMPERATURE,
@@ -1238,8 +1437,8 @@ static const struct pmbus_sensor_attr temp_attributes[] = {
1238 .sfunc = PMBUS_HAVE_STATUS_TEMP, 1437 .sfunc = PMBUS_HAVE_STATUS_TEMP,
1239 .sbase = PB_STATUS_TEMP_BASE, 1438 .sbase = PB_STATUS_TEMP_BASE,
1240 .gbit = PB_STATUS_TEMPERATURE, 1439 .gbit = PB_STATUS_TEMPERATURE,
1241 .limit = temp_limit_attrs, 1440 .limit = temp_limit_attrs23,
1242 .nlimit = ARRAY_SIZE(temp_limit_attrs), 1441 .nlimit = ARRAY_SIZE(temp_limit_attrs23),
1243 } 1442 }
1244}; 1443};
1245 1444
@@ -1380,7 +1579,7 @@ static int pmbus_identify_common(struct i2c_client *client,
1380 */ 1579 */
1381 switch (vout_mode >> 5) { 1580 switch (vout_mode >> 5) {
1382 case 0: /* linear mode */ 1581 case 0: /* linear mode */
1383 if (data->info->direct[PSC_VOLTAGE_OUT]) 1582 if (data->info->format[PSC_VOLTAGE_OUT] != linear)
1384 return -ENODEV; 1583 return -ENODEV;
1385 1584
1386 exponent = vout_mode & 0x1f; 1585 exponent = vout_mode & 0x1f;
@@ -1389,8 +1588,12 @@ static int pmbus_identify_common(struct i2c_client *client,
1389 exponent |= ~0x1f; 1588 exponent |= ~0x1f;
1390 data->exponent = exponent; 1589 data->exponent = exponent;
1391 break; 1590 break;
1591 case 1: /* VID mode */
1592 if (data->info->format[PSC_VOLTAGE_OUT] != vid)
1593 return -ENODEV;
1594 break;
1392 case 2: /* direct mode */ 1595 case 2: /* direct mode */
1393 if (!data->info->direct[PSC_VOLTAGE_OUT]) 1596 if (data->info->format[PSC_VOLTAGE_OUT] != direct)
1394 return -ENODEV; 1597 return -ENODEV;
1395 break; 1598 break;
1396 default: 1599 default:
@@ -1457,18 +1660,6 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
1457 ret = -EINVAL; 1660 ret = -EINVAL;
1458 goto out_data; 1661 goto out_data;
1459 } 1662 }
1460 /*
1461 * Bail out if more than one page was configured, but we can not
1462 * select the highest page. This is an indication that the wrong
1463 * chip type was selected. Better bail out now than keep
1464 * returning errors later on.
1465 */
1466 if (info->pages > 1 && pmbus_set_page(client, info->pages - 1) < 0) {
1467 dev_err(&client->dev, "Failed to select page %d\n",
1468 info->pages - 1);
1469 ret = -EINVAL;
1470 goto out_data;
1471 }
1472 1663
1473 ret = pmbus_identify_common(client, data); 1664 ret = pmbus_identify_common(client, data);
1474 if (ret < 0) { 1665 if (ret < 0) {
diff --git a/drivers/hwmon/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index ace1c7319734..ace1c7319734 100644
--- a/drivers/hwmon/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
diff --git a/drivers/hwmon/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index ffcc1cf3609d..ffcc1cf3609d 100644
--- a/drivers/hwmon/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 7d231cf5d2ce..fe4104c6b764 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -32,7 +32,7 @@
32#include <linux/sht15.h> 32#include <linux/sht15.h>
33#include <linux/regulator/consumer.h> 33#include <linux/regulator/consumer.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <asm/atomic.h> 35#include <linux/atomic.h>
36 36
37/* Commands */ 37/* Commands */
38#define SHT15_MEASURE_TEMP 0x03 38#define SHT15_MEASURE_TEMP 0x03
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 8abfa4a03ce1..ce1a32b71e47 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -673,32 +673,33 @@ static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap,
673 /* transfer not completed */ 673 /* transfer not completed */
674 adap->pch_i2c_xfer_in_progress = true; 674 adap->pch_i2c_xfer_in_progress = true;
675 675
676 pmsg = &msgs[0]; 676 for (i = 0; i < num && ret >= 0; i++) {
677 pmsg->flags |= adap->pch_buff_mode_en; 677 pmsg = &msgs[i];
678 status = pmsg->flags; 678 pmsg->flags |= adap->pch_buff_mode_en;
679 pch_dbg(adap, 679 status = pmsg->flags;
680 "After invoking I2C_MODE_SEL :flag= 0x%x\n", status); 680 pch_dbg(adap,
681 /* calculate sub address length and message length */ 681 "After invoking I2C_MODE_SEL :flag= 0x%x\n", status);
682 /* these are applicable only for buffer mode */ 682 /* calculate sub address length and message length */
683 subaddrlen = pmsg->buf[0]; 683 /* these are applicable only for buffer mode */
684 /* calculate actual message length excluding 684 subaddrlen = pmsg->buf[0];
685 * the sub address fields */ 685 /* calculate actual message length excluding
686 msglen = (pmsg->len) - (subaddrlen + 1); 686 * the sub address fields */
687 if (status & (I2C_M_RD)) { 687 msglen = (pmsg->len) - (subaddrlen + 1);
688 pch_dbg(adap, "invoking pch_i2c_readbytes\n"); 688
689 ret = pch_i2c_readbytes(i2c_adap, pmsg, (i + 1 == num), 689 if ((status & (I2C_M_RD)) != false) {
690 (i == 0)); 690 ret = pch_i2c_readbytes(i2c_adap, pmsg, (i + 1 == num),
691 } else { 691 (i == 0));
692 pch_dbg(adap, "invoking pch_i2c_writebytes\n"); 692 } else {
693 ret = pch_i2c_writebytes(i2c_adap, pmsg, (i + 1 == num), 693 ret = pch_i2c_writebytes(i2c_adap, pmsg, (i + 1 == num),
694 (i == 0)); 694 (i == 0));
695 }
695 } 696 }
696 697
697 adap->pch_i2c_xfer_in_progress = false; /* transfer completed */ 698 adap->pch_i2c_xfer_in_progress = false; /* transfer completed */
698 699
699 mutex_unlock(&pch_mutex); 700 mutex_unlock(&pch_mutex);
700 701
701 return ret; 702 return (ret < 0) ? ret : num;
702} 703}
703 704
704/** 705/**
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index fb3b4f8f8152..2440b7411978 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -26,6 +26,7 @@
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/i2c-tegra.h> 28#include <linux/i2c-tegra.h>
29#include <linux/of_i2c.h>
29 30
30#include <asm/unaligned.h> 31#include <asm/unaligned.h>
31 32
@@ -546,6 +547,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
546 struct resource *iomem; 547 struct resource *iomem;
547 struct clk *clk; 548 struct clk *clk;
548 struct clk *i2c_clk; 549 struct clk *i2c_clk;
550 const unsigned int *prop;
549 void *base; 551 void *base;
550 int irq; 552 int irq;
551 int ret = 0; 553 int ret = 0;
@@ -603,7 +605,17 @@ static int tegra_i2c_probe(struct platform_device *pdev)
603 i2c_dev->irq = irq; 605 i2c_dev->irq = irq;
604 i2c_dev->cont_id = pdev->id; 606 i2c_dev->cont_id = pdev->id;
605 i2c_dev->dev = &pdev->dev; 607 i2c_dev->dev = &pdev->dev;
606 i2c_dev->bus_clk_rate = pdata ? pdata->bus_clk_rate : 100000; 608
609 i2c_dev->bus_clk_rate = 100000; /* default clock rate */
610 if (pdata) {
611 i2c_dev->bus_clk_rate = pdata->bus_clk_rate;
612
613 } else if (i2c_dev->dev->of_node) { /* if there is a device tree node ... */
614 prop = of_get_property(i2c_dev->dev->of_node,
615 "clock-frequency", NULL);
616 if (prop)
617 i2c_dev->bus_clk_rate = be32_to_cpup(prop);
618 }
607 619
608 if (pdev->id == 3) 620 if (pdev->id == 3)
609 i2c_dev->is_dvc = 1; 621 i2c_dev->is_dvc = 1;
@@ -633,6 +645,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
633 i2c_dev->adapter.algo = &tegra_i2c_algo; 645 i2c_dev->adapter.algo = &tegra_i2c_algo;
634 i2c_dev->adapter.dev.parent = &pdev->dev; 646 i2c_dev->adapter.dev.parent = &pdev->dev;
635 i2c_dev->adapter.nr = pdev->id; 647 i2c_dev->adapter.nr = pdev->id;
648 i2c_dev->adapter.dev.of_node = pdev->dev.of_node;
636 649
637 ret = i2c_add_numbered_adapter(&i2c_dev->adapter); 650 ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
638 if (ret) { 651 if (ret) {
@@ -640,6 +653,8 @@ static int tegra_i2c_probe(struct platform_device *pdev)
640 goto err_free_irq; 653 goto err_free_irq;
641 } 654 }
642 655
656 of_i2c_register_devices(&i2c_dev->adapter);
657
643 return 0; 658 return 0;
644err_free_irq: 659err_free_irq:
645 free_irq(i2c_dev->irq, i2c_dev); 660 free_irq(i2c_dev->irq, i2c_dev);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 0347eed4a167..40c835309e49 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -31,7 +31,7 @@
31 */ 31 */
32 32
33#include <rdma/ib_umem.h> 33#include <rdma/ib_umem.h>
34#include <asm/atomic.h> 34#include <linux/atomic.h>
35 35
36#include "iw_cxgb4.h" 36#include "iw_cxgb4.h"
37 37
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index f09914cccf53..54c0d23bad92 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -58,7 +58,7 @@
58#include <linux/cpu.h> 58#include <linux/cpu.h>
59#include <linux/device.h> 59#include <linux/device.h>
60 60
61#include <asm/atomic.h> 61#include <linux/atomic.h>
62#include <asm/abs_addr.h> 62#include <asm/abs_addr.h>
63#include <asm/ibmebus.h> 63#include <asm/ibmebus.h>
64#include <asm/io.h> 64#include <asm/io.h>
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 73bc18465c9c..c118663e4437 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -34,7 +34,7 @@
34 34
35#define TCPOPT_TIMESTAMP 8 35#define TCPOPT_TIMESTAMP 8
36 36
37#include <asm/atomic.h> 37#include <linux/atomic.h>
38#include <linux/skbuff.h> 38#include <linux/skbuff.h>
39#include <linux/ip.h> 39#include <linux/ip.h>
40#include <linux/tcp.h> 40#include <linux/tcp.h>
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 7b6985a2e652..b3cc1e062b17 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -45,7 +45,7 @@
45 45
46#include <net/neighbour.h> 46#include <net/neighbour.h>
47 47
48#include <asm/atomic.h> 48#include <linux/atomic.h>
49 49
50#include <rdma/ib_verbs.h> 50#include <rdma/ib_verbs.h>
51#include <rdma/ib_pack.h> 51#include <rdma/ib_pack.h>
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 95a08a8ca8aa..5745b7fe158c 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -271,7 +271,7 @@ int iser_send_command(struct iscsi_conn *conn,
271 unsigned long edtl; 271 unsigned long edtl;
272 int err; 272 int err;
273 struct iser_data_buf *data_buf; 273 struct iser_data_buf *data_buf;
274 struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr; 274 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
275 struct scsi_cmnd *sc = task->sc; 275 struct scsi_cmnd *sc = task->sc;
276 struct iser_tx_desc *tx_desc = &iser_task->desc; 276 struct iser_tx_desc *tx_desc = &iser_task->desc;
277 277
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 7d5109bbd1ad..0bfa545675b8 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -39,7 +39,7 @@
39#include <linux/random.h> 39#include <linux/random.h>
40#include <linux/jiffies.h> 40#include <linux/jiffies.h>
41 41
42#include <asm/atomic.h> 42#include <linux/atomic.h>
43 43
44#include <scsi/scsi.h> 44#include <scsi/scsi.h>
45#include <scsi/scsi_device.h> 45#include <scsi/scsi_device.h>
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 56abf3d0e911..d72887585a14 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -154,10 +154,13 @@ static const struct xpad_device {
154 { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, 154 { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
155 { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, 155 { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
156 { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 156 { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
157 { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
158 { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
157 { 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 }, 159 { 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
158 { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 160 { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
159 { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, 161 { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
160 { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 }, 162 { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
163 { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
161 { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, 164 { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
162 { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 165 { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
163 { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 166 { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
@@ -236,9 +239,10 @@ static struct usb_device_id xpad_table [] = {
236 XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */ 239 XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
237 XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */ 240 XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
238 XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ 241 XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
242 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
239 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ 243 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
240 XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ 244 XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
241 XPAD_XBOX360_VENDOR(0x1bad), /* Rock Band Drums */ 245 XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
242 XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ 246 XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
243 { } 247 { }
244}; 248};
@@ -545,7 +549,7 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
545 struct usb_endpoint_descriptor *ep_irq_out; 549 struct usb_endpoint_descriptor *ep_irq_out;
546 int error; 550 int error;
547 551
548 if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX) 552 if (xpad->xtype == XTYPE_UNKNOWN)
549 return 0; 553 return 0;
550 554
551 xpad->odata = usb_alloc_coherent(xpad->udev, XPAD_PKT_LEN, 555 xpad->odata = usb_alloc_coherent(xpad->udev, XPAD_PKT_LEN,
@@ -579,13 +583,13 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
579 583
580static void xpad_stop_output(struct usb_xpad *xpad) 584static void xpad_stop_output(struct usb_xpad *xpad)
581{ 585{
582 if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX) 586 if (xpad->xtype != XTYPE_UNKNOWN)
583 usb_kill_urb(xpad->irq_out); 587 usb_kill_urb(xpad->irq_out);
584} 588}
585 589
586static void xpad_deinit_output(struct usb_xpad *xpad) 590static void xpad_deinit_output(struct usb_xpad *xpad)
587{ 591{
588 if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX) { 592 if (xpad->xtype != XTYPE_UNKNOWN) {
589 usb_free_urb(xpad->irq_out); 593 usb_free_urb(xpad->irq_out);
590 usb_free_coherent(xpad->udev, XPAD_PKT_LEN, 594 usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
591 xpad->odata, xpad->odata_dma); 595 xpad->odata, xpad->odata_dma);
@@ -632,6 +636,23 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
632 636
633 return usb_submit_urb(xpad->irq_out, GFP_ATOMIC); 637 return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
634 638
639 case XTYPE_XBOX360W:
640 xpad->odata[0] = 0x00;
641 xpad->odata[1] = 0x01;
642 xpad->odata[2] = 0x0F;
643 xpad->odata[3] = 0xC0;
644 xpad->odata[4] = 0x00;
645 xpad->odata[5] = strong / 256;
646 xpad->odata[6] = weak / 256;
647 xpad->odata[7] = 0x00;
648 xpad->odata[8] = 0x00;
649 xpad->odata[9] = 0x00;
650 xpad->odata[10] = 0x00;
651 xpad->odata[11] = 0x00;
652 xpad->irq_out->transfer_buffer_length = 12;
653
654 return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
655
635 default: 656 default:
636 dbg("%s - rumble command sent to unsupported xpad type: %d", 657 dbg("%s - rumble command sent to unsupported xpad type: %d",
637 __func__, xpad->xtype); 658 __func__, xpad->xtype);
@@ -644,7 +665,7 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
644 665
645static int xpad_init_ff(struct usb_xpad *xpad) 666static int xpad_init_ff(struct usb_xpad *xpad)
646{ 667{
647 if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX) 668 if (xpad->xtype == XTYPE_UNKNOWN)
648 return 0; 669 return 0;
649 670
650 input_set_capability(xpad->dev, EV_FF, FF_RUMBLE); 671 input_set_capability(xpad->dev, EV_FF, FF_RUMBLE);
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index af45d275f686..7b404e5443ed 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -9,7 +9,6 @@
9 */ 9 */
10 10
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/version.h>
13#include <linux/init.h> 12#include <linux/init.h>
14#include <linux/interrupt.h> 13#include <linux/interrupt.h>
15#include <linux/irq.h> 14#include <linux/irq.h>
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 631598663aab..c7708263051b 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -8,7 +8,6 @@
8 */ 8 */
9 9
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/version.h>
12#include <linux/init.h> 11#include <linux/init.h>
13#include <linux/interrupt.h> 12#include <linux/interrupt.h>
14#include <linux/irq.h> 13#include <linux/irq.h>
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 11478eb2c27d..19cfc0cf558c 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -1578,14 +1578,14 @@ static int __init atkbd_setup_forced_release(const struct dmi_system_id *id)
1578 atkbd_platform_fixup = atkbd_apply_forced_release_keylist; 1578 atkbd_platform_fixup = atkbd_apply_forced_release_keylist;
1579 atkbd_platform_fixup_data = id->driver_data; 1579 atkbd_platform_fixup_data = id->driver_data;
1580 1580
1581 return 0; 1581 return 1;
1582} 1582}
1583 1583
1584static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id) 1584static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id)
1585{ 1585{
1586 atkbd_platform_scancode_fixup = id->driver_data; 1586 atkbd_platform_scancode_fixup = id->driver_data;
1587 1587
1588 return 0; 1588 return 1;
1589} 1589}
1590 1590
1591static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = { 1591static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 6e6145b9a4c1..ce281d152275 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -2,6 +2,7 @@
2 * Driver for keys on GPIO lines capable of generating interrupts. 2 * Driver for keys on GPIO lines capable of generating interrupts.
3 * 3 *
4 * Copyright 2005 Phil Blundell 4 * Copyright 2005 Phil Blundell
5 * Copyright 2010, 2011 David Jander <david@protonic.nl>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -25,6 +26,8 @@
25#include <linux/gpio_keys.h> 26#include <linux/gpio_keys.h>
26#include <linux/workqueue.h> 27#include <linux/workqueue.h>
27#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/of_platform.h>
30#include <linux/of_gpio.h>
28 31
29struct gpio_button_data { 32struct gpio_button_data {
30 struct gpio_keys_button *button; 33 struct gpio_keys_button *button;
@@ -415,7 +418,7 @@ static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
415 if (!button->can_disable) 418 if (!button->can_disable)
416 irqflags |= IRQF_SHARED; 419 irqflags |= IRQF_SHARED;
417 420
418 error = request_any_context_irq(irq, gpio_keys_isr, irqflags, desc, bdata); 421 error = request_threaded_irq(irq, NULL, gpio_keys_isr, irqflags, desc, bdata);
419 if (error < 0) { 422 if (error < 0) {
420 dev_err(dev, "Unable to claim irq %d; error %d\n", 423 dev_err(dev, "Unable to claim irq %d; error %d\n",
421 irq, error); 424 irq, error);
@@ -445,15 +448,120 @@ static void gpio_keys_close(struct input_dev *input)
445 ddata->disable(input->dev.parent); 448 ddata->disable(input->dev.parent);
446} 449}
447 450
451/*
452 * Handlers for alternative sources of platform_data
453 */
454#ifdef CONFIG_OF
455/*
456 * Translate OpenFirmware node properties into platform_data
457 */
458static int gpio_keys_get_devtree_pdata(struct device *dev,
459 struct gpio_keys_platform_data *pdata)
460{
461 struct device_node *node, *pp;
462 int i;
463 struct gpio_keys_button *buttons;
464 const u32 *reg;
465 int len;
466
467 node = dev->of_node;
468 if (node == NULL)
469 return -ENODEV;
470
471 memset(pdata, 0, sizeof *pdata);
472
473 pdata->rep = !!of_get_property(node, "autorepeat", &len);
474
475 /* First count the subnodes */
476 pdata->nbuttons = 0;
477 pp = NULL;
478 while ((pp = of_get_next_child(node, pp)))
479 pdata->nbuttons++;
480
481 if (pdata->nbuttons == 0)
482 return -ENODEV;
483
484 buttons = kzalloc(pdata->nbuttons * (sizeof *buttons), GFP_KERNEL);
485 if (!buttons)
486 return -ENODEV;
487
488 pp = NULL;
489 i = 0;
490 while ((pp = of_get_next_child(node, pp))) {
491 enum of_gpio_flags flags;
492
493 if (!of_find_property(pp, "gpios", NULL)) {
494 pdata->nbuttons--;
495 dev_warn(dev, "Found button without gpios\n");
496 continue;
497 }
498 buttons[i].gpio = of_get_gpio_flags(pp, 0, &flags);
499 buttons[i].active_low = flags & OF_GPIO_ACTIVE_LOW;
500
501 reg = of_get_property(pp, "linux,code", &len);
502 if (!reg) {
503 dev_err(dev, "Button without keycode: 0x%x\n", buttons[i].gpio);
504 goto out_fail;
505 }
506 buttons[i].code = be32_to_cpup(reg);
507
508 buttons[i].desc = of_get_property(pp, "label", &len);
509
510 reg = of_get_property(pp, "linux,input-type", &len);
511 buttons[i].type = reg ? be32_to_cpup(reg) : EV_KEY;
512
513 buttons[i].wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
514
515 reg = of_get_property(pp, "debounce-interval", &len);
516 buttons[i].debounce_interval = reg ? be32_to_cpup(reg) : 5;
517
518 i++;
519 }
520
521 pdata->buttons = buttons;
522
523 return 0;
524
525out_fail:
526 kfree(buttons);
527 return -ENODEV;
528}
529
530static struct of_device_id gpio_keys_of_match[] = {
531 { .compatible = "gpio-keys", },
532 { },
533};
534MODULE_DEVICE_TABLE(of, gpio_keys_of_match);
535
536#else
537
538static int gpio_keys_get_devtree_pdata(struct device *dev,
539 struct gpio_keys_platform_data *altp)
540{
541 return -ENODEV;
542}
543
544#define gpio_keys_of_match NULL
545
546#endif
547
448static int __devinit gpio_keys_probe(struct platform_device *pdev) 548static int __devinit gpio_keys_probe(struct platform_device *pdev)
449{ 549{
450 struct gpio_keys_platform_data *pdata = pdev->dev.platform_data; 550 struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
451 struct gpio_keys_drvdata *ddata; 551 struct gpio_keys_drvdata *ddata;
452 struct device *dev = &pdev->dev; 552 struct device *dev = &pdev->dev;
553 struct gpio_keys_platform_data alt_pdata;
453 struct input_dev *input; 554 struct input_dev *input;
454 int i, error; 555 int i, error;
455 int wakeup = 0; 556 int wakeup = 0;
456 557
558 if (!pdata) {
559 error = gpio_keys_get_devtree_pdata(dev, &alt_pdata);
560 if (error)
561 return error;
562 pdata = &alt_pdata;
563 }
564
457 ddata = kzalloc(sizeof(struct gpio_keys_drvdata) + 565 ddata = kzalloc(sizeof(struct gpio_keys_drvdata) +
458 pdata->nbuttons * sizeof(struct gpio_button_data), 566 pdata->nbuttons * sizeof(struct gpio_button_data),
459 GFP_KERNEL); 567 GFP_KERNEL);
@@ -544,13 +652,15 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
544 fail1: 652 fail1:
545 input_free_device(input); 653 input_free_device(input);
546 kfree(ddata); 654 kfree(ddata);
655 /* If we have no platform_data, we allocated buttons dynamically. */
656 if (!pdev->dev.platform_data)
657 kfree(pdata->buttons);
547 658
548 return error; 659 return error;
549} 660}
550 661
551static int __devexit gpio_keys_remove(struct platform_device *pdev) 662static int __devexit gpio_keys_remove(struct platform_device *pdev)
552{ 663{
553 struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
554 struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev); 664 struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev);
555 struct input_dev *input = ddata->input; 665 struct input_dev *input = ddata->input;
556 int i; 666 int i;
@@ -559,31 +669,39 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev)
559 669
560 device_init_wakeup(&pdev->dev, 0); 670 device_init_wakeup(&pdev->dev, 0);
561 671
562 for (i = 0; i < pdata->nbuttons; i++) { 672 for (i = 0; i < ddata->n_buttons; i++) {
563 int irq = gpio_to_irq(pdata->buttons[i].gpio); 673 int irq = gpio_to_irq(ddata->data[i].button->gpio);
564 free_irq(irq, &ddata->data[i]); 674 free_irq(irq, &ddata->data[i]);
565 if (ddata->data[i].timer_debounce) 675 if (ddata->data[i].timer_debounce)
566 del_timer_sync(&ddata->data[i].timer); 676 del_timer_sync(&ddata->data[i].timer);
567 cancel_work_sync(&ddata->data[i].work); 677 cancel_work_sync(&ddata->data[i].work);
568 gpio_free(pdata->buttons[i].gpio); 678 gpio_free(ddata->data[i].button->gpio);
569 } 679 }
570 680
571 input_unregister_device(input); 681 input_unregister_device(input);
572 682
683 /*
684 * If we had no platform_data, we allocated buttons dynamically, and
685 * must free them here. ddata->data[0].button is the pointer to the
686 * beginning of the allocated array.
687 */
688 if (!pdev->dev.platform_data)
689 kfree(ddata->data[0].button);
690
691 kfree(ddata);
692
573 return 0; 693 return 0;
574} 694}
575 695
576 696#ifdef CONFIG_PM_SLEEP
577#ifdef CONFIG_PM
578static int gpio_keys_suspend(struct device *dev) 697static int gpio_keys_suspend(struct device *dev)
579{ 698{
580 struct platform_device *pdev = to_platform_device(dev); 699 struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
581 struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
582 int i; 700 int i;
583 701
584 if (device_may_wakeup(&pdev->dev)) { 702 if (device_may_wakeup(dev)) {
585 for (i = 0; i < pdata->nbuttons; i++) { 703 for (i = 0; i < ddata->n_buttons; i++) {
586 struct gpio_keys_button *button = &pdata->buttons[i]; 704 struct gpio_keys_button *button = ddata->data[i].button;
587 if (button->wakeup) { 705 if (button->wakeup) {
588 int irq = gpio_to_irq(button->gpio); 706 int irq = gpio_to_irq(button->gpio);
589 enable_irq_wake(irq); 707 enable_irq_wake(irq);
@@ -596,15 +714,13 @@ static int gpio_keys_suspend(struct device *dev)
596 714
597static int gpio_keys_resume(struct device *dev) 715static int gpio_keys_resume(struct device *dev)
598{ 716{
599 struct platform_device *pdev = to_platform_device(dev); 717 struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
600 struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev);
601 struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
602 int i; 718 int i;
603 719
604 for (i = 0; i < pdata->nbuttons; i++) { 720 for (i = 0; i < ddata->n_buttons; i++) {
605 721
606 struct gpio_keys_button *button = &pdata->buttons[i]; 722 struct gpio_keys_button *button = ddata->data[i].button;
607 if (button->wakeup && device_may_wakeup(&pdev->dev)) { 723 if (button->wakeup && device_may_wakeup(dev)) {
608 int irq = gpio_to_irq(button->gpio); 724 int irq = gpio_to_irq(button->gpio);
609 disable_irq_wake(irq); 725 disable_irq_wake(irq);
610 } 726 }
@@ -615,22 +731,18 @@ static int gpio_keys_resume(struct device *dev)
615 731
616 return 0; 732 return 0;
617} 733}
618
619static const struct dev_pm_ops gpio_keys_pm_ops = {
620 .suspend = gpio_keys_suspend,
621 .resume = gpio_keys_resume,
622};
623#endif 734#endif
624 735
736static SIMPLE_DEV_PM_OPS(gpio_keys_pm_ops, gpio_keys_suspend, gpio_keys_resume);
737
625static struct platform_driver gpio_keys_device_driver = { 738static struct platform_driver gpio_keys_device_driver = {
626 .probe = gpio_keys_probe, 739 .probe = gpio_keys_probe,
627 .remove = __devexit_p(gpio_keys_remove), 740 .remove = __devexit_p(gpio_keys_remove),
628 .driver = { 741 .driver = {
629 .name = "gpio-keys", 742 .name = "gpio-keys",
630 .owner = THIS_MODULE, 743 .owner = THIS_MODULE,
631#ifdef CONFIG_PM
632 .pm = &gpio_keys_pm_ops, 744 .pm = &gpio_keys_pm_ops,
633#endif 745 .of_match_table = gpio_keys_of_match,
634 } 746 }
635}; 747};
636 748
@@ -644,10 +756,10 @@ static void __exit gpio_keys_exit(void)
644 platform_driver_unregister(&gpio_keys_device_driver); 756 platform_driver_unregister(&gpio_keys_device_driver);
645} 757}
646 758
647module_init(gpio_keys_init); 759late_initcall(gpio_keys_init);
648module_exit(gpio_keys_exit); 760module_exit(gpio_keys_exit);
649 761
650MODULE_LICENSE("GPL"); 762MODULE_LICENSE("GPL");
651MODULE_AUTHOR("Phil Blundell <pb@handhelds.org>"); 763MODULE_AUTHOR("Phil Blundell <pb@handhelds.org>");
652MODULE_DESCRIPTION("Keyboard driver for CPU GPIOs"); 764MODULE_DESCRIPTION("Keyboard driver for GPIOs");
653MODULE_ALIAS("platform:gpio-keys"); 765MODULE_ALIAS("platform:gpio-keys");
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 71f744a8e686..ab0acaf7fe8f 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -146,7 +146,6 @@ struct lm8323_chip {
146 /* device lock */ 146 /* device lock */
147 struct mutex lock; 147 struct mutex lock;
148 struct i2c_client *client; 148 struct i2c_client *client;
149 struct work_struct work;
150 struct input_dev *idev; 149 struct input_dev *idev;
151 bool kp_enabled; 150 bool kp_enabled;
152 bool pm_suspend; 151 bool pm_suspend;
@@ -162,7 +161,6 @@ struct lm8323_chip {
162 161
163#define client_to_lm8323(c) container_of(c, struct lm8323_chip, client) 162#define client_to_lm8323(c) container_of(c, struct lm8323_chip, client)
164#define dev_to_lm8323(d) container_of(d, struct lm8323_chip, client->dev) 163#define dev_to_lm8323(d) container_of(d, struct lm8323_chip, client->dev)
165#define work_to_lm8323(w) container_of(w, struct lm8323_chip, work)
166#define cdev_to_pwm(c) container_of(c, struct lm8323_pwm, cdev) 164#define cdev_to_pwm(c) container_of(c, struct lm8323_pwm, cdev)
167#define work_to_pwm(w) container_of(w, struct lm8323_pwm, work) 165#define work_to_pwm(w) container_of(w, struct lm8323_pwm, work)
168 166
@@ -375,9 +373,9 @@ static void pwm_done(struct lm8323_pwm *pwm)
375 * Bottom half: handle the interrupt by posting key events, or dealing with 373 * Bottom half: handle the interrupt by posting key events, or dealing with
376 * errors appropriately. 374 * errors appropriately.
377 */ 375 */
378static void lm8323_work(struct work_struct *work) 376static irqreturn_t lm8323_irq(int irq, void *_lm)
379{ 377{
380 struct lm8323_chip *lm = work_to_lm8323(work); 378 struct lm8323_chip *lm = _lm;
381 u8 ints; 379 u8 ints;
382 int i; 380 int i;
383 381
@@ -409,16 +407,6 @@ static void lm8323_work(struct work_struct *work)
409 } 407 }
410 408
411 mutex_unlock(&lm->lock); 409 mutex_unlock(&lm->lock);
412}
413
414/*
415 * We cannot use I2C in interrupt context, so we just schedule work.
416 */
417static irqreturn_t lm8323_irq(int irq, void *data)
418{
419 struct lm8323_chip *lm = data;
420
421 schedule_work(&lm->work);
422 410
423 return IRQ_HANDLED; 411 return IRQ_HANDLED;
424} 412}
@@ -675,7 +663,6 @@ static int __devinit lm8323_probe(struct i2c_client *client,
675 lm->client = client; 663 lm->client = client;
676 lm->idev = idev; 664 lm->idev = idev;
677 mutex_init(&lm->lock); 665 mutex_init(&lm->lock);
678 INIT_WORK(&lm->work, lm8323_work);
679 666
680 lm->size_x = pdata->size_x; 667 lm->size_x = pdata->size_x;
681 lm->size_y = pdata->size_y; 668 lm->size_y = pdata->size_y;
@@ -746,9 +733,8 @@ static int __devinit lm8323_probe(struct i2c_client *client,
746 goto fail3; 733 goto fail3;
747 } 734 }
748 735
749 err = request_irq(client->irq, lm8323_irq, 736 err = request_threaded_irq(client->irq, NULL, lm8323_irq,
750 IRQF_TRIGGER_FALLING | IRQF_DISABLED, 737 IRQF_TRIGGER_LOW|IRQF_ONESHOT, "lm8323", lm);
751 "lm8323", lm);
752 if (err) { 738 if (err) {
753 dev_err(&client->dev, "could not get IRQ %d\n", client->irq); 739 dev_err(&client->dev, "could not get IRQ %d\n", client->irq);
754 goto fail4; 740 goto fail4;
@@ -783,7 +769,6 @@ static int __devexit lm8323_remove(struct i2c_client *client)
783 769
784 disable_irq_wake(client->irq); 770 disable_irq_wake(client->irq);
785 free_irq(client->irq, lm); 771 free_irq(client->irq, lm);
786 cancel_work_sync(&lm->work);
787 772
788 input_unregister_device(lm->idev); 773 input_unregister_device(lm->idev);
789 774
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 0a9e81194888..1c1615d9a7f9 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -43,14 +43,15 @@
43 * enabled capacitance sensing inputs and its run/suspend mode. 43 * enabled capacitance sensing inputs and its run/suspend mode.
44 */ 44 */
45#define ELECTRODE_CONF_ADDR 0x5e 45#define ELECTRODE_CONF_ADDR 0x5e
46#define ELECTRODE_CONF_QUICK_CHARGE 0x80
46#define AUTO_CONFIG_CTRL_ADDR 0x7b 47#define AUTO_CONFIG_CTRL_ADDR 0x7b
47#define AUTO_CONFIG_USL_ADDR 0x7d 48#define AUTO_CONFIG_USL_ADDR 0x7d
48#define AUTO_CONFIG_LSL_ADDR 0x7e 49#define AUTO_CONFIG_LSL_ADDR 0x7e
49#define AUTO_CONFIG_TL_ADDR 0x7f 50#define AUTO_CONFIG_TL_ADDR 0x7f
50 51
51/* Threshold of touch/release trigger */ 52/* Threshold of touch/release trigger */
52#define TOUCH_THRESHOLD 0x0f 53#define TOUCH_THRESHOLD 0x08
53#define RELEASE_THRESHOLD 0x0a 54#define RELEASE_THRESHOLD 0x05
54/* Masks for touch and release triggers */ 55/* Masks for touch and release triggers */
55#define TOUCH_STATUS_MASK 0xfff 56#define TOUCH_STATUS_MASK 0xfff
56/* MPR121 has 12 keys */ 57/* MPR121 has 12 keys */
@@ -127,7 +128,7 @@ static int __devinit mpr121_phys_init(const struct mpr121_platform_data *pdata,
127 struct i2c_client *client) 128 struct i2c_client *client)
128{ 129{
129 const struct mpr121_init_register *reg; 130 const struct mpr121_init_register *reg;
130 unsigned char usl, lsl, tl; 131 unsigned char usl, lsl, tl, eleconf;
131 int i, t, vdd, ret; 132 int i, t, vdd, ret;
132 133
133 /* Set up touch/release threshold for ele0-ele11 */ 134 /* Set up touch/release threshold for ele0-ele11 */
@@ -163,8 +164,15 @@ static int __devinit mpr121_phys_init(const struct mpr121_platform_data *pdata,
163 ret = i2c_smbus_write_byte_data(client, AUTO_CONFIG_USL_ADDR, usl); 164 ret = i2c_smbus_write_byte_data(client, AUTO_CONFIG_USL_ADDR, usl);
164 ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_LSL_ADDR, lsl); 165 ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_LSL_ADDR, lsl);
165 ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_TL_ADDR, tl); 166 ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_TL_ADDR, tl);
167
168 /*
169 * Quick charge bit will let the capacitive charge to ready
170 * state quickly, or the buttons may not function after system
171 * boot.
172 */
173 eleconf = mpr121->keycount | ELECTRODE_CONF_QUICK_CHARGE;
166 ret |= i2c_smbus_write_byte_data(client, ELECTRODE_CONF_ADDR, 174 ret |= i2c_smbus_write_byte_data(client, ELECTRODE_CONF_ADDR,
167 mpr121->keycount); 175 eleconf);
168 if (ret != 0) 176 if (ret != 0)
169 goto err_i2c_write; 177 goto err_i2c_write;
170 178
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 6229c3e8e78b..e7cc51d0fb34 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -700,9 +700,9 @@ static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
700 return 0; 700 return 0;
701 701
702err_pmic_reg_read: 702err_pmic_reg_read:
703 free_irq(kp->key_stuck_irq, NULL); 703 free_irq(kp->key_stuck_irq, kp);
704err_req_stuck_irq: 704err_req_stuck_irq:
705 free_irq(kp->key_sense_irq, NULL); 705 free_irq(kp->key_sense_irq, kp);
706err_gpio_config: 706err_gpio_config:
707err_get_irq: 707err_get_irq:
708 input_free_device(kp->input); 708 input_free_device(kp->input);
@@ -717,8 +717,8 @@ static int __devexit pmic8xxx_kp_remove(struct platform_device *pdev)
717 struct pmic8xxx_kp *kp = platform_get_drvdata(pdev); 717 struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);
718 718
719 device_init_wakeup(&pdev->dev, 0); 719 device_init_wakeup(&pdev->dev, 0);
720 free_irq(kp->key_stuck_irq, NULL); 720 free_irq(kp->key_stuck_irq, kp);
721 free_irq(kp->key_sense_irq, NULL); 721 free_irq(kp->key_sense_irq, kp);
722 input_unregister_device(kp->input); 722 input_unregister_device(kp->input);
723 kfree(kp); 723 kfree(kp);
724 724
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index ca7b89196ab7..b21bf5b876bb 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -239,8 +239,6 @@ static int __devexit qt1070_remove(struct i2c_client *client)
239 input_unregister_device(data->input); 239 input_unregister_device(data->input);
240 kfree(data); 240 kfree(data);
241 241
242 i2c_set_clientdata(client, NULL);
243
244 return 0; 242 return 0;
245} 243}
246 244
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 6876700a4469..934aeb583b30 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -291,7 +291,7 @@ static int __devexit sh_keysc_remove(struct platform_device *pdev)
291 return 0; 291 return 0;
292} 292}
293 293
294#if CONFIG_PM_SLEEP 294#ifdef CONFIG_PM_SLEEP
295static int sh_keysc_suspend(struct device *dev) 295static int sh_keysc_suspend(struct device *dev)
296{ 296{
297 struct platform_device *pdev = to_platform_device(dev); 297 struct platform_device *pdev = to_platform_device(dev);
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 2b3b73ec6689..da3828fc2c09 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -657,7 +657,7 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
657 657
658 input_set_drvdata(input_dev, kbc); 658 input_set_drvdata(input_dev, kbc);
659 659
660 input_dev->evbit[0] = BIT_MASK(EV_KEY); 660 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
661 input_set_capability(input_dev, EV_MSC, MSC_SCAN); 661 input_set_capability(input_dev, EV_MSC, MSC_SCAN);
662 662
663 input_dev->keycode = kbc->keycode; 663 input_dev->keycode = kbc->keycode;
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index c8f097a15d89..1c58681de81f 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -337,5 +337,5 @@ module_exit(keypad_exit);
337 337
338MODULE_AUTHOR("Cyril Chemparathy"); 338MODULE_AUTHOR("Cyril Chemparathy");
339MODULE_DESCRIPTION("TNETV107X Keypad Driver"); 339MODULE_DESCRIPTION("TNETV107X Keypad Driver");
340MODULE_ALIAS("platform: tnetv107x-keypad"); 340MODULE_ALIAS("platform:tnetv107x-keypad");
341MODULE_LICENSE("GPL"); 341MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 45dc6aa62ba4..c9104bb4db06 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -100,6 +100,27 @@ config INPUT_MAX8925_ONKEY
100 To compile this driver as a module, choose M here: the module 100 To compile this driver as a module, choose M here: the module
101 will be called max8925_onkey. 101 will be called max8925_onkey.
102 102
103config INPUT_MMA8450
104 tristate "MMA8450 - Freescale's 3-Axis, 8/12-bit Digital Accelerometer"
105 depends on I2C
106 select INPUT_POLLDEV
107 help
108 Say Y here if you want to support Freescale's MMA8450 Accelerometer
109 through I2C interface.
110
111 To compile this driver as a module, choose M here: the
112 module will be called mma8450.
113
114config INPUT_MPU3050
115 tristate "MPU3050 Triaxial gyroscope sensor"
116 depends on I2C
117 help
118 Say Y here if you want to support InvenSense MPU3050
119 connected via an I2C bus.
120
121 To compile this driver as a module, choose M here: the
122 module will be called mpu3050.
123
103config INPUT_APANEL 124config INPUT_APANEL
104 tristate "Fujitsu Lifebook Application Panel buttons" 125 tristate "Fujitsu Lifebook Application Panel buttons"
105 depends on X86 && I2C && LEDS_CLASS 126 depends on X86 && I2C && LEDS_CLASS
@@ -209,6 +230,23 @@ config INPUT_KEYSPAN_REMOTE
209 To compile this driver as a module, choose M here: the module will 230 To compile this driver as a module, choose M here: the module will
210 be called keyspan_remote. 231 be called keyspan_remote.
211 232
233config INPUT_KXTJ9
234 tristate "Kionix KXTJ9 tri-axis digital accelerometer"
235 depends on I2C
236 help
237 Say Y here to enable support for the Kionix KXTJ9 digital tri-axis
238 accelerometer.
239
240 To compile this driver as a module, choose M here: the module will
241 be called kxtj9.
242
243config INPUT_KXTJ9_POLLED_MODE
244 bool "Enable polling mode support"
245 depends on INPUT_KXTJ9
246 select INPUT_POLLDEV
247 help
248 Say Y here if you need accelerometer to work in polling mode.
249
212config INPUT_POWERMATE 250config INPUT_POWERMATE
213 tristate "Griffin PowerMate and Contour Jog support" 251 tristate "Griffin PowerMate and Contour Jog support"
214 depends on USB_ARCH_HAS_HCD 252 depends on USB_ARCH_HAS_HCD
@@ -267,7 +305,7 @@ config INPUT_TWL4030_PWRBUTTON
267config INPUT_TWL4030_VIBRA 305config INPUT_TWL4030_VIBRA
268 tristate "Support for TWL4030 Vibrator" 306 tristate "Support for TWL4030 Vibrator"
269 depends on TWL4030_CORE 307 depends on TWL4030_CORE
270 select TWL4030_CODEC 308 select MFD_TWL4030_AUDIO
271 select INPUT_FF_MEMLESS 309 select INPUT_FF_MEMLESS
272 help 310 help
273 This option enables support for TWL4030 Vibrator Driver. 311 This option enables support for TWL4030 Vibrator Driver.
@@ -275,6 +313,17 @@ config INPUT_TWL4030_VIBRA
275 To compile this driver as a module, choose M here. The module will 313 To compile this driver as a module, choose M here. The module will
276 be called twl4030_vibra. 314 be called twl4030_vibra.
277 315
316config INPUT_TWL6040_VIBRA
317 tristate "Support for TWL6040 Vibrator"
318 depends on TWL4030_CORE
319 select TWL6040_CORE
320 select INPUT_FF_MEMLESS
321 help
322 This option enables support for TWL6040 Vibrator Driver.
323
324 To compile this driver as a module, choose M here. The module will
325 be called twl6040_vibra.
326
278config INPUT_UINPUT 327config INPUT_UINPUT
279 tristate "User level driver support" 328 tristate "User level driver support"
280 help 329 help
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 38efb2cb182b..299ad5edba84 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -25,8 +25,11 @@ obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
25obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o 25obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
26obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o 26obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
27obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o 27obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
28obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
28obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o 29obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
29obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o 30obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
31obj-$(CONFIG_INPUT_MMA8450) += mma8450.o
32obj-$(CONFIG_INPUT_MPU3050) += mpu3050.o
30obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o 33obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
31obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o 34obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
32obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o 35obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
@@ -40,9 +43,9 @@ obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
40obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o 43obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
41obj-$(CONFIG_INPUT_TWL4030_PWRBUTTON) += twl4030-pwrbutton.o 44obj-$(CONFIG_INPUT_TWL4030_PWRBUTTON) += twl4030-pwrbutton.o
42obj-$(CONFIG_INPUT_TWL4030_VIBRA) += twl4030-vibra.o 45obj-$(CONFIG_INPUT_TWL4030_VIBRA) += twl4030-vibra.o
46obj-$(CONFIG_INPUT_TWL6040_VIBRA) += twl6040-vibra.o
43obj-$(CONFIG_INPUT_UINPUT) += uinput.o 47obj-$(CONFIG_INPUT_UINPUT) += uinput.o
44obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o 48obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o
45obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o 49obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o
46obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o 50obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o
47obj-$(CONFIG_INPUT_YEALINK) += yealink.o 51obj-$(CONFIG_INPUT_YEALINK) += yealink.o
48
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index 4f72bdd69410..d00edc9f39d1 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -6,7 +6,6 @@
6 */ 6 */
7 7
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/version.h>
10#include <linux/init.h> 9#include <linux/init.h>
11#include <linux/interrupt.h> 10#include <linux/interrupt.h>
12#include <linux/irq.h> 11#include <linux/irq.h>
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
new file mode 100644
index 000000000000..c456f63b6bae
--- /dev/null
+++ b/drivers/input/misc/kxtj9.c
@@ -0,0 +1,671 @@
1/*
2 * Copyright (C) 2011 Kionix, Inc.
3 * Written by Chris Hudson <chudson@kionix.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
17 * 02111-1307, USA
18 */
19
20#include <linux/delay.h>
21#include <linux/i2c.h>
22#include <linux/input.h>
23#include <linux/interrupt.h>
24#include <linux/slab.h>
25#include <linux/input/kxtj9.h>
26#include <linux/input-polldev.h>
27
28#define NAME "kxtj9"
29#define G_MAX 8000
30/* OUTPUT REGISTERS */
31#define XOUT_L 0x06
32#define WHO_AM_I 0x0F
33/* CONTROL REGISTERS */
34#define INT_REL 0x1A
35#define CTRL_REG1 0x1B
36#define INT_CTRL1 0x1E
37#define DATA_CTRL 0x21
38/* CONTROL REGISTER 1 BITS */
39#define PC1_OFF 0x7F
40#define PC1_ON (1 << 7)
41/* Data ready funtion enable bit: set during probe if using irq mode */
42#define DRDYE (1 << 5)
43/* INTERRUPT CONTROL REGISTER 1 BITS */
44/* Set these during probe if using irq mode */
45#define KXTJ9_IEL (1 << 3)
46#define KXTJ9_IEA (1 << 4)
47#define KXTJ9_IEN (1 << 5)
48/* INPUT_ABS CONSTANTS */
49#define FUZZ 3
50#define FLAT 3
51/* RESUME STATE INDICES */
52#define RES_DATA_CTRL 0
53#define RES_CTRL_REG1 1
54#define RES_INT_CTRL1 2
55#define RESUME_ENTRIES 3
56
57/*
58 * The following table lists the maximum appropriate poll interval for each
59 * available output data rate.
60 */
61static const struct {
62 unsigned int cutoff;
63 u8 mask;
64} kxtj9_odr_table[] = {
65 { 3, ODR800F },
66 { 5, ODR400F },
67 { 10, ODR200F },
68 { 20, ODR100F },
69 { 40, ODR50F },
70 { 80, ODR25F },
71 { 0, ODR12_5F},
72};
73
74struct kxtj9_data {
75 struct i2c_client *client;
76 struct kxtj9_platform_data pdata;
77 struct input_dev *input_dev;
78#ifdef CONFIG_INPUT_KXTJ9_POLLED_MODE
79 struct input_polled_dev *poll_dev;
80#endif
81 unsigned int last_poll_interval;
82 u8 shift;
83 u8 ctrl_reg1;
84 u8 data_ctrl;
85 u8 int_ctrl;
86};
87
88static int kxtj9_i2c_read(struct kxtj9_data *tj9, u8 addr, u8 *data, int len)
89{
90 struct i2c_msg msgs[] = {
91 {
92 .addr = tj9->client->addr,
93 .flags = tj9->client->flags,
94 .len = 1,
95 .buf = &addr,
96 },
97 {
98 .addr = tj9->client->addr,
99 .flags = tj9->client->flags | I2C_M_RD,
100 .len = len,
101 .buf = data,
102 },
103 };
104
105 return i2c_transfer(tj9->client->adapter, msgs, 2);
106}
107
108static void kxtj9_report_acceleration_data(struct kxtj9_data *tj9)
109{
110 s16 acc_data[3]; /* Data bytes from hardware xL, xH, yL, yH, zL, zH */
111 s16 x, y, z;
112 int err;
113
114 err = kxtj9_i2c_read(tj9, XOUT_L, (u8 *)acc_data, 6);
115 if (err < 0)
116 dev_err(&tj9->client->dev, "accelerometer data read failed\n");
117
118 x = le16_to_cpu(acc_data[tj9->pdata.axis_map_x]) >> tj9->shift;
119 y = le16_to_cpu(acc_data[tj9->pdata.axis_map_y]) >> tj9->shift;
120 z = le16_to_cpu(acc_data[tj9->pdata.axis_map_z]) >> tj9->shift;
121
122 input_report_abs(tj9->input_dev, ABS_X, tj9->pdata.negate_x ? -x : x);
123 input_report_abs(tj9->input_dev, ABS_Y, tj9->pdata.negate_y ? -y : y);
124 input_report_abs(tj9->input_dev, ABS_Z, tj9->pdata.negate_z ? -z : z);
125 input_sync(tj9->input_dev);
126}
127
128static irqreturn_t kxtj9_isr(int irq, void *dev)
129{
130 struct kxtj9_data *tj9 = dev;
131 int err;
132
133 /* data ready is the only possible interrupt type */
134 kxtj9_report_acceleration_data(tj9);
135
136 err = i2c_smbus_read_byte_data(tj9->client, INT_REL);
137 if (err < 0)
138 dev_err(&tj9->client->dev,
139 "error clearing interrupt status: %d\n", err);
140
141 return IRQ_HANDLED;
142}
143
144static int kxtj9_update_g_range(struct kxtj9_data *tj9, u8 new_g_range)
145{
146 switch (new_g_range) {
147 case KXTJ9_G_2G:
148 tj9->shift = 4;
149 break;
150 case KXTJ9_G_4G:
151 tj9->shift = 3;
152 break;
153 case KXTJ9_G_8G:
154 tj9->shift = 2;
155 break;
156 default:
157 return -EINVAL;
158 }
159
160 tj9->ctrl_reg1 &= 0xe7;
161 tj9->ctrl_reg1 |= new_g_range;
162
163 return 0;
164}
165
166static int kxtj9_update_odr(struct kxtj9_data *tj9, unsigned int poll_interval)
167{
168 int err;
169 int i;
170
171 /* Use the lowest ODR that can support the requested poll interval */
172 for (i = 0; i < ARRAY_SIZE(kxtj9_odr_table); i++) {
173 tj9->data_ctrl = kxtj9_odr_table[i].mask;
174 if (poll_interval < kxtj9_odr_table[i].cutoff)
175 break;
176 }
177
178 err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, 0);
179 if (err < 0)
180 return err;
181
182 err = i2c_smbus_write_byte_data(tj9->client, DATA_CTRL, tj9->data_ctrl);
183 if (err < 0)
184 return err;
185
186 err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, tj9->ctrl_reg1);
187 if (err < 0)
188 return err;
189
190 return 0;
191}
192
193static int kxtj9_device_power_on(struct kxtj9_data *tj9)
194{
195 if (tj9->pdata.power_on)
196 return tj9->pdata.power_on();
197
198 return 0;
199}
200
201static void kxtj9_device_power_off(struct kxtj9_data *tj9)
202{
203 int err;
204
205 tj9->ctrl_reg1 &= PC1_OFF;
206 err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, tj9->ctrl_reg1);
207 if (err < 0)
208 dev_err(&tj9->client->dev, "soft power off failed\n");
209
210 if (tj9->pdata.power_off)
211 tj9->pdata.power_off();
212}
213
214static int kxtj9_enable(struct kxtj9_data *tj9)
215{
216 int err;
217
218 err = kxtj9_device_power_on(tj9);
219 if (err < 0)
220 return err;
221
222 /* ensure that PC1 is cleared before updating control registers */
223 err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, 0);
224 if (err < 0)
225 return err;
226
227 /* only write INT_CTRL_REG1 if in irq mode */
228 if (tj9->client->irq) {
229 err = i2c_smbus_write_byte_data(tj9->client,
230 INT_CTRL1, tj9->int_ctrl);
231 if (err < 0)
232 return err;
233 }
234
235 err = kxtj9_update_g_range(tj9, tj9->pdata.g_range);
236 if (err < 0)
237 return err;
238
239 /* turn on outputs */
240 tj9->ctrl_reg1 |= PC1_ON;
241 err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, tj9->ctrl_reg1);
242 if (err < 0)
243 return err;
244
245 err = kxtj9_update_odr(tj9, tj9->last_poll_interval);
246 if (err < 0)
247 return err;
248
249 /* clear initial interrupt if in irq mode */
250 if (tj9->client->irq) {
251 err = i2c_smbus_read_byte_data(tj9->client, INT_REL);
252 if (err < 0) {
253 dev_err(&tj9->client->dev,
254 "error clearing interrupt: %d\n", err);
255 goto fail;
256 }
257 }
258
259 return 0;
260
261fail:
262 kxtj9_device_power_off(tj9);
263 return err;
264}
265
266static void kxtj9_disable(struct kxtj9_data *tj9)
267{
268 kxtj9_device_power_off(tj9);
269}
270
271static int kxtj9_input_open(struct input_dev *input)
272{
273 struct kxtj9_data *tj9 = input_get_drvdata(input);
274
275 return kxtj9_enable(tj9);
276}
277
278static void kxtj9_input_close(struct input_dev *dev)
279{
280 struct kxtj9_data *tj9 = input_get_drvdata(dev);
281
282 kxtj9_disable(tj9);
283}
284
285static void __devinit kxtj9_init_input_device(struct kxtj9_data *tj9,
286 struct input_dev *input_dev)
287{
288 __set_bit(EV_ABS, input_dev->evbit);
289 input_set_abs_params(input_dev, ABS_X, -G_MAX, G_MAX, FUZZ, FLAT);
290 input_set_abs_params(input_dev, ABS_Y, -G_MAX, G_MAX, FUZZ, FLAT);
291 input_set_abs_params(input_dev, ABS_Z, -G_MAX, G_MAX, FUZZ, FLAT);
292
293 input_dev->name = "kxtj9_accel";
294 input_dev->id.bustype = BUS_I2C;
295 input_dev->dev.parent = &tj9->client->dev;
296}
297
298static int __devinit kxtj9_setup_input_device(struct kxtj9_data *tj9)
299{
300 struct input_dev *input_dev;
301 int err;
302
303 input_dev = input_allocate_device();
304 if (!input_dev) {
305 dev_err(&tj9->client->dev, "input device allocate failed\n");
306 return -ENOMEM;
307 }
308
309 tj9->input_dev = input_dev;
310
311 input_dev->open = kxtj9_input_open;
312 input_dev->close = kxtj9_input_close;
313 input_set_drvdata(input_dev, tj9);
314
315 kxtj9_init_input_device(tj9, input_dev);
316
317 err = input_register_device(tj9->input_dev);
318 if (err) {
319 dev_err(&tj9->client->dev,
320 "unable to register input polled device %s: %d\n",
321 tj9->input_dev->name, err);
322 input_free_device(tj9->input_dev);
323 return err;
324 }
325
326 return 0;
327}
328
329/*
330 * When IRQ mode is selected, we need to provide an interface to allow the user
331 * to change the output data rate of the part. For consistency, we are using
332 * the set_poll method, which accepts a poll interval in milliseconds, and then
333 * calls update_odr() while passing this value as an argument. In IRQ mode, the
334 * data outputs will not be read AT the requested poll interval, rather, the
335 * lowest ODR that can support the requested interval. The client application
336 * will be responsible for retrieving data from the input node at the desired
337 * interval.
338 */
339
340/* Returns currently selected poll interval (in ms) */
341static ssize_t kxtj9_get_poll(struct device *dev,
342 struct device_attribute *attr, char *buf)
343{
344 struct i2c_client *client = to_i2c_client(dev);
345 struct kxtj9_data *tj9 = i2c_get_clientdata(client);
346
347 return sprintf(buf, "%d\n", tj9->last_poll_interval);
348}
349
350/* Allow users to select a new poll interval (in ms) */
351static ssize_t kxtj9_set_poll(struct device *dev, struct device_attribute *attr,
352 const char *buf, size_t count)
353{
354 struct i2c_client *client = to_i2c_client(dev);
355 struct kxtj9_data *tj9 = i2c_get_clientdata(client);
356 struct input_dev *input_dev = tj9->input_dev;
357 unsigned int interval;
358 int error;
359
360 error = kstrtouint(buf, 10, &interval);
361 if (error < 0)
362 return error;
363
364 /* Lock the device to prevent races with open/close (and itself) */
365 mutex_lock(&input_dev->mutex);
366
367 disable_irq(client->irq);
368
369 /*
370 * Set current interval to the greater of the minimum interval or
371 * the requested interval
372 */
373 tj9->last_poll_interval = max(interval, tj9->pdata.min_interval);
374
375 kxtj9_update_odr(tj9, tj9->last_poll_interval);
376
377 enable_irq(client->irq);
378 mutex_unlock(&input_dev->mutex);
379
380 return count;
381}
382
383static DEVICE_ATTR(poll, S_IRUGO|S_IWUSR, kxtj9_get_poll, kxtj9_set_poll);
384
385static struct attribute *kxtj9_attributes[] = {
386 &dev_attr_poll.attr,
387 NULL
388};
389
390static struct attribute_group kxtj9_attribute_group = {
391 .attrs = kxtj9_attributes
392};
393
394
395#ifdef CONFIG_INPUT_KXTJ9_POLLED_MODE
396static void kxtj9_poll(struct input_polled_dev *dev)
397{
398 struct kxtj9_data *tj9 = dev->private;
399 unsigned int poll_interval = dev->poll_interval;
400
401 kxtj9_report_acceleration_data(tj9);
402
403 if (poll_interval != tj9->last_poll_interval) {
404 kxtj9_update_odr(tj9, poll_interval);
405 tj9->last_poll_interval = poll_interval;
406 }
407}
408
409static void kxtj9_polled_input_open(struct input_polled_dev *dev)
410{
411 struct kxtj9_data *tj9 = dev->private;
412
413 kxtj9_enable(tj9);
414}
415
416static void kxtj9_polled_input_close(struct input_polled_dev *dev)
417{
418 struct kxtj9_data *tj9 = dev->private;
419
420 kxtj9_disable(tj9);
421}
422
423static int __devinit kxtj9_setup_polled_device(struct kxtj9_data *tj9)
424{
425 int err;
426 struct input_polled_dev *poll_dev;
427 poll_dev = input_allocate_polled_device();
428
429 if (!poll_dev) {
430 dev_err(&tj9->client->dev,
431 "Failed to allocate polled device\n");
432 return -ENOMEM;
433 }
434
435 tj9->poll_dev = poll_dev;
436 tj9->input_dev = poll_dev->input;
437
438 poll_dev->private = tj9;
439 poll_dev->poll = kxtj9_poll;
440 poll_dev->open = kxtj9_polled_input_open;
441 poll_dev->close = kxtj9_polled_input_close;
442
443 kxtj9_init_input_device(tj9, poll_dev->input);
444
445 err = input_register_polled_device(poll_dev);
446 if (err) {
447 dev_err(&tj9->client->dev,
448 "Unable to register polled device, err=%d\n", err);
449 input_free_polled_device(poll_dev);
450 return err;
451 }
452
453 return 0;
454}
455
456static void __devexit kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
457{
458 input_unregister_polled_device(tj9->poll_dev);
459 input_free_polled_device(tj9->poll_dev);
460}
461
462#else
463
464static inline int kxtj9_setup_polled_device(struct kxtj9_data *tj9)
465{
466 return -ENOSYS;
467}
468
469static inline void kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
470{
471}
472
473#endif
474
475static int __devinit kxtj9_verify(struct kxtj9_data *tj9)
476{
477 int retval;
478
479 retval = kxtj9_device_power_on(tj9);
480 if (retval < 0)
481 return retval;
482
483 retval = i2c_smbus_read_byte_data(tj9->client, WHO_AM_I);
484 if (retval < 0) {
485 dev_err(&tj9->client->dev, "read err int source\n");
486 goto out;
487 }
488
489 retval = retval != 0x06 ? -EIO : 0;
490
491out:
492 kxtj9_device_power_off(tj9);
493 return retval;
494}
495
496static int __devinit kxtj9_probe(struct i2c_client *client,
497 const struct i2c_device_id *id)
498{
499 const struct kxtj9_platform_data *pdata = client->dev.platform_data;
500 struct kxtj9_data *tj9;
501 int err;
502
503 if (!i2c_check_functionality(client->adapter,
504 I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE_DATA)) {
505 dev_err(&client->dev, "client is not i2c capable\n");
506 return -ENXIO;
507 }
508
509 if (!pdata) {
510 dev_err(&client->dev, "platform data is NULL; exiting\n");
511 return -EINVAL;
512 }
513
514 tj9 = kzalloc(sizeof(*tj9), GFP_KERNEL);
515 if (!tj9) {
516 dev_err(&client->dev,
517 "failed to allocate memory for module data\n");
518 return -ENOMEM;
519 }
520
521 tj9->client = client;
522 tj9->pdata = *pdata;
523
524 if (pdata->init) {
525 err = pdata->init();
526 if (err < 0)
527 goto err_free_mem;
528 }
529
530 err = kxtj9_verify(tj9);
531 if (err < 0) {
532 dev_err(&client->dev, "device not recognized\n");
533 goto err_pdata_exit;
534 }
535
536 i2c_set_clientdata(client, tj9);
537
538 tj9->ctrl_reg1 = tj9->pdata.res_12bit | tj9->pdata.g_range;
539 tj9->data_ctrl = tj9->pdata.data_odr_init;
540
541 if (client->irq) {
542 /* If in irq mode, populate INT_CTRL_REG1 and enable DRDY. */
543 tj9->int_ctrl |= KXTJ9_IEN | KXTJ9_IEA | KXTJ9_IEL;
544 tj9->ctrl_reg1 |= DRDYE;
545
546 err = kxtj9_setup_input_device(tj9);
547 if (err)
548 goto err_pdata_exit;
549
550 err = request_threaded_irq(client->irq, NULL, kxtj9_isr,
551 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
552 "kxtj9-irq", tj9);
553 if (err) {
554 dev_err(&client->dev, "request irq failed: %d\n", err);
555 goto err_destroy_input;
556 }
557
558 err = sysfs_create_group(&client->dev.kobj, &kxtj9_attribute_group);
559 if (err) {
560 dev_err(&client->dev, "sysfs create failed: %d\n", err);
561 goto err_free_irq;
562 }
563
564 } else {
565 err = kxtj9_setup_polled_device(tj9);
566 if (err)
567 goto err_pdata_exit;
568 }
569
570 return 0;
571
572err_free_irq:
573 free_irq(client->irq, tj9);
574err_destroy_input:
575 input_unregister_device(tj9->input_dev);
576err_pdata_exit:
577 if (tj9->pdata.exit)
578 tj9->pdata.exit();
579err_free_mem:
580 kfree(tj9);
581 return err;
582}
583
584static int __devexit kxtj9_remove(struct i2c_client *client)
585{
586 struct kxtj9_data *tj9 = i2c_get_clientdata(client);
587
588 if (client->irq) {
589 sysfs_remove_group(&client->dev.kobj, &kxtj9_attribute_group);
590 free_irq(client->irq, tj9);
591 input_unregister_device(tj9->input_dev);
592 } else {
593 kxtj9_teardown_polled_device(tj9);
594 }
595
596 if (tj9->pdata.exit)
597 tj9->pdata.exit();
598
599 kfree(tj9);
600
601 return 0;
602}
603
604#ifdef CONFIG_PM_SLEEP
605static int kxtj9_suspend(struct device *dev)
606{
607 struct i2c_client *client = to_i2c_client(dev);
608 struct kxtj9_data *tj9 = i2c_get_clientdata(client);
609 struct input_dev *input_dev = tj9->input_dev;
610
611 mutex_lock(&input_dev->mutex);
612
613 if (input_dev->users)
614 kxtj9_disable(tj9);
615
616 mutex_unlock(&input_dev->mutex);
617 return 0;
618}
619
620static int kxtj9_resume(struct device *dev)
621{
622 struct i2c_client *client = to_i2c_client(dev);
623 struct kxtj9_data *tj9 = i2c_get_clientdata(client);
624 struct input_dev *input_dev = tj9->input_dev;
625 int retval = 0;
626
627 mutex_lock(&input_dev->mutex);
628
629 if (input_dev->users)
630 kxtj9_enable(tj9);
631
632 mutex_unlock(&input_dev->mutex);
633 return retval;
634}
635#endif
636
637static SIMPLE_DEV_PM_OPS(kxtj9_pm_ops, kxtj9_suspend, kxtj9_resume);
638
639static const struct i2c_device_id kxtj9_id[] = {
640 { NAME, 0 },
641 { },
642};
643
644MODULE_DEVICE_TABLE(i2c, kxtj9_id);
645
646static struct i2c_driver kxtj9_driver = {
647 .driver = {
648 .name = NAME,
649 .owner = THIS_MODULE,
650 .pm = &kxtj9_pm_ops,
651 },
652 .probe = kxtj9_probe,
653 .remove = __devexit_p(kxtj9_remove),
654 .id_table = kxtj9_id,
655};
656
657static int __init kxtj9_init(void)
658{
659 return i2c_add_driver(&kxtj9_driver);
660}
661module_init(kxtj9_init);
662
663static void __exit kxtj9_exit(void)
664{
665 i2c_del_driver(&kxtj9_driver);
666}
667module_exit(kxtj9_exit);
668
669MODULE_DESCRIPTION("KXTJ9 accelerometer driver");
670MODULE_AUTHOR("Chris Hudson <chudson@kionix.com>");
671MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
new file mode 100644
index 000000000000..20f8f9284f02
--- /dev/null
+++ b/drivers/input/misc/mma8450.c
@@ -0,0 +1,256 @@
1/*
2 * Driver for Freescale's 3-Axis Accelerometer MMA8450
3 *
4 * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/slab.h>
24#include <linux/delay.h>
25#include <linux/i2c.h>
26#include <linux/input-polldev.h>
27
#define MMA8450_DRV_NAME	"mma8450"

/* delay after switching operating mode before data is valid */
#define MODE_CHANGE_DELAY_MS	100
/* default / maximum polling interval in ms for the polled input dev */
#define POLL_INTERVAL		100
#define POLL_INTERVAL_MAX	500

/* register definitions */
#define MMA8450_STATUS		0x00
#define MMA8450_STATUS_ZXYDR	0x08	/* new X/Y/Z sample ready */

#define MMA8450_OUT_X8		0x01
#define MMA8450_OUT_Y8		0x02
#define MMA8450_OUT_Z8		0x03

/* 12-bit output registers, LSB first */
#define MMA8450_OUT_X_LSB	0x05
#define MMA8450_OUT_X_MSB	0x06
#define MMA8450_OUT_Y_LSB	0x07
#define MMA8450_OUT_Y_MSB	0x08
#define MMA8450_OUT_Z_LSB	0x09
#define MMA8450_OUT_Z_MSB	0x0a

#define MMA8450_XYZ_DATA_CFG	0x16

#define MMA8450_CTRL_REG1	0x38
#define MMA8450_CTRL_REG2	0x39

/* mma8450 status: per-device driver state */
struct mma8450 {
	struct i2c_client	*client;	/* bound I2C client */
	struct input_polled_dev	*idev;		/* polled input device */
};
59
60static int mma8450_read(struct mma8450 *m, unsigned off)
61{
62 struct i2c_client *c = m->client;
63 int ret;
64
65 ret = i2c_smbus_read_byte_data(c, off);
66 if (ret < 0)
67 dev_err(&c->dev,
68 "failed to read register 0x%02x, error %d\n",
69 off, ret);
70
71 return ret;
72}
73
74static int mma8450_write(struct mma8450 *m, unsigned off, u8 v)
75{
76 struct i2c_client *c = m->client;
77 int error;
78
79 error = i2c_smbus_write_byte_data(c, off, v);
80 if (error < 0) {
81 dev_err(&c->dev,
82 "failed to write to register 0x%02x, error %d\n",
83 off, error);
84 return error;
85 }
86
87 return 0;
88}
89
90static int mma8450_read_xyz(struct mma8450 *m, int *x, int *y, int *z)
91{
92 struct i2c_client *c = m->client;
93 u8 buff[6];
94 int err;
95
96 err = i2c_smbus_read_i2c_block_data(c, MMA8450_OUT_X_LSB, 6, buff);
97 if (err < 0) {
98 dev_err(&c->dev,
99 "failed to read block data at 0x%02x, error %d\n",
100 MMA8450_OUT_X_LSB, err);
101 return err;
102 }
103
104 *x = ((buff[1] << 4) & 0xff0) | (buff[0] & 0xf);
105 *y = ((buff[3] << 4) & 0xff0) | (buff[2] & 0xf);
106 *z = ((buff[5] << 4) & 0xff0) | (buff[4] & 0xf);
107
108 return 0;
109}
110
111static void mma8450_poll(struct input_polled_dev *dev)
112{
113 struct mma8450 *m = dev->private;
114 int x, y, z;
115 int ret;
116 int err;
117
118 ret = mma8450_read(m, MMA8450_STATUS);
119 if (ret < 0)
120 return;
121
122 if (!(ret & MMA8450_STATUS_ZXYDR))
123 return;
124
125 err = mma8450_read_xyz(m, &x, &y, &z);
126 if (err)
127 return;
128
129 input_report_abs(dev->input, ABS_X, x);
130 input_report_abs(dev->input, ABS_Y, y);
131 input_report_abs(dev->input, ABS_Z, z);
132 input_sync(dev->input);
133}
134
135/* Initialize the MMA8450 chip */
136static void mma8450_open(struct input_polled_dev *dev)
137{
138 struct mma8450 *m = dev->private;
139 int err;
140
141 /* enable all events from X/Y/Z, no FIFO */
142 err = mma8450_write(m, MMA8450_XYZ_DATA_CFG, 0x07);
143 if (err)
144 return;
145
146 /*
147 * Sleep mode poll rate - 50Hz
148 * System output data rate - 400Hz
149 * Full scale selection - Active, +/- 2G
150 */
151 err = mma8450_write(m, MMA8450_CTRL_REG1, 0x01);
152 if (err < 0)
153 return;
154
155 msleep(MODE_CHANGE_DELAY_MS);
156}
157
158static void mma8450_close(struct input_polled_dev *dev)
159{
160 struct mma8450 *m = dev->private;
161
162 mma8450_write(m, MMA8450_CTRL_REG1, 0x00);
163 mma8450_write(m, MMA8450_CTRL_REG2, 0x01);
164}
165
166/*
167 * I2C init/probing/exit functions
168 */
169static int __devinit mma8450_probe(struct i2c_client *c,
170 const struct i2c_device_id *id)
171{
172 struct input_polled_dev *idev;
173 struct mma8450 *m;
174 int err;
175
176 m = kzalloc(sizeof(struct mma8450), GFP_KERNEL);
177 idev = input_allocate_polled_device();
178 if (!m || !idev) {
179 err = -ENOMEM;
180 goto err_free_mem;
181 }
182
183 m->client = c;
184 m->idev = idev;
185
186 idev->private = m;
187 idev->input->name = MMA8450_DRV_NAME;
188 idev->input->id.bustype = BUS_I2C;
189 idev->poll = mma8450_poll;
190 idev->poll_interval = POLL_INTERVAL;
191 idev->poll_interval_max = POLL_INTERVAL_MAX;
192 idev->open = mma8450_open;
193 idev->close = mma8450_close;
194
195 __set_bit(EV_ABS, idev->input->evbit);
196 input_set_abs_params(idev->input, ABS_X, -2048, 2047, 32, 32);
197 input_set_abs_params(idev->input, ABS_Y, -2048, 2047, 32, 32);
198 input_set_abs_params(idev->input, ABS_Z, -2048, 2047, 32, 32);
199
200 err = input_register_polled_device(idev);
201 if (err) {
202 dev_err(&c->dev, "failed to register polled input device\n");
203 goto err_free_mem;
204 }
205
206 return 0;
207
208err_free_mem:
209 input_free_polled_device(idev);
210 kfree(m);
211 return err;
212}
213
/*
 * Unbind: unregister and free the polled input device, then the driver
 * state.  Relies on probe having stored the state with
 * i2c_set_clientdata() -- NOTE(review): the probe shown in this patch
 * never does so; confirm i2c_get_clientdata() cannot return NULL here.
 */
static int __devexit mma8450_remove(struct i2c_client *c)
{
	struct mma8450 *m = i2c_get_clientdata(c);
	struct input_polled_dev *idev = m->idev;

	input_unregister_polled_device(idev);
	input_free_polled_device(idev);
	kfree(m);

	return 0;
}
225
/* I2C device-id table used for driver binding and module autoload. */
static const struct i2c_device_id mma8450_id[] = {
	{ MMA8450_DRV_NAME, 0 },
	{ },
};
MODULE_DEVICE_TABLE(i2c, mma8450_id);

static struct i2c_driver mma8450_driver = {
	.driver = {
		.name	= MMA8450_DRV_NAME,
		.owner	= THIS_MODULE,
	},
	.probe		= mma8450_probe,
	.remove		= __devexit_p(mma8450_remove),
	.id_table	= mma8450_id,
};
241
/* Register the I2C driver on module load. */
static int __init mma8450_init(void)
{
	return i2c_add_driver(&mma8450_driver);
}
module_init(mma8450_init);

/* Unregister on module unload. */
static void __exit mma8450_exit(void)
{
	i2c_del_driver(&mma8450_driver);
}
module_exit(mma8450_exit);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MMA8450 3-Axis Accelerometer Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
new file mode 100644
index 000000000000..b95fac15b2ea
--- /dev/null
+++ b/drivers/input/misc/mpu3050.c
@@ -0,0 +1,376 @@
1/*
2 * MPU3050 Tri-axis gyroscope driver
3 *
4 * Copyright (C) 2011 Wistron Co.Ltd
5 * Joseph Lai <joseph_lai@wistron.com>
6 *
7 * Trimmed down by Alan Cox <alan@linux.intel.com> to produce this version
8 *
9 * This is a 'lite' version of the driver, while we consider the right way
10 * to present the other features to user space. In particular it requires the
11 * device has an IRQ, and it only provides an input interface, so is not much
12 * use for device orientation. A fuller version is available from the Meego
13 * tree.
14 *
15 * This program is based on bma023.c.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; version 2 of the License.
20 *
21 * This program is distributed in the hope that it will be useful, but
22 * WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 * General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License along
27 * with this program; if not, write to the Free Software Foundation, Inc.,
28 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
29 *
30 */
31
32#include <linux/module.h>
33#include <linux/init.h>
34#include <linux/interrupt.h>
35#include <linux/platform_device.h>
36#include <linux/mutex.h>
37#include <linux/err.h>
38#include <linux/i2c.h>
39#include <linux/input.h>
40#include <linux/delay.h>
41#include <linux/slab.h>
42#include <linux/pm_runtime.h>
43
#define MPU3050_CHIP_ID_REG	0x00
#define MPU3050_CHIP_ID		0x69	/* expected WHO_AM_I value */
#define MPU3050_XOUT_H		0x1D	/* first of 6 axis data registers */
#define MPU3050_PWR_MGM		0x3E
#define MPU3050_PWR_MGM_POS	6	/* sleep bit position */
#define MPU3050_PWR_MGM_MASK	0x40	/* sleep bit mask */

/* runtime-PM autosuspend delay, ms */
#define MPU3050_AUTO_DELAY	1000

/* full signed 16-bit range reported per axis */
#define MPU3050_MIN_VALUE	-32768
#define MPU3050_MAX_VALUE	32767

/* one decoded gyroscope sample */
struct axis_data {
	s16 x;
	s16 y;
	s16 z;
};

/* per-device driver state */
struct mpu3050_sensor {
	struct i2c_client *client;	/* bound I2C client */
	struct device *dev;		/* shortcut to &client->dev */
	struct input_dev *idev;		/* registered input device */
};
67
68/**
69 * mpu3050_xyz_read_reg - read the axes values
70 * @buffer: provide register addr and get register
71 * @length: length of register
72 *
73 * Reads the register values in one transaction or returns a negative
74 * error code on failure.
75 */
76static int mpu3050_xyz_read_reg(struct i2c_client *client,
77 u8 *buffer, int length)
78{
79 /*
80 * Annoying we can't make this const because the i2c layer doesn't
81 * declare input buffers const.
82 */
83 char cmd = MPU3050_XOUT_H;
84 struct i2c_msg msg[] = {
85 {
86 .addr = client->addr,
87 .flags = 0,
88 .len = 1,
89 .buf = &cmd,
90 },
91 {
92 .addr = client->addr,
93 .flags = I2C_M_RD,
94 .len = length,
95 .buf = buffer,
96 },
97 };
98
99 return i2c_transfer(client->adapter, msg, 2);
100}
101
/**
 * mpu3050_read_xyz - get co-ordinates from device
 * @client: i2c address of sensor
 * @coords: co-ordinates to update
 *
 * Return the converted X Y and Z co-ordinates from the sensor device.
 * The device stores each axis big-endian, hence the be16_to_cpu()
 * conversion of the raw buffer.
 *
 * NOTE(review): the mpu3050_xyz_read_reg() return value is ignored; on
 * a failed transfer @coords is filled from a partially/uninitialized
 * buffer.  Consider propagating the error to callers.
 */
static void mpu3050_read_xyz(struct i2c_client *client,
			     struct axis_data *coords)
{
	u16 buffer[3];

	mpu3050_xyz_read_reg(client, (u8 *)buffer, 6);
	coords->x = be16_to_cpu(buffer[0]);
	coords->y = be16_to_cpu(buffer[1]);
	coords->z = be16_to_cpu(buffer[2]);
	dev_dbg(&client->dev, "%s: x %d, y %d, z %d\n", __func__,
					coords->x, coords->y, coords->z);
}
121
/**
 * mpu3050_set_power_mode - set the power mode
 * @client: i2c client for the sensor
 * @val: value to switch on/off of power, 1: normal power, 0: low power
 *
 * Put device to normal-power mode or low-power mode.
 *
 * The PWR_MGM bit is a *sleep* bit, so the requested value is inverted
 * (XOR with the mask) before being written: val=1 clears the sleep bit,
 * val=0 sets it.  Read-modify-write preserves the other PWR_MGM bits.
 */
static void mpu3050_set_power_mode(struct i2c_client *client, u8 val)
{
	u8 value;

	value = i2c_smbus_read_byte_data(client, MPU3050_PWR_MGM);
	value = (value & ~MPU3050_PWR_MGM_MASK) |
		(((val << MPU3050_PWR_MGM_POS) & MPU3050_PWR_MGM_MASK) ^
		 MPU3050_PWR_MGM_MASK);
	i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM, value);
}
139
140/**
141 * mpu3050_input_open - called on input event open
142 * @input: input dev of opened device
143 *
144 * The input layer calls this function when input event is opened. The
145 * function will push the device to resume. Then, the device is ready
146 * to provide data.
147 */
148static int mpu3050_input_open(struct input_dev *input)
149{
150 struct mpu3050_sensor *sensor = input_get_drvdata(input);
151
152 pm_runtime_get(sensor->dev);
153
154 return 0;
155}
156
157/**
158 * mpu3050_input_close - called on input event close
159 * @input: input dev of closed device
160 *
161 * The input layer calls this function when input event is closed. The
162 * function will push the device to suspend.
163 */
164static void mpu3050_input_close(struct input_dev *input)
165{
166 struct mpu3050_sensor *sensor = input_get_drvdata(input);
167
168 pm_runtime_put(sensor->dev);
169}
170
171/**
172 * mpu3050_interrupt_thread - handle an IRQ
173 * @irq: interrupt numner
174 * @data: the sensor
175 *
176 * Called by the kernel single threaded after an interrupt occurs. Read
177 * the sensor data and generate an input event for it.
178 */
179static irqreturn_t mpu3050_interrupt_thread(int irq, void *data)
180{
181 struct mpu3050_sensor *sensor = data;
182 struct axis_data axis;
183
184 mpu3050_read_xyz(sensor->client, &axis);
185
186 input_report_abs(sensor->idev, ABS_X, axis.x);
187 input_report_abs(sensor->idev, ABS_Y, axis.y);
188 input_report_abs(sensor->idev, ABS_Z, axis.z);
189 input_sync(sensor->idev);
190
191 return IRQ_HANDLED;
192}
193
194/**
195 * mpu3050_probe - device detection callback
196 * @client: i2c client of found device
197 * @id: id match information
198 *
199 * The I2C layer calls us when it believes a sensor is present at this
200 * address. Probe to see if this is correct and to validate the device.
201 *
202 * If present install the relevant sysfs interfaces and input device.
203 */
204static int __devinit mpu3050_probe(struct i2c_client *client,
205 const struct i2c_device_id *id)
206{
207 struct mpu3050_sensor *sensor;
208 struct input_dev *idev;
209 int ret;
210 int error;
211
212 sensor = kzalloc(sizeof(struct mpu3050_sensor), GFP_KERNEL);
213 idev = input_allocate_device();
214 if (!sensor || !idev) {
215 dev_err(&client->dev, "failed to allocate driver data\n");
216 error = -ENOMEM;
217 goto err_free_mem;
218 }
219
220 sensor->client = client;
221 sensor->dev = &client->dev;
222 sensor->idev = idev;
223
224 mpu3050_set_power_mode(client, 1);
225 msleep(10);
226
227 ret = i2c_smbus_read_byte_data(client, MPU3050_CHIP_ID_REG);
228 if (ret < 0) {
229 dev_err(&client->dev, "failed to detect device\n");
230 error = -ENXIO;
231 goto err_free_mem;
232 }
233
234 if (ret != MPU3050_CHIP_ID) {
235 dev_err(&client->dev, "unsupported chip id\n");
236 error = -ENXIO;
237 goto err_free_mem;
238 }
239
240 idev->name = "MPU3050";
241 idev->id.bustype = BUS_I2C;
242 idev->dev.parent = &client->dev;
243
244 idev->open = mpu3050_input_open;
245 idev->close = mpu3050_input_close;
246
247 __set_bit(EV_ABS, idev->evbit);
248 input_set_abs_params(idev, ABS_X,
249 MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0);
250 input_set_abs_params(idev, ABS_Y,
251 MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0);
252 input_set_abs_params(idev, ABS_Z,
253 MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0);
254
255 input_set_drvdata(idev, sensor);
256
257 pm_runtime_set_active(&client->dev);
258
259 error = request_threaded_irq(client->irq,
260 NULL, mpu3050_interrupt_thread,
261 IRQF_TRIGGER_RISING,
262 "mpu_int", sensor);
263 if (error) {
264 dev_err(&client->dev,
265 "can't get IRQ %d, error %d\n", client->irq, error);
266 goto err_pm_set_suspended;
267 }
268
269 error = input_register_device(idev);
270 if (error) {
271 dev_err(&client->dev, "failed to register input device\n");
272 goto err_free_irq;
273 }
274
275 pm_runtime_enable(&client->dev);
276 pm_runtime_set_autosuspend_delay(&client->dev, MPU3050_AUTO_DELAY);
277
278 return 0;
279
280err_free_irq:
281 free_irq(client->irq, sensor);
282err_pm_set_suspended:
283 pm_runtime_set_suspended(&client->dev);
284err_free_mem:
285 input_unregister_device(idev);
286 kfree(sensor);
287 return error;
288}
289
/**
 * mpu3050_remove - remove a sensor
 * @client: i2c client of sensor being removed
 *
 * Our sensor is going away, clean up the resources: disable runtime PM,
 * release the IRQ before unregistering the input device it reports to,
 * then free the driver state.
 *
 * NOTE(review): depends on probe having called i2c_set_clientdata();
 * the probe in this patch does not, so @sensor would be NULL here --
 * verify against the probe path.
 */
static int __devexit mpu3050_remove(struct i2c_client *client)
{
	struct mpu3050_sensor *sensor = i2c_get_clientdata(client);

	pm_runtime_disable(&client->dev);
	pm_runtime_set_suspended(&client->dev);

	free_irq(client->irq, sensor);
	input_unregister_device(sensor->idev);
	kfree(sensor);

	return 0;
}
309
#ifdef CONFIG_PM
/**
 * mpu3050_suspend - called on device suspend
 * @dev: device being suspended
 *
 * Put the device into sleep mode before we suspend the machine.
 */
static int mpu3050_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	mpu3050_set_power_mode(client, 0);

	return 0;
}

/**
 * mpu3050_resume - called on device resume
 * @dev: device being resumed
 *
 * Put the device into powered mode on resume.
 */
static int mpu3050_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	mpu3050_set_power_mode(client, 1);
	msleep(100);  /* wait for gyro chip resume */

	return 0;
}
#endif
342
/* Same handlers for system sleep and runtime PM; no idle callback. */
static UNIVERSAL_DEV_PM_OPS(mpu3050_pm, mpu3050_suspend, mpu3050_resume, NULL);

static const struct i2c_device_id mpu3050_ids[] = {
	{ "mpu3050", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, mpu3050_ids);

static struct i2c_driver mpu3050_i2c_driver = {
	.driver	= {
		.name	= "mpu3050",
		.owner	= THIS_MODULE,
		.pm	= &mpu3050_pm,
	},
	.probe		= mpu3050_probe,
	.remove		= __devexit_p(mpu3050_remove),
	.id_table	= mpu3050_ids,
};

/* Register the I2C driver on module load. */
static int __init mpu3050_init(void)
{
	return i2c_add_driver(&mpu3050_i2c_driver);
}
module_init(mpu3050_init);

/* Unregister on module unload. */
static void __exit mpu3050_exit(void)
{
	i2c_del_driver(&mpu3050_i2c_driver);
}
module_exit(mpu3050_exit);

MODULE_AUTHOR("Wistron Corp.");
MODULE_DESCRIPTION("MPU3050 Tri-axis gyroscope driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 014dd4ad0d4f..3c1a432c14dc 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -28,7 +28,7 @@
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/workqueue.h> 29#include <linux/workqueue.h>
30#include <linux/i2c/twl.h> 30#include <linux/i2c/twl.h>
31#include <linux/mfd/twl4030-codec.h> 31#include <linux/mfd/twl4030-audio.h>
32#include <linux/input.h> 32#include <linux/input.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34 34
@@ -67,7 +67,7 @@ static void vibra_enable(struct vibra_info *info)
67{ 67{
68 u8 reg; 68 u8 reg;
69 69
70 twl4030_codec_enable_resource(TWL4030_CODEC_RES_POWER); 70 twl4030_audio_enable_resource(TWL4030_AUDIO_RES_POWER);
71 71
72 /* turn H-Bridge on */ 72 /* turn H-Bridge on */
73 twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, 73 twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE,
@@ -75,7 +75,7 @@ static void vibra_enable(struct vibra_info *info)
75 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, 75 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
76 (reg | TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL); 76 (reg | TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
77 77
78 twl4030_codec_enable_resource(TWL4030_CODEC_RES_APLL); 78 twl4030_audio_enable_resource(TWL4030_AUDIO_RES_APLL);
79 79
80 info->enabled = true; 80 info->enabled = true;
81} 81}
@@ -90,8 +90,8 @@ static void vibra_disable(struct vibra_info *info)
90 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, 90 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
91 (reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL); 91 (reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
92 92
93 twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL); 93 twl4030_audio_disable_resource(TWL4030_AUDIO_RES_APLL);
94 twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER); 94 twl4030_audio_disable_resource(TWL4030_AUDIO_RES_POWER);
95 95
96 info->enabled = false; 96 info->enabled = false;
97} 97}
@@ -196,7 +196,7 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
196 196
197static int __devinit twl4030_vibra_probe(struct platform_device *pdev) 197static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
198{ 198{
199 struct twl4030_codec_vibra_data *pdata = pdev->dev.platform_data; 199 struct twl4030_vibra_data *pdata = pdev->dev.platform_data;
200 struct vibra_info *info; 200 struct vibra_info *info;
201 int ret; 201 int ret;
202 202
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
new file mode 100644
index 000000000000..c43002e7ec72
--- /dev/null
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -0,0 +1,423 @@
1/*
2 * twl6040-vibra.c - TWL6040 Vibrator driver
3 *
4 * Author: Jorge Eduardo Candelaria <jorge.candelaria@ti.com>
5 * Author: Misael Lopez Cruz <misael.lopez@ti.com>
6 *
7 * Copyright: (C) 2011 Texas Instruments, Inc.
8 *
9 * Based on twl4030-vibra.c by Henrik Saari <henrik.saari@nokia.com>
10 * Felipe Balbi <felipe.balbi@nokia.com>
11 * Jari Vanhala <ext-javi.vanhala@nokia.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 * 02110-1301 USA
26 *
27 */
28#include <linux/module.h>
29#include <linux/platform_device.h>
30#include <linux/workqueue.h>
31#include <linux/i2c/twl.h>
32#include <linux/mfd/twl6040.h>
33#include <linux/slab.h>
34#include <linux/delay.h>
35#include <linux/regulator/consumer.h>
36
/* FF effect direction values >= this mean "reverse" (2nd half circle) */
#define EFFECT_DIR_180_DEG	0x8000

/* Recommended modulation index 85% */
#define TWL6040_VIBRA_MOD	85

/* two supplies: vddvibl (left/weak) and vddvibr (right/strong) */
#define TWL6040_NUM_SUPPLIES	2

/* per-device driver state */
struct vibra_info {
	struct device *dev;
	struct input_dev *input_dev;
	struct workqueue_struct *workqueue;	/* runs play_work */
	struct work_struct play_work;
	struct mutex mutex;		/* guards enabled + HW enable/disable */
	int irq;

	bool enabled;			/* vibrator currently driven */
	int weak_speed;			/* rumble weak magnitude */
	int strong_speed;		/* rumble strong magnitude */
	int direction;			/* +1 forward, -1 reverse */

	/* driver/motor resistances from platform data, used for scaling */
	unsigned int vibldrv_res;
	unsigned int vibrdrv_res;
	unsigned int viblmotor_res;
	unsigned int vibrmotor_res;

	struct regulator_bulk_data supplies[TWL6040_NUM_SUPPLIES];

	struct twl6040 *twl6040;	/* parent MFD chip handle */
};
66
67static irqreturn_t twl6040_vib_irq_handler(int irq, void *data)
68{
69 struct vibra_info *info = data;
70 struct twl6040 *twl6040 = info->twl6040;
71 u8 status;
72
73 status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
74 if (status & TWL6040_VIBLOCDET) {
75 dev_warn(info->dev, "Left Vibrator overcurrent detected\n");
76 twl6040_clear_bits(twl6040, TWL6040_REG_VIBCTLL,
77 TWL6040_VIBENAL);
78 }
79 if (status & TWL6040_VIBROCDET) {
80 dev_warn(info->dev, "Right Vibrator overcurrent detected\n");
81 twl6040_clear_bits(twl6040, TWL6040_REG_VIBCTLR,
82 TWL6040_VIBENAR);
83 }
84
85 return IRQ_HANDLED;
86}
87
/*
 * Power up the vibrator path: enable the motor supplies, power the
 * codec, then enable both driver channels.  Must be called with
 * info->mutex held (callers serialize against disable/suspend).
 */
static void twl6040_vibra_enable(struct vibra_info *info)
{
	struct twl6040 *twl6040 = info->twl6040;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(info->supplies), info->supplies);
	if (ret) {
		dev_err(info->dev, "failed to enable regulators %d\n", ret);
		return;
	}

	twl6040_power(info->twl6040, 1);
	if (twl6040->rev <= TWL6040_REV_ES1_1) {
		/*
		 * ERRATA: Disable overcurrent protection for at least
		 * 3ms when enabling vibrator drivers to avoid false
		 * overcurrent detection
		 */
		twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL,
				  TWL6040_VIBENAL | TWL6040_VIBCTRLL);
		twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR,
				  TWL6040_VIBENAR | TWL6040_VIBCTRLR);
		usleep_range(3000, 3500);
	}

	/* final enable with overcurrent protection active again */
	twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL,
			  TWL6040_VIBENAL);
	twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR,
			  TWL6040_VIBENAR);

	info->enabled = true;
}
120
121static void twl6040_vibra_disable(struct vibra_info *info)
122{
123 struct twl6040 *twl6040 = info->twl6040;
124
125 twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL, 0x00);
126 twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR, 0x00);
127 twl6040_power(info->twl6040, 0);
128
129 regulator_bulk_disable(ARRAY_SIZE(info->supplies), info->supplies);
130
131 info->enabled = false;
132}
133
134static u8 twl6040_vibra_code(int vddvib, int vibdrv_res, int motor_res,
135 int speed, int direction)
136{
137 int vpk, max_code;
138 u8 vibdat;
139
140 /* output swing */
141 vpk = (vddvib * motor_res * TWL6040_VIBRA_MOD) /
142 (100 * (vibdrv_res + motor_res));
143
144 /* 50mV per VIBDAT code step */
145 max_code = vpk / 50;
146 if (max_code > TWL6040_VIBDAT_MAX)
147 max_code = TWL6040_VIBDAT_MAX;
148
149 /* scale speed to max allowed code */
150 vibdat = (u8)((speed * max_code) / USHRT_MAX);
151
152 /* 2's complement for direction > 180 degrees */
153 vibdat *= direction;
154
155 return vibdat;
156}
157
158static void twl6040_vibra_set_effect(struct vibra_info *info)
159{
160 struct twl6040 *twl6040 = info->twl6040;
161 u8 vibdatl, vibdatr;
162 int volt;
163
164 /* weak motor */
165 volt = regulator_get_voltage(info->supplies[0].consumer) / 1000;
166 vibdatl = twl6040_vibra_code(volt, info->vibldrv_res,
167 info->viblmotor_res,
168 info->weak_speed, info->direction);
169
170 /* strong motor */
171 volt = regulator_get_voltage(info->supplies[1].consumer) / 1000;
172 vibdatr = twl6040_vibra_code(volt, info->vibrdrv_res,
173 info->vibrmotor_res,
174 info->strong_speed, info->direction);
175
176 twl6040_reg_write(twl6040, TWL6040_REG_VIBDATL, vibdatl);
177 twl6040_reg_write(twl6040, TWL6040_REG_VIBDATR, vibdatr);
178}
179
180static void vibra_play_work(struct work_struct *work)
181{
182 struct vibra_info *info = container_of(work,
183 struct vibra_info, play_work);
184
185 mutex_lock(&info->mutex);
186
187 if (info->weak_speed || info->strong_speed) {
188 if (!info->enabled)
189 twl6040_vibra_enable(info);
190
191 twl6040_vibra_set_effect(info);
192 } else if (info->enabled)
193 twl6040_vibra_disable(info);
194
195 mutex_unlock(&info->mutex);
196}
197
198static int vibra_play(struct input_dev *input, void *data,
199 struct ff_effect *effect)
200{
201 struct vibra_info *info = input_get_drvdata(input);
202 int ret;
203
204 info->weak_speed = effect->u.rumble.weak_magnitude;
205 info->strong_speed = effect->u.rumble.strong_magnitude;
206 info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;
207
208 ret = queue_work(info->workqueue, &info->play_work);
209 if (!ret) {
210 dev_info(&input->dev, "work is already on queue\n");
211 return ret;
212 }
213
214 return 0;
215}
216
217static void twl6040_vibra_close(struct input_dev *input)
218{
219 struct vibra_info *info = input_get_drvdata(input);
220
221 cancel_work_sync(&info->play_work);
222
223 mutex_lock(&info->mutex);
224
225 if (info->enabled)
226 twl6040_vibra_disable(info);
227
228 mutex_unlock(&info->mutex);
229}
230
/*
 * Fix: the original guard was `#if CONFIG_PM_SLEEP`, testing the value
 * of a macro that may be entirely undefined (triggering -Wundef and
 * relying on the preprocessor's implicit 0).  The conventional and
 * intended guard is `#ifdef CONFIG_PM_SLEEP`.
 */
#ifdef CONFIG_PM_SLEEP
/* Turn the vibrator off before the system sleeps. */
static int twl6040_vibra_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct vibra_info *info = platform_get_drvdata(pdev);

	mutex_lock(&info->mutex);

	if (info->enabled)
		twl6040_vibra_disable(info);

	mutex_unlock(&info->mutex);

	return 0;
}

#endif

/* No resume handler: the vibrator is re-enabled on the next effect. */
static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
250
/*
 * Probe: validate platform data, allocate the input/FF device, hook up
 * the overcurrent IRQ and the motor supply regulators, and create the
 * effect workqueue.
 *
 * The error unwind is interleaved with an input_dev NULL-ing trick:
 * once input_register_device() has succeeded, err_irq unregisters the
 * device (which frees its FF state) and clears info->input_dev so the
 * err_iff/err_ialloc labels below become no-ops.  Read the labels
 * bottom-up against the acquisition order above.
 *
 * NOTE(review): the pdata type is struct twl4030_vibra_data -- shared
 * with the twl4030 driver after the twl4030-audio MFD rework; confirm
 * against the MFD header.
 */
static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
{
	struct twl4030_vibra_data *pdata = pdev->dev.platform_data;
	struct vibra_info *info;
	int ret;

	if (!pdata) {
		dev_err(&pdev->dev, "platform_data not available\n");
		return -EINVAL;
	}

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		dev_err(&pdev->dev, "couldn't allocate memory\n");
		return -ENOMEM;
	}

	info->dev = &pdev->dev;
	/* parent MFD device owns the twl6040 core handle */
	info->twl6040 = dev_get_drvdata(pdev->dev.parent);
	info->vibldrv_res = pdata->vibldrv_res;
	info->vibrdrv_res = pdata->vibrdrv_res;
	info->viblmotor_res = pdata->viblmotor_res;
	info->vibrmotor_res = pdata->vibrmotor_res;
	/* each channel needs at least one nonzero resistance to scale with */
	if ((!info->vibldrv_res && !info->viblmotor_res) ||
	    (!info->vibrdrv_res && !info->vibrmotor_res)) {
		dev_err(info->dev, "invalid vibra driver/motor resistance\n");
		ret = -EINVAL;
		goto err_kzalloc;
	}

	info->irq = platform_get_irq(pdev, 0);
	if (info->irq < 0) {
		dev_err(info->dev, "invalid irq\n");
		ret = -EINVAL;
		goto err_kzalloc;
	}

	mutex_init(&info->mutex);

	info->input_dev = input_allocate_device();
	if (info->input_dev == NULL) {
		dev_err(info->dev, "couldn't allocate input device\n");
		ret = -ENOMEM;
		goto err_kzalloc;
	}

	input_set_drvdata(info->input_dev, info);

	info->input_dev->name = "twl6040:vibrator";
	info->input_dev->id.version = 1;
	info->input_dev->dev.parent = pdev->dev.parent;
	info->input_dev->close = twl6040_vibra_close;
	__set_bit(FF_RUMBLE, info->input_dev->ffbit);

	ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
	if (ret < 0) {
		dev_err(info->dev, "couldn't register vibrator to FF\n");
		goto err_ialloc;
	}

	ret = input_register_device(info->input_dev);
	if (ret < 0) {
		dev_err(info->dev, "couldn't register input device\n");
		goto err_iff;
	}

	platform_set_drvdata(pdev, info);

	ret = request_threaded_irq(info->irq, NULL, twl6040_vib_irq_handler, 0,
				   "twl6040_irq_vib", info);
	if (ret) {
		dev_err(info->dev, "VIB IRQ request failed: %d\n", ret);
		goto err_irq;
	}

	info->supplies[0].supply = "vddvibl";
	info->supplies[1].supply = "vddvibr";
	ret = regulator_bulk_get(info->dev, ARRAY_SIZE(info->supplies),
				 info->supplies);
	if (ret) {
		dev_err(info->dev, "couldn't get regulators %d\n", ret);
		goto err_regulator;
	}

	/* optional fixed supply voltages from platform data */
	if (pdata->vddvibl_uV) {
		ret = regulator_set_voltage(info->supplies[0].consumer,
					    pdata->vddvibl_uV,
					    pdata->vddvibl_uV);
		if (ret) {
			dev_err(info->dev, "failed to set VDDVIBL volt %d\n",
				ret);
			goto err_voltage;
		}
	}

	if (pdata->vddvibr_uV) {
		ret = regulator_set_voltage(info->supplies[1].consumer,
					    pdata->vddvibr_uV,
					    pdata->vddvibr_uV);
		if (ret) {
			dev_err(info->dev, "failed to set VDDVIBR volt %d\n",
				ret);
			goto err_voltage;
		}
	}

	info->workqueue = alloc_workqueue("twl6040-vibra", 0, 0);
	if (info->workqueue == NULL) {
		dev_err(info->dev, "couldn't create workqueue\n");
		ret = -ENOMEM;
		goto err_voltage;
	}
	INIT_WORK(&info->play_work, vibra_play_work);

	return 0;

err_voltage:
	regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
err_regulator:
	free_irq(info->irq, info);
err_irq:
	/* registered device: unregister frees FF state; skip labels below */
	input_unregister_device(info->input_dev);
	info->input_dev = NULL;
err_iff:
	if (info->input_dev)
		input_ff_destroy(info->input_dev);
err_ialloc:
	/* input_free_device() is a no-op for NULL */
	input_free_device(info->input_dev);
err_kzalloc:
	kfree(info);
	return ret;
}
383
/*
 * Unbind: tear down in reverse acquisition order.  Unregistering the
 * input device first runs twl6040_vibra_close(), which flushes pending
 * effect work and switches the hardware off before the IRQ, the
 * regulators and the workqueue are released.
 */
static int __devexit twl6040_vibra_remove(struct platform_device *pdev)
{
	struct vibra_info *info = platform_get_drvdata(pdev);

	input_unregister_device(info->input_dev);
	free_irq(info->irq, info);
	regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
	destroy_workqueue(info->workqueue);
	kfree(info);

	return 0;
}
396
static struct platform_driver twl6040_vibra_driver = {
	.probe		= twl6040_vibra_probe,
	.remove		= __devexit_p(twl6040_vibra_remove),
	.driver		= {
		.name	= "twl6040-vibra",
		.owner	= THIS_MODULE,
		.pm	= &twl6040_vibra_pm_ops,
	},
};

/* Register the platform driver on module load. */
static int __init twl6040_vibra_init(void)
{
	return platform_driver_register(&twl6040_vibra_driver);
}
module_init(twl6040_vibra_init);

/* Unregister on module unload. */
static void __exit twl6040_vibra_exit(void)
{
	platform_driver_unregister(&twl6040_vibra_driver);
}
module_exit(twl6040_vibra_exit);

MODULE_ALIAS("platform:twl6040-vibra");
MODULE_DESCRIPTION("TWL6040 Vibra driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jorge Eduardo Candelaria <jorge.candelaria@ti.com>");
MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 62bae99424e6..ad2e51c04db8 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -373,7 +373,7 @@ static struct xenbus_driver xenkbd_driver = {
373 373
374static int __init xenkbd_init(void) 374static int __init xenkbd_init(void)
375{ 375{
376 if (!xen_pv_domain()) 376 if (!xen_domain())
377 return -ENODEV; 377 return -ENODEV;
378 378
379 /* Nothing to do if running in dom0. */ 379 /* Nothing to do if running in dom0. */
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 7b6ce178f1b6..58902fbb9896 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -191,7 +191,7 @@ static void __exit gpio_mouse_exit(void)
191} 191}
192module_exit(gpio_mouse_exit); 192module_exit(gpio_mouse_exit);
193 193
194MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>"); 194MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
195MODULE_DESCRIPTION("GPIO mouse driver"); 195MODULE_DESCRIPTION("GPIO mouse driver");
196MODULE_LICENSE("GPL"); 196MODULE_LICENSE("GPL");
197MODULE_ALIAS("platform:gpio_mouse"); /* work with hotplug and coldplug */ 197MODULE_ALIAS("platform:gpio_mouse"); /* work with hotplug and coldplug */
diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c
index c31ad11df6bb..83bcaba96b89 100644
--- a/drivers/input/mouse/lifebook.c
+++ b/drivers/input/mouse/lifebook.c
@@ -33,7 +33,7 @@ static const char *desired_serio_phys;
33static int lifebook_limit_serio3(const struct dmi_system_id *d) 33static int lifebook_limit_serio3(const struct dmi_system_id *d)
34{ 34{
35 desired_serio_phys = "isa0060/serio3"; 35 desired_serio_phys = "isa0060/serio3";
36 return 0; 36 return 1;
37} 37}
38 38
39static bool lifebook_use_6byte_proto; 39static bool lifebook_use_6byte_proto;
@@ -41,7 +41,7 @@ static bool lifebook_use_6byte_proto;
41static int lifebook_set_6byte_proto(const struct dmi_system_id *d) 41static int lifebook_set_6byte_proto(const struct dmi_system_id *d)
42{ 42{
43 lifebook_use_6byte_proto = true; 43 lifebook_use_6byte_proto = true;
44 return 0; 44 return 1;
45} 45}
46 46
47static const struct dmi_system_id __initconst lifebook_dmi_table[] = { 47static const struct dmi_system_id __initconst lifebook_dmi_table[] = {
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index 943cfec15665..6c5d84fcdea1 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -12,7 +12,6 @@
12 12
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/input.h> 14#include <linux/input.h>
15#include <linux/version.h>
16#include <linux/interrupt.h> 15#include <linux/interrupt.h>
17#include <linux/module.h> 16#include <linux/module.h>
18#include <linux/platform_device.h> 17#include <linux/platform_device.h>
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index 1242775fee19..2fc887a51066 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -20,7 +20,6 @@
20 */ 20 */
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/version.h>
24#include <linux/input.h> 23#include <linux/input.h>
25#include <linux/ctype.h> 24#include <linux/ctype.h>
26#include <linux/libps2.h> 25#include <linux/libps2.h>
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index e06e045bf907..5538fc657af1 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -207,27 +207,37 @@ static int synaptics_identify(struct psmouse *psmouse)
207static int synaptics_resolution(struct psmouse *psmouse) 207static int synaptics_resolution(struct psmouse *psmouse)
208{ 208{
209 struct synaptics_data *priv = psmouse->private; 209 struct synaptics_data *priv = psmouse->private;
210 unsigned char res[3]; 210 unsigned char resp[3];
211 unsigned char max[3];
212 211
213 if (SYN_ID_MAJOR(priv->identity) < 4) 212 if (SYN_ID_MAJOR(priv->identity) < 4)
214 return 0; 213 return 0;
215 214
216 if (synaptics_send_cmd(psmouse, SYN_QUE_RESOLUTION, res) == 0) { 215 if (synaptics_send_cmd(psmouse, SYN_QUE_RESOLUTION, resp) == 0) {
217 if (res[0] != 0 && (res[1] & 0x80) && res[2] != 0) { 216 if (resp[0] != 0 && (resp[1] & 0x80) && resp[2] != 0) {
218 priv->x_res = res[0]; /* x resolution in units/mm */ 217 priv->x_res = resp[0]; /* x resolution in units/mm */
219 priv->y_res = res[2]; /* y resolution in units/mm */ 218 priv->y_res = resp[2]; /* y resolution in units/mm */
220 } 219 }
221 } 220 }
222 221
223 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 && 222 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
224 SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) { 223 SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
225 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_DIMENSIONS, max)) { 224 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
226 printk(KERN_ERR "Synaptics claims to have dimensions query," 225 printk(KERN_ERR "Synaptics claims to have max coordinates"
227 " but I'm not able to read it.\n"); 226 " query, but I'm not able to read it.\n");
227 } else {
228 priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
229 priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
230 }
231 }
232
233 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 &&
234 SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) {
235 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) {
236 printk(KERN_ERR "Synaptics claims to have min coordinates"
237 " query, but I'm not able to read it.\n");
228 } else { 238 } else {
229 priv->x_max = (max[0] << 5) | ((max[1] & 0x0f) << 1); 239 priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
230 priv->y_max = (max[2] << 5) | ((max[1] & 0xf0) >> 3); 240 priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
231 } 241 }
232 } 242 }
233 243
@@ -406,26 +416,10 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
406 memset(hw, 0, sizeof(struct synaptics_hw_state)); 416 memset(hw, 0, sizeof(struct synaptics_hw_state));
407 417
408 if (SYN_MODEL_NEWABS(priv->model_id)) { 418 if (SYN_MODEL_NEWABS(priv->model_id)) {
409 hw->x = (((buf[3] & 0x10) << 8) |
410 ((buf[1] & 0x0f) << 8) |
411 buf[4]);
412 hw->y = (((buf[3] & 0x20) << 7) |
413 ((buf[1] & 0xf0) << 4) |
414 buf[5]);
415
416 hw->z = buf[2];
417 hw->w = (((buf[0] & 0x30) >> 2) | 419 hw->w = (((buf[0] & 0x30) >> 2) |
418 ((buf[0] & 0x04) >> 1) | 420 ((buf[0] & 0x04) >> 1) |
419 ((buf[3] & 0x04) >> 2)); 421 ((buf[3] & 0x04) >> 2));
420 422
421 if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) && hw->w == 2) {
422 /* Gesture packet: (x, y, z) at half resolution */
423 priv->mt.x = (((buf[4] & 0x0f) << 8) | buf[1]) << 1;
424 priv->mt.y = (((buf[4] & 0xf0) << 4) | buf[2]) << 1;
425 priv->mt.z = ((buf[3] & 0x30) | (buf[5] & 0x0f)) << 1;
426 return 1;
427 }
428
429 hw->left = (buf[0] & 0x01) ? 1 : 0; 423 hw->left = (buf[0] & 0x01) ? 1 : 0;
430 hw->right = (buf[0] & 0x02) ? 1 : 0; 424 hw->right = (buf[0] & 0x02) ? 1 : 0;
431 425
@@ -448,6 +442,22 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
448 hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; 442 hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
449 } 443 }
450 444
445 if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) && hw->w == 2) {
446 /* Gesture packet: (x, y, z) at half resolution */
447 priv->mt.x = (((buf[4] & 0x0f) << 8) | buf[1]) << 1;
448 priv->mt.y = (((buf[4] & 0xf0) << 4) | buf[2]) << 1;
449 priv->mt.z = ((buf[3] & 0x30) | (buf[5] & 0x0f)) << 1;
450 return 1;
451 }
452
453 hw->x = (((buf[3] & 0x10) << 8) |
454 ((buf[1] & 0x0f) << 8) |
455 buf[4]);
456 hw->y = (((buf[3] & 0x20) << 7) |
457 ((buf[1] & 0xf0) << 4) |
458 buf[5]);
459 hw->z = buf[2];
460
451 if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && 461 if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
452 ((buf[0] ^ buf[3]) & 0x02)) { 462 ((buf[0] ^ buf[3]) & 0x02)) {
453 switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { 463 switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
@@ -485,7 +495,8 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
485 return 0; 495 return 0;
486} 496}
487 497
488static void set_slot(struct input_dev *dev, int slot, bool active, int x, int y) 498static void synaptics_report_semi_mt_slot(struct input_dev *dev, int slot,
499 bool active, int x, int y)
489{ 500{
490 input_mt_slot(dev, slot); 501 input_mt_slot(dev, slot);
491 input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); 502 input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
@@ -502,14 +513,16 @@ static void synaptics_report_semi_mt_data(struct input_dev *dev,
502 int num_fingers) 513 int num_fingers)
503{ 514{
504 if (num_fingers >= 2) { 515 if (num_fingers >= 2) {
505 set_slot(dev, 0, true, min(a->x, b->x), min(a->y, b->y)); 516 synaptics_report_semi_mt_slot(dev, 0, true, min(a->x, b->x),
506 set_slot(dev, 1, true, max(a->x, b->x), max(a->y, b->y)); 517 min(a->y, b->y));
518 synaptics_report_semi_mt_slot(dev, 1, true, max(a->x, b->x),
519 max(a->y, b->y));
507 } else if (num_fingers == 1) { 520 } else if (num_fingers == 1) {
508 set_slot(dev, 0, true, a->x, a->y); 521 synaptics_report_semi_mt_slot(dev, 0, true, a->x, a->y);
509 set_slot(dev, 1, false, 0, 0); 522 synaptics_report_semi_mt_slot(dev, 1, false, 0, 0);
510 } else { 523 } else {
511 set_slot(dev, 0, false, 0, 0); 524 synaptics_report_semi_mt_slot(dev, 0, false, 0, 0);
512 set_slot(dev, 1, false, 0, 0); 525 synaptics_report_semi_mt_slot(dev, 1, false, 0, 0);
513 } 526 }
514} 527}
515 528
@@ -684,23 +697,36 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse)
684static void set_input_params(struct input_dev *dev, struct synaptics_data *priv) 697static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
685{ 698{
686 int i; 699 int i;
700 int fuzz = SYN_CAP_REDUCED_FILTERING(priv->ext_cap_0c) ?
701 SYN_REDUCED_FILTER_FUZZ : 0;
687 702
688 __set_bit(INPUT_PROP_POINTER, dev->propbit); 703 __set_bit(INPUT_PROP_POINTER, dev->propbit);
689 704
690 __set_bit(EV_ABS, dev->evbit); 705 __set_bit(EV_ABS, dev->evbit);
691 input_set_abs_params(dev, ABS_X, 706 input_set_abs_params(dev, ABS_X,
692 XMIN_NOMINAL, priv->x_max ?: XMAX_NOMINAL, 0, 0); 707 priv->x_min ?: XMIN_NOMINAL,
708 priv->x_max ?: XMAX_NOMINAL,
709 fuzz, 0);
693 input_set_abs_params(dev, ABS_Y, 710 input_set_abs_params(dev, ABS_Y,
694 YMIN_NOMINAL, priv->y_max ?: YMAX_NOMINAL, 0, 0); 711 priv->y_min ?: YMIN_NOMINAL,
712 priv->y_max ?: YMAX_NOMINAL,
713 fuzz, 0);
695 input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0); 714 input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
696 715
697 if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) { 716 if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) {
698 __set_bit(INPUT_PROP_SEMI_MT, dev->propbit); 717 __set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
699 input_mt_init_slots(dev, 2); 718 input_mt_init_slots(dev, 2);
700 input_set_abs_params(dev, ABS_MT_POSITION_X, XMIN_NOMINAL, 719 input_set_abs_params(dev, ABS_MT_POSITION_X,
701 priv->x_max ?: XMAX_NOMINAL, 0, 0); 720 priv->x_min ?: XMIN_NOMINAL,
702 input_set_abs_params(dev, ABS_MT_POSITION_Y, YMIN_NOMINAL, 721 priv->x_max ?: XMAX_NOMINAL,
703 priv->y_max ?: YMAX_NOMINAL, 0, 0); 722 fuzz, 0);
723 input_set_abs_params(dev, ABS_MT_POSITION_Y,
724 priv->y_min ?: YMIN_NOMINAL,
725 priv->y_max ?: YMAX_NOMINAL,
726 fuzz, 0);
727
728 input_abs_set_res(dev, ABS_MT_POSITION_X, priv->x_res);
729 input_abs_set_res(dev, ABS_MT_POSITION_Y, priv->y_res);
704 } 730 }
705 731
706 if (SYN_CAP_PALMDETECT(priv->capabilities)) 732 if (SYN_CAP_PALMDETECT(priv->capabilities))
@@ -971,4 +997,3 @@ bool synaptics_supported(void)
971} 997}
972 998
973#endif /* CONFIG_MOUSE_PS2_SYNAPTICS */ 999#endif /* CONFIG_MOUSE_PS2_SYNAPTICS */
974
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 7453938bf5ef..ca040aa80fa7 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -19,7 +19,8 @@
19#define SYN_QUE_RESOLUTION 0x08 19#define SYN_QUE_RESOLUTION 0x08
20#define SYN_QUE_EXT_CAPAB 0x09 20#define SYN_QUE_EXT_CAPAB 0x09
21#define SYN_QUE_EXT_CAPAB_0C 0x0c 21#define SYN_QUE_EXT_CAPAB_0C 0x0c
22#define SYN_QUE_EXT_DIMENSIONS 0x0d 22#define SYN_QUE_EXT_MAX_COORDS 0x0d
23#define SYN_QUE_EXT_MIN_COORDS 0x0f
23 24
24/* synatics modes */ 25/* synatics modes */
25#define SYN_BIT_ABSOLUTE_MODE (1 << 7) 26#define SYN_BIT_ABSOLUTE_MODE (1 << 7)
@@ -66,18 +67,21 @@
66 * 1 0x60 multifinger mode identifies firmware finger counting 67 * 1 0x60 multifinger mode identifies firmware finger counting
67 * (not reporting!) algorithm. 68 * (not reporting!) algorithm.
68 * Not particularly meaningful 69 * Not particularly meaningful
69 * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered 70 * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered
70 * 2 0x01 clickpad bit 1 2-button ClickPad 71 * 2 0x01 clickpad bit 1 2-button ClickPad
71 * 2 0x02 deluxe LED controls touchpad support LED commands 72 * 2 0x02 deluxe LED controls touchpad support LED commands
72 * ala multimedia control bar 73 * ala multimedia control bar
73 * 2 0x04 reduced filtering firmware does less filtering on 74 * 2 0x04 reduced filtering firmware does less filtering on
74 * position data, driver should watch 75 * position data, driver should watch
75 * for noise. 76 * for noise.
77 * 2 0x20 report min query 0x0f gives min coord reported
76 */ 78 */
77#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ 79#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
78#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ 80#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
79#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000) 81#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)
82#define SYN_CAP_MIN_DIMENSIONS(ex0c) ((ex0c) & 0x002000)
80#define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000) 83#define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000)
84#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
81 85
82/* synaptics modes query bits */ 86/* synaptics modes query bits */
83#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) 87#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
@@ -104,6 +108,9 @@
104#define SYN_NEWABS_RELAXED 2 108#define SYN_NEWABS_RELAXED 2
105#define SYN_OLDABS 3 109#define SYN_OLDABS 3
106 110
111/* amount to fuzz position data when touchpad reports reduced filtering */
112#define SYN_REDUCED_FILTER_FUZZ 8
113
107/* 114/*
108 * A structure to describe the state of the touchpad hardware (buttons and pad) 115 * A structure to describe the state of the touchpad hardware (buttons and pad)
109 */ 116 */
@@ -130,7 +137,8 @@ struct synaptics_data {
130 unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */ 137 unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */
131 unsigned long int identity; /* Identification */ 138 unsigned long int identity; /* Identification */
132 unsigned int x_res, y_res; /* X/Y resolution in units/mm */ 139 unsigned int x_res, y_res; /* X/Y resolution in units/mm */
133 unsigned int x_max, y_max; /* Max dimensions (from FW) */ 140 unsigned int x_max, y_max; /* Max coordinates (from FW) */
141 unsigned int x_min, y_min; /* Min coordinates (from FW) */
134 142
135 unsigned char pkt_type; /* packet type - old, new, etc */ 143 unsigned char pkt_type; /* packet type - old, new, etc */
136 unsigned char mode; /* current mode byte */ 144 unsigned char mode; /* current mode byte */
diff --git a/drivers/input/serio/at32psif.c b/drivers/input/serio/at32psif.c
index 6ee8f0ddad51..95280f9207e1 100644
--- a/drivers/input/serio/at32psif.c
+++ b/drivers/input/serio/at32psif.c
@@ -372,6 +372,6 @@ static void __exit psif_exit(void)
372module_init(psif_init); 372module_init(psif_init);
373module_exit(psif_exit); 373module_exit(psif_exit);
374 374
375MODULE_AUTHOR("Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>"); 375MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
376MODULE_DESCRIPTION("Atmel AVR32 PSIF PS/2 driver"); 376MODULE_DESCRIPTION("Atmel AVR32 PSIF PS/2 driver");
377MODULE_LICENSE("GPL"); 377MODULE_LICENSE("GPL");
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 42206205e4f5..979c443bf1ef 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -795,7 +795,7 @@ int hp_sdc_release_cooked_irq(hp_sdc_irqhook *callback)
795 795
796/************************* Keepalive timer task *********************/ 796/************************* Keepalive timer task *********************/
797 797
798void hp_sdc_kicker (unsigned long data) 798static void hp_sdc_kicker(unsigned long data)
799{ 799{
800 tasklet_schedule(&hp_sdc.task); 800 tasklet_schedule(&hp_sdc.task);
801 /* Re-insert the periodic task. */ 801 /* Re-insert the periodic task. */
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 0a619c558bfb..6d89fd1842c3 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -225,7 +225,6 @@
225 /* toolMode codes 225 /* toolMode codes
226 */ 226 */
227#define AIPTEK_TOOL_BUTTON_PEN_MODE BTN_TOOL_PEN 227#define AIPTEK_TOOL_BUTTON_PEN_MODE BTN_TOOL_PEN
228#define AIPTEK_TOOL_BUTTON_PEN_MODE BTN_TOOL_PEN
229#define AIPTEK_TOOL_BUTTON_PENCIL_MODE BTN_TOOL_PENCIL 228#define AIPTEK_TOOL_BUTTON_PENCIL_MODE BTN_TOOL_PENCIL
230#define AIPTEK_TOOL_BUTTON_BRUSH_MODE BTN_TOOL_BRUSH 229#define AIPTEK_TOOL_BUTTON_BRUSH_MODE BTN_TOOL_BRUSH
231#define AIPTEK_TOOL_BUTTON_AIRBRUSH_MODE BTN_TOOL_AIRBRUSH 230#define AIPTEK_TOOL_BUTTON_AIRBRUSH_MODE BTN_TOOL_AIRBRUSH
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 08ba5ad9c9be..03ebcc8b24b5 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -15,6 +15,7 @@
15#include "wacom_wac.h" 15#include "wacom_wac.h"
16#include "wacom.h" 16#include "wacom.h"
17#include <linux/input/mt.h> 17#include <linux/input/mt.h>
18#include <linux/hid.h>
18 19
19/* resolution for penabled devices */ 20/* resolution for penabled devices */
20#define WACOM_PL_RES 20 21#define WACOM_PL_RES 20
@@ -264,6 +265,7 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
264 wacom->id[0] = 0; 265 wacom->id[0] = 0;
265 input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */ 266 input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
266 input_report_key(input, wacom->tool[0], prox); 267 input_report_key(input, wacom->tool[0], prox);
268 input_event(input, EV_MSC, MSC_SERIAL, 1);
267 input_sync(input); /* sync last event */ 269 input_sync(input); /* sync last event */
268 } 270 }
269 271
@@ -273,11 +275,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
273 prox = data[7] & 0xf8; 275 prox = data[7] & 0xf8;
274 if (prox || wacom->id[1]) { 276 if (prox || wacom->id[1]) {
275 wacom->id[1] = PAD_DEVICE_ID; 277 wacom->id[1] = PAD_DEVICE_ID;
276 input_report_key(input, BTN_0, (data[7] & 0x40)); 278 input_report_key(input, BTN_BACK, (data[7] & 0x40));
277 input_report_key(input, BTN_4, (data[7] & 0x80)); 279 input_report_key(input, BTN_FORWARD, (data[7] & 0x80));
278 rw = ((data[7] & 0x18) >> 3) - ((data[7] & 0x20) >> 3); 280 rw = ((data[7] & 0x18) >> 3) - ((data[7] & 0x20) >> 3);
279 input_report_rel(input, REL_WHEEL, rw); 281 input_report_rel(input, REL_WHEEL, rw);
280 input_report_key(input, BTN_TOOL_FINGER, 0xf0);
281 if (!prox) 282 if (!prox)
282 wacom->id[1] = 0; 283 wacom->id[1] = 0;
283 input_report_abs(input, ABS_MISC, wacom->id[1]); 284 input_report_abs(input, ABS_MISC, wacom->id[1]);
@@ -290,18 +291,17 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
290 prox = (data[7] & 0xf8) || data[8]; 291 prox = (data[7] & 0xf8) || data[8];
291 if (prox || wacom->id[1]) { 292 if (prox || wacom->id[1]) {
292 wacom->id[1] = PAD_DEVICE_ID; 293 wacom->id[1] = PAD_DEVICE_ID;
293 input_report_key(input, BTN_0, (data[7] & 0x08)); 294 input_report_key(input, BTN_BACK, (data[7] & 0x08));
294 input_report_key(input, BTN_1, (data[7] & 0x20)); 295 input_report_key(input, BTN_LEFT, (data[7] & 0x20));
295 input_report_key(input, BTN_4, (data[7] & 0x10)); 296 input_report_key(input, BTN_FORWARD, (data[7] & 0x10));
296 input_report_key(input, BTN_5, (data[7] & 0x40)); 297 input_report_key(input, BTN_RIGHT, (data[7] & 0x40));
297 input_report_abs(input, ABS_WHEEL, (data[8] & 0x7f)); 298 input_report_abs(input, ABS_WHEEL, (data[8] & 0x7f));
298 input_report_key(input, BTN_TOOL_FINGER, 0xf0);
299 if (!prox) 299 if (!prox)
300 wacom->id[1] = 0; 300 wacom->id[1] = 0;
301 input_report_abs(input, ABS_MISC, wacom->id[1]); 301 input_report_abs(input, ABS_MISC, wacom->id[1]);
302 input_event(input, EV_MSC, MSC_SERIAL, 0xf0); 302 input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
303 retval = 1;
303 } 304 }
304 retval = 1;
305 break; 305 break;
306 } 306 }
307exit: 307exit:
@@ -494,10 +494,6 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
494 494
495 /* pad packets. Works as a second tool and is always in prox */ 495 /* pad packets. Works as a second tool and is always in prox */
496 if (data[0] == WACOM_REPORT_INTUOSPAD) { 496 if (data[0] == WACOM_REPORT_INTUOSPAD) {
497 /* initiate the pad as a device */
498 if (wacom->tool[1] != BTN_TOOL_FINGER)
499 wacom->tool[1] = BTN_TOOL_FINGER;
500
501 if (features->type >= INTUOS4S && features->type <= INTUOS4L) { 497 if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
502 input_report_key(input, BTN_0, (data[2] & 0x01)); 498 input_report_key(input, BTN_0, (data[2] & 0x01));
503 input_report_key(input, BTN_1, (data[3] & 0x01)); 499 input_report_key(input, BTN_1, (data[3] & 0x01));
@@ -1080,18 +1076,14 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1080 1076
1081 switch (wacom_wac->features.type) { 1077 switch (wacom_wac->features.type) {
1082 case WACOM_MO: 1078 case WACOM_MO:
1083 __set_bit(BTN_1, input_dev->keybit);
1084 __set_bit(BTN_5, input_dev->keybit);
1085
1086 input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); 1079 input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
1087 /* fall through */ 1080 /* fall through */
1088 1081
1089 case WACOM_G4: 1082 case WACOM_G4:
1090 input_set_capability(input_dev, EV_MSC, MSC_SERIAL); 1083 input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
1091 1084
1092 __set_bit(BTN_TOOL_FINGER, input_dev->keybit); 1085 __set_bit(BTN_BACK, input_dev->keybit);
1093 __set_bit(BTN_0, input_dev->keybit); 1086 __set_bit(BTN_FORWARD, input_dev->keybit);
1094 __set_bit(BTN_4, input_dev->keybit);
1095 /* fall through */ 1087 /* fall through */
1096 1088
1097 case GRAPHIRE: 1089 case GRAPHIRE:
@@ -1127,10 +1119,12 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1127 case CINTIQ: 1119 case CINTIQ:
1128 for (i = 0; i < 8; i++) 1120 for (i = 0; i < 8; i++)
1129 __set_bit(BTN_0 + i, input_dev->keybit); 1121 __set_bit(BTN_0 + i, input_dev->keybit);
1130 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1131 1122
1132 input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); 1123 if (wacom_wac->features.type != WACOM_21UX2) {
1133 input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); 1124 input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
1125 input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
1126 }
1127
1134 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1128 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1135 wacom_setup_cintiq(wacom_wac); 1129 wacom_setup_cintiq(wacom_wac);
1136 break; 1130 break;
@@ -1151,8 +1145,6 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1151 __set_bit(BTN_2, input_dev->keybit); 1145 __set_bit(BTN_2, input_dev->keybit);
1152 __set_bit(BTN_3, input_dev->keybit); 1146 __set_bit(BTN_3, input_dev->keybit);
1153 1147
1154 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1155
1156 input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); 1148 input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
1157 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1149 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1158 /* fall through */ 1150 /* fall through */
@@ -1170,7 +1162,6 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1170 case INTUOS4S: 1162 case INTUOS4S:
1171 for (i = 0; i < 7; i++) 1163 for (i = 0; i < 7; i++)
1172 __set_bit(BTN_0 + i, input_dev->keybit); 1164 __set_bit(BTN_0 + i, input_dev->keybit);
1173 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1174 1165
1175 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1166 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1176 wacom_setup_intuos(wacom_wac); 1167 wacom_setup_intuos(wacom_wac);
@@ -1295,6 +1286,12 @@ static const struct wacom_features wacom_features_0x65 =
1295static const struct wacom_features wacom_features_0x69 = 1286static const struct wacom_features wacom_features_0x69 =
1296 { "Wacom Bamboo1", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, 1287 { "Wacom Bamboo1", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511,
1297 63, GRAPHIRE, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES }; 1288 63, GRAPHIRE, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES };
1289static const struct wacom_features wacom_features_0x6A =
1290 { "Wacom Bamboo1 4x6", WACOM_PKGLEN_GRAPHIRE, 14760, 9225, 1023,
1291 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1292static const struct wacom_features wacom_features_0x6B =
1293 { "Wacom Bamboo1 5x8", WACOM_PKGLEN_GRAPHIRE, 21648, 13530, 1023,
1294 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1298static const struct wacom_features wacom_features_0x20 = 1295static const struct wacom_features wacom_features_0x20 =
1299 { "Wacom Intuos 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023, 1296 { "Wacom Intuos 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023,
1300 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1297 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1427,6 +1424,9 @@ static const struct wacom_features wacom_features_0x90 =
1427static const struct wacom_features wacom_features_0x93 = 1424static const struct wacom_features wacom_features_0x93 =
1428 { "Wacom ISDv4 93", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 1425 { "Wacom ISDv4 93", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
1429 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1426 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1427static const struct wacom_features wacom_features_0x97 =
1428 { "Wacom ISDv4 97", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 511,
1429 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1430static const struct wacom_features wacom_features_0x9A = 1430static const struct wacom_features wacom_features_0x9A =
1431 { "Wacom ISDv4 9A", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 1431 { "Wacom ISDv4 9A", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
1432 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1432 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1458,7 +1458,7 @@ static const struct wacom_features wacom_features_0xD3 =
1458 { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 1458 { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023,
1459 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1459 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1460static const struct wacom_features wacom_features_0xD4 = 1460static const struct wacom_features wacom_features_0xD4 =
1461 { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 255, 1461 { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
1462 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1462 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1463static const struct wacom_features wacom_features_0xD6 = 1463static const struct wacom_features wacom_features_0xD6 =
1464 { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 1464 { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
@@ -1483,6 +1483,11 @@ static const struct wacom_features wacom_features_0x6004 =
1483 USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \ 1483 USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \
1484 .driver_info = (kernel_ulong_t)&wacom_features_##prod 1484 .driver_info = (kernel_ulong_t)&wacom_features_##prod
1485 1485
1486#define USB_DEVICE_DETAILED(prod, class, sub, proto) \
1487 USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_WACOM, prod, class, \
1488 sub, proto), \
1489 .driver_info = (kernel_ulong_t)&wacom_features_##prod
1490
1486#define USB_DEVICE_LENOVO(prod) \ 1491#define USB_DEVICE_LENOVO(prod) \
1487 USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \ 1492 USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \
1488 .driver_info = (kernel_ulong_t)&wacom_features_##prod 1493 .driver_info = (kernel_ulong_t)&wacom_features_##prod
@@ -1506,6 +1511,8 @@ const struct usb_device_id wacom_ids[] = {
1506 { USB_DEVICE_WACOM(0x64) }, 1511 { USB_DEVICE_WACOM(0x64) },
1507 { USB_DEVICE_WACOM(0x65) }, 1512 { USB_DEVICE_WACOM(0x65) },
1508 { USB_DEVICE_WACOM(0x69) }, 1513 { USB_DEVICE_WACOM(0x69) },
1514 { USB_DEVICE_WACOM(0x6A) },
1515 { USB_DEVICE_WACOM(0x6B) },
1509 { USB_DEVICE_WACOM(0x20) }, 1516 { USB_DEVICE_WACOM(0x20) },
1510 { USB_DEVICE_WACOM(0x21) }, 1517 { USB_DEVICE_WACOM(0x21) },
1511 { USB_DEVICE_WACOM(0x22) }, 1518 { USB_DEVICE_WACOM(0x22) },
@@ -1545,7 +1552,13 @@ const struct usb_device_id wacom_ids[] = {
1545 { USB_DEVICE_WACOM(0xC5) }, 1552 { USB_DEVICE_WACOM(0xC5) },
1546 { USB_DEVICE_WACOM(0xC6) }, 1553 { USB_DEVICE_WACOM(0xC6) },
1547 { USB_DEVICE_WACOM(0xC7) }, 1554 { USB_DEVICE_WACOM(0xC7) },
1548 { USB_DEVICE_WACOM(0xCE) }, 1555 /*
1556 * DTU-2231 has two interfaces on the same configuration,
1557 * only one is used.
1558 */
1559 { USB_DEVICE_DETAILED(0xCE, USB_CLASS_HID,
1560 USB_INTERFACE_SUBCLASS_BOOT,
1561 USB_INTERFACE_PROTOCOL_MOUSE) },
1549 { USB_DEVICE_WACOM(0xD0) }, 1562 { USB_DEVICE_WACOM(0xD0) },
1550 { USB_DEVICE_WACOM(0xD1) }, 1563 { USB_DEVICE_WACOM(0xD1) },
1551 { USB_DEVICE_WACOM(0xD2) }, 1564 { USB_DEVICE_WACOM(0xD2) },
@@ -1560,6 +1573,7 @@ const struct usb_device_id wacom_ids[] = {
1560 { USB_DEVICE_WACOM(0xCC) }, 1573 { USB_DEVICE_WACOM(0xCC) },
1561 { USB_DEVICE_WACOM(0x90) }, 1574 { USB_DEVICE_WACOM(0x90) },
1562 { USB_DEVICE_WACOM(0x93) }, 1575 { USB_DEVICE_WACOM(0x93) },
1576 { USB_DEVICE_WACOM(0x97) },
1563 { USB_DEVICE_WACOM(0x9A) }, 1577 { USB_DEVICE_WACOM(0x9A) },
1564 { USB_DEVICE_WACOM(0x9F) }, 1578 { USB_DEVICE_WACOM(0x9F) },
1565 { USB_DEVICE_WACOM(0xE2) }, 1579 { USB_DEVICE_WACOM(0xE2) },
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 5196861b86ef..d507b9b67806 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -967,17 +967,12 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784
967 ts->get_pendown_state = pdata->get_pendown_state; 967 ts->get_pendown_state = pdata->get_pendown_state;
968 } else if (gpio_is_valid(pdata->gpio_pendown)) { 968 } else if (gpio_is_valid(pdata->gpio_pendown)) {
969 969
970 err = gpio_request(pdata->gpio_pendown, "ads7846_pendown"); 970 err = gpio_request_one(pdata->gpio_pendown, GPIOF_IN,
971 "ads7846_pendown");
971 if (err) { 972 if (err) {
972 dev_err(&spi->dev, "failed to request pendown GPIO%d\n", 973 dev_err(&spi->dev,
973 pdata->gpio_pendown); 974 "failed to request/setup pendown GPIO%d: %d\n",
974 return err; 975 pdata->gpio_pendown, err);
975 }
976 err = gpio_direction_input(pdata->gpio_pendown);
977 if (err) {
978 dev_err(&spi->dev, "failed to setup pendown GPIO%d\n",
979 pdata->gpio_pendown);
980 gpio_free(pdata->gpio_pendown);
981 return err; 976 return err;
982 } 977 }
983 978
diff --git a/drivers/input/touchscreen/atmel-wm97xx.c b/drivers/input/touchscreen/atmel-wm97xx.c
index fa8e56bd9094..8034cbb20f74 100644
--- a/drivers/input/touchscreen/atmel-wm97xx.c
+++ b/drivers/input/touchscreen/atmel-wm97xx.c
@@ -164,7 +164,7 @@ static irqreturn_t atmel_wm97xx_channel_b_interrupt(int irq, void *dev_id)
164 164
165 data = ac97c_readl(atmel_wm97xx, CBRHR); 165 data = ac97c_readl(atmel_wm97xx, CBRHR);
166 value = data & 0x0fff; 166 value = data & 0x0fff;
167 source = data & WM97XX_ADCSRC_MASK; 167 source = data & WM97XX_ADCSEL_MASK;
168 pen_down = (data & WM97XX_PEN_DOWN) >> 8; 168 pen_down = (data & WM97XX_PEN_DOWN) >> 8;
169 169
170 if (source == WM97XX_ADCSEL_X) 170 if (source == WM97XX_ADCSEL_X)
@@ -442,6 +442,6 @@ static void __exit atmel_wm97xx_exit(void)
442} 442}
443module_exit(atmel_wm97xx_exit); 443module_exit(atmel_wm97xx_exit);
444 444
445MODULE_AUTHOR("Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>"); 445MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
446MODULE_DESCRIPTION("wm97xx continuous touch driver for Atmel AT91 and AVR32"); 446MODULE_DESCRIPTION("wm97xx continuous touch driver for Atmel AT91 and AVR32");
447MODULE_LICENSE("GPL"); 447MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 1e61387c73ca..ae00604a6a81 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -48,41 +48,47 @@
48#define MXT_OBJECT_SIZE 6 48#define MXT_OBJECT_SIZE 6
49 49
50/* Object types */ 50/* Object types */
51#define MXT_DEBUG_DIAGNOSTIC 37 51#define MXT_DEBUG_DIAGNOSTIC_T37 37
52#define MXT_GEN_MESSAGE 5 52#define MXT_GEN_MESSAGE_T5 5
53#define MXT_GEN_COMMAND 6 53#define MXT_GEN_COMMAND_T6 6
54#define MXT_GEN_POWER 7 54#define MXT_GEN_POWER_T7 7
55#define MXT_GEN_ACQUIRE 8 55#define MXT_GEN_ACQUIRE_T8 8
56#define MXT_TOUCH_MULTI 9 56#define MXT_GEN_DATASOURCE_T53 53
57#define MXT_TOUCH_KEYARRAY 15 57#define MXT_TOUCH_MULTI_T9 9
58#define MXT_TOUCH_PROXIMITY 23 58#define MXT_TOUCH_KEYARRAY_T15 15
59#define MXT_PROCI_GRIPFACE 20 59#define MXT_TOUCH_PROXIMITY_T23 23
60#define MXT_PROCG_NOISE 22 60#define MXT_TOUCH_PROXKEY_T52 52
61#define MXT_PROCI_ONETOUCH 24 61#define MXT_PROCI_GRIPFACE_T20 20
62#define MXT_PROCI_TWOTOUCH 27 62#define MXT_PROCG_NOISE_T22 22
63#define MXT_PROCI_GRIP 40 63#define MXT_PROCI_ONETOUCH_T24 24
64#define MXT_PROCI_PALM 41 64#define MXT_PROCI_TWOTOUCH_T27 27
65#define MXT_SPT_COMMSCONFIG 18 65#define MXT_PROCI_GRIP_T40 40
66#define MXT_SPT_GPIOPWM 19 66#define MXT_PROCI_PALM_T41 41
67#define MXT_SPT_SELFTEST 25 67#define MXT_PROCI_TOUCHSUPPRESSION_T42 42
68#define MXT_SPT_CTECONFIG 28 68#define MXT_PROCI_STYLUS_T47 47
69#define MXT_SPT_USERDATA 38 69#define MXT_PROCG_NOISESUPPRESSION_T48 48
70#define MXT_SPT_DIGITIZER 43 70#define MXT_SPT_COMMSCONFIG_T18 18
71#define MXT_SPT_MESSAGECOUNT 44 71#define MXT_SPT_GPIOPWM_T19 19
72 72#define MXT_SPT_SELFTEST_T25 25
73/* MXT_GEN_COMMAND field */ 73#define MXT_SPT_CTECONFIG_T28 28
74#define MXT_SPT_USERDATA_T38 38
75#define MXT_SPT_DIGITIZER_T43 43
76#define MXT_SPT_MESSAGECOUNT_T44 44
77#define MXT_SPT_CTECONFIG_T46 46
78
79/* MXT_GEN_COMMAND_T6 field */
74#define MXT_COMMAND_RESET 0 80#define MXT_COMMAND_RESET 0
75#define MXT_COMMAND_BACKUPNV 1 81#define MXT_COMMAND_BACKUPNV 1
76#define MXT_COMMAND_CALIBRATE 2 82#define MXT_COMMAND_CALIBRATE 2
77#define MXT_COMMAND_REPORTALL 3 83#define MXT_COMMAND_REPORTALL 3
78#define MXT_COMMAND_DIAGNOSTIC 5 84#define MXT_COMMAND_DIAGNOSTIC 5
79 85
80/* MXT_GEN_POWER field */ 86/* MXT_GEN_POWER_T7 field */
81#define MXT_POWER_IDLEACQINT 0 87#define MXT_POWER_IDLEACQINT 0
82#define MXT_POWER_ACTVACQINT 1 88#define MXT_POWER_ACTVACQINT 1
83#define MXT_POWER_ACTV2IDLETO 2 89#define MXT_POWER_ACTV2IDLETO 2
84 90
85/* MXT_GEN_ACQUIRE field */ 91/* MXT_GEN_ACQUIRE_T8 field */
86#define MXT_ACQUIRE_CHRGTIME 0 92#define MXT_ACQUIRE_CHRGTIME 0
87#define MXT_ACQUIRE_TCHDRIFT 2 93#define MXT_ACQUIRE_TCHDRIFT 2
88#define MXT_ACQUIRE_DRIFTST 3 94#define MXT_ACQUIRE_DRIFTST 3
@@ -91,7 +97,7 @@
91#define MXT_ACQUIRE_ATCHCALST 6 97#define MXT_ACQUIRE_ATCHCALST 6
92#define MXT_ACQUIRE_ATCHCALSTHR 7 98#define MXT_ACQUIRE_ATCHCALSTHR 7
93 99
94/* MXT_TOUCH_MULTI field */ 100/* MXT_TOUCH_MULTI_T9 field */
95#define MXT_TOUCH_CTRL 0 101#define MXT_TOUCH_CTRL 0
96#define MXT_TOUCH_XORIGIN 1 102#define MXT_TOUCH_XORIGIN 1
97#define MXT_TOUCH_YORIGIN 2 103#define MXT_TOUCH_YORIGIN 2
@@ -121,7 +127,7 @@
121#define MXT_TOUCH_YEDGEDIST 29 127#define MXT_TOUCH_YEDGEDIST 29
122#define MXT_TOUCH_JUMPLIMIT 30 128#define MXT_TOUCH_JUMPLIMIT 30
123 129
124/* MXT_PROCI_GRIPFACE field */ 130/* MXT_PROCI_GRIPFACE_T20 field */
125#define MXT_GRIPFACE_CTRL 0 131#define MXT_GRIPFACE_CTRL 0
126#define MXT_GRIPFACE_XLOGRIP 1 132#define MXT_GRIPFACE_XLOGRIP 1
127#define MXT_GRIPFACE_XHIGRIP 2 133#define MXT_GRIPFACE_XHIGRIP 2
@@ -151,11 +157,11 @@
151#define MXT_NOISE_FREQ4 15 157#define MXT_NOISE_FREQ4 15
152#define MXT_NOISE_IDLEGCAFVALID 16 158#define MXT_NOISE_IDLEGCAFVALID 16
153 159
154/* MXT_SPT_COMMSCONFIG */ 160/* MXT_SPT_COMMSCONFIG_T18 */
155#define MXT_COMMS_CTRL 0 161#define MXT_COMMS_CTRL 0
156#define MXT_COMMS_CMD 1 162#define MXT_COMMS_CMD 1
157 163
158/* MXT_SPT_CTECONFIG field */ 164/* MXT_SPT_CTECONFIG_T28 field */
159#define MXT_CTE_CTRL 0 165#define MXT_CTE_CTRL 0
160#define MXT_CTE_CMD 1 166#define MXT_CTE_CMD 1
161#define MXT_CTE_MODE 2 167#define MXT_CTE_MODE 2
@@ -166,7 +172,7 @@
166#define MXT_VOLTAGE_DEFAULT 2700000 172#define MXT_VOLTAGE_DEFAULT 2700000
167#define MXT_VOLTAGE_STEP 10000 173#define MXT_VOLTAGE_STEP 10000
168 174
169/* Define for MXT_GEN_COMMAND */ 175/* Define for MXT_GEN_COMMAND_T6 */
170#define MXT_BOOT_VALUE 0xa5 176#define MXT_BOOT_VALUE 0xa5
171#define MXT_BACKUP_VALUE 0x55 177#define MXT_BACKUP_VALUE 0x55
172#define MXT_BACKUP_TIME 25 /* msec */ 178#define MXT_BACKUP_TIME 25 /* msec */
@@ -256,24 +262,31 @@ struct mxt_data {
256static bool mxt_object_readable(unsigned int type) 262static bool mxt_object_readable(unsigned int type)
257{ 263{
258 switch (type) { 264 switch (type) {
259 case MXT_GEN_MESSAGE: 265 case MXT_GEN_MESSAGE_T5:
260 case MXT_GEN_COMMAND: 266 case MXT_GEN_COMMAND_T6:
261 case MXT_GEN_POWER: 267 case MXT_GEN_POWER_T7:
262 case MXT_GEN_ACQUIRE: 268 case MXT_GEN_ACQUIRE_T8:
263 case MXT_TOUCH_MULTI: 269 case MXT_GEN_DATASOURCE_T53:
264 case MXT_TOUCH_KEYARRAY: 270 case MXT_TOUCH_MULTI_T9:
265 case MXT_TOUCH_PROXIMITY: 271 case MXT_TOUCH_KEYARRAY_T15:
266 case MXT_PROCI_GRIPFACE: 272 case MXT_TOUCH_PROXIMITY_T23:
267 case MXT_PROCG_NOISE: 273 case MXT_TOUCH_PROXKEY_T52:
268 case MXT_PROCI_ONETOUCH: 274 case MXT_PROCI_GRIPFACE_T20:
269 case MXT_PROCI_TWOTOUCH: 275 case MXT_PROCG_NOISE_T22:
270 case MXT_PROCI_GRIP: 276 case MXT_PROCI_ONETOUCH_T24:
271 case MXT_PROCI_PALM: 277 case MXT_PROCI_TWOTOUCH_T27:
272 case MXT_SPT_COMMSCONFIG: 278 case MXT_PROCI_GRIP_T40:
273 case MXT_SPT_GPIOPWM: 279 case MXT_PROCI_PALM_T41:
274 case MXT_SPT_SELFTEST: 280 case MXT_PROCI_TOUCHSUPPRESSION_T42:
275 case MXT_SPT_CTECONFIG: 281 case MXT_PROCI_STYLUS_T47:
276 case MXT_SPT_USERDATA: 282 case MXT_PROCG_NOISESUPPRESSION_T48:
283 case MXT_SPT_COMMSCONFIG_T18:
284 case MXT_SPT_GPIOPWM_T19:
285 case MXT_SPT_SELFTEST_T25:
286 case MXT_SPT_CTECONFIG_T28:
287 case MXT_SPT_USERDATA_T38:
288 case MXT_SPT_DIGITIZER_T43:
289 case MXT_SPT_CTECONFIG_T46:
277 return true; 290 return true;
278 default: 291 default:
279 return false; 292 return false;
@@ -283,21 +296,28 @@ static bool mxt_object_readable(unsigned int type)
283static bool mxt_object_writable(unsigned int type) 296static bool mxt_object_writable(unsigned int type)
284{ 297{
285 switch (type) { 298 switch (type) {
286 case MXT_GEN_COMMAND: 299 case MXT_GEN_COMMAND_T6:
287 case MXT_GEN_POWER: 300 case MXT_GEN_POWER_T7:
288 case MXT_GEN_ACQUIRE: 301 case MXT_GEN_ACQUIRE_T8:
289 case MXT_TOUCH_MULTI: 302 case MXT_TOUCH_MULTI_T9:
290 case MXT_TOUCH_KEYARRAY: 303 case MXT_TOUCH_KEYARRAY_T15:
291 case MXT_TOUCH_PROXIMITY: 304 case MXT_TOUCH_PROXIMITY_T23:
292 case MXT_PROCI_GRIPFACE: 305 case MXT_TOUCH_PROXKEY_T52:
293 case MXT_PROCG_NOISE: 306 case MXT_PROCI_GRIPFACE_T20:
294 case MXT_PROCI_ONETOUCH: 307 case MXT_PROCG_NOISE_T22:
295 case MXT_PROCI_TWOTOUCH: 308 case MXT_PROCI_ONETOUCH_T24:
296 case MXT_PROCI_GRIP: 309 case MXT_PROCI_TWOTOUCH_T27:
297 case MXT_PROCI_PALM: 310 case MXT_PROCI_GRIP_T40:
298 case MXT_SPT_GPIOPWM: 311 case MXT_PROCI_PALM_T41:
299 case MXT_SPT_SELFTEST: 312 case MXT_PROCI_TOUCHSUPPRESSION_T42:
300 case MXT_SPT_CTECONFIG: 313 case MXT_PROCI_STYLUS_T47:
314 case MXT_PROCG_NOISESUPPRESSION_T48:
315 case MXT_SPT_COMMSCONFIG_T18:
316 case MXT_SPT_GPIOPWM_T19:
317 case MXT_SPT_SELFTEST_T25:
318 case MXT_SPT_CTECONFIG_T28:
319 case MXT_SPT_DIGITIZER_T43:
320 case MXT_SPT_CTECONFIG_T46:
301 return true; 321 return true;
302 default: 322 default:
303 return false; 323 return false;
@@ -455,7 +475,7 @@ static int mxt_read_message(struct mxt_data *data,
455 struct mxt_object *object; 475 struct mxt_object *object;
456 u16 reg; 476 u16 reg;
457 477
458 object = mxt_get_object(data, MXT_GEN_MESSAGE); 478 object = mxt_get_object(data, MXT_GEN_MESSAGE_T5);
459 if (!object) 479 if (!object)
460 return -EINVAL; 480 return -EINVAL;
461 481
@@ -597,8 +617,8 @@ static irqreturn_t mxt_interrupt(int irq, void *dev_id)
597 617
598 reportid = message.reportid; 618 reportid = message.reportid;
599 619
600 /* whether reportid is thing of MXT_TOUCH_MULTI */ 620 /* whether reportid is thing of MXT_TOUCH_MULTI_T9 */
601 object = mxt_get_object(data, MXT_TOUCH_MULTI); 621 object = mxt_get_object(data, MXT_TOUCH_MULTI_T9);
602 if (!object) 622 if (!object)
603 goto end; 623 goto end;
604 624
@@ -635,7 +655,9 @@ static int mxt_check_reg_init(struct mxt_data *data)
635 if (!mxt_object_writable(object->type)) 655 if (!mxt_object_writable(object->type))
636 continue; 656 continue;
637 657
638 for (j = 0; j < object->size + 1; j++) { 658 for (j = 0;
659 j < (object->size + 1) * (object->instances + 1);
660 j++) {
639 config_offset = index + j; 661 config_offset = index + j;
640 if (config_offset > pdata->config_length) { 662 if (config_offset > pdata->config_length) {
641 dev_err(dev, "Not enough config data!\n"); 663 dev_err(dev, "Not enough config data!\n");
@@ -644,7 +666,7 @@ static int mxt_check_reg_init(struct mxt_data *data)
644 mxt_write_object(data, object->type, j, 666 mxt_write_object(data, object->type, j,
645 pdata->config[config_offset]); 667 pdata->config[config_offset]);
646 } 668 }
647 index += object->size + 1; 669 index += (object->size + 1) * (object->instances + 1);
648 } 670 }
649 671
650 return 0; 672 return 0;
@@ -678,31 +700,31 @@ static void mxt_handle_pdata(struct mxt_data *data)
678 u8 voltage; 700 u8 voltage;
679 701
680 /* Set touchscreen lines */ 702 /* Set touchscreen lines */
681 mxt_write_object(data, MXT_TOUCH_MULTI, MXT_TOUCH_XSIZE, 703 mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_XSIZE,
682 pdata->x_line); 704 pdata->x_line);
683 mxt_write_object(data, MXT_TOUCH_MULTI, MXT_TOUCH_YSIZE, 705 mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_YSIZE,
684 pdata->y_line); 706 pdata->y_line);
685 707
686 /* Set touchscreen orient */ 708 /* Set touchscreen orient */
687 mxt_write_object(data, MXT_TOUCH_MULTI, MXT_TOUCH_ORIENT, 709 mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_ORIENT,
688 pdata->orient); 710 pdata->orient);
689 711
690 /* Set touchscreen burst length */ 712 /* Set touchscreen burst length */
691 mxt_write_object(data, MXT_TOUCH_MULTI, 713 mxt_write_object(data, MXT_TOUCH_MULTI_T9,
692 MXT_TOUCH_BLEN, pdata->blen); 714 MXT_TOUCH_BLEN, pdata->blen);
693 715
694 /* Set touchscreen threshold */ 716 /* Set touchscreen threshold */
695 mxt_write_object(data, MXT_TOUCH_MULTI, 717 mxt_write_object(data, MXT_TOUCH_MULTI_T9,
696 MXT_TOUCH_TCHTHR, pdata->threshold); 718 MXT_TOUCH_TCHTHR, pdata->threshold);
697 719
698 /* Set touchscreen resolution */ 720 /* Set touchscreen resolution */
699 mxt_write_object(data, MXT_TOUCH_MULTI, 721 mxt_write_object(data, MXT_TOUCH_MULTI_T9,
700 MXT_TOUCH_XRANGE_LSB, (pdata->x_size - 1) & 0xff); 722 MXT_TOUCH_XRANGE_LSB, (pdata->x_size - 1) & 0xff);
701 mxt_write_object(data, MXT_TOUCH_MULTI, 723 mxt_write_object(data, MXT_TOUCH_MULTI_T9,
702 MXT_TOUCH_XRANGE_MSB, (pdata->x_size - 1) >> 8); 724 MXT_TOUCH_XRANGE_MSB, (pdata->x_size - 1) >> 8);
703 mxt_write_object(data, MXT_TOUCH_MULTI, 725 mxt_write_object(data, MXT_TOUCH_MULTI_T9,
704 MXT_TOUCH_YRANGE_LSB, (pdata->y_size - 1) & 0xff); 726 MXT_TOUCH_YRANGE_LSB, (pdata->y_size - 1) & 0xff);
705 mxt_write_object(data, MXT_TOUCH_MULTI, 727 mxt_write_object(data, MXT_TOUCH_MULTI_T9,
706 MXT_TOUCH_YRANGE_MSB, (pdata->y_size - 1) >> 8); 728 MXT_TOUCH_YRANGE_MSB, (pdata->y_size - 1) >> 8);
707 729
708 /* Set touchscreen voltage */ 730 /* Set touchscreen voltage */
@@ -715,7 +737,7 @@ static void mxt_handle_pdata(struct mxt_data *data)
715 voltage = (pdata->voltage - MXT_VOLTAGE_DEFAULT) / 737 voltage = (pdata->voltage - MXT_VOLTAGE_DEFAULT) /
716 MXT_VOLTAGE_STEP; 738 MXT_VOLTAGE_STEP;
717 739
718 mxt_write_object(data, MXT_SPT_CTECONFIG, 740 mxt_write_object(data, MXT_SPT_CTECONFIG_T28,
719 MXT_CTE_VOLTAGE, voltage); 741 MXT_CTE_VOLTAGE, voltage);
720 } 742 }
721} 743}
@@ -819,13 +841,13 @@ static int mxt_initialize(struct mxt_data *data)
819 mxt_handle_pdata(data); 841 mxt_handle_pdata(data);
820 842
821 /* Backup to memory */ 843 /* Backup to memory */
822 mxt_write_object(data, MXT_GEN_COMMAND, 844 mxt_write_object(data, MXT_GEN_COMMAND_T6,
823 MXT_COMMAND_BACKUPNV, 845 MXT_COMMAND_BACKUPNV,
824 MXT_BACKUP_VALUE); 846 MXT_BACKUP_VALUE);
825 msleep(MXT_BACKUP_TIME); 847 msleep(MXT_BACKUP_TIME);
826 848
827 /* Soft reset */ 849 /* Soft reset */
828 mxt_write_object(data, MXT_GEN_COMMAND, 850 mxt_write_object(data, MXT_GEN_COMMAND_T6,
829 MXT_COMMAND_RESET, 1); 851 MXT_COMMAND_RESET, 1);
830 msleep(MXT_RESET_TIME); 852 msleep(MXT_RESET_TIME);
831 853
@@ -921,7 +943,7 @@ static int mxt_load_fw(struct device *dev, const char *fn)
921 } 943 }
922 944
923 /* Change to the bootloader mode */ 945 /* Change to the bootloader mode */
924 mxt_write_object(data, MXT_GEN_COMMAND, 946 mxt_write_object(data, MXT_GEN_COMMAND_T6,
925 MXT_COMMAND_RESET, MXT_BOOT_VALUE); 947 MXT_COMMAND_RESET, MXT_BOOT_VALUE);
926 msleep(MXT_RESET_TIME); 948 msleep(MXT_RESET_TIME);
927 949
@@ -1027,14 +1049,14 @@ static void mxt_start(struct mxt_data *data)
1027{ 1049{
1028 /* Touch enable */ 1050 /* Touch enable */
1029 mxt_write_object(data, 1051 mxt_write_object(data,
1030 MXT_TOUCH_MULTI, MXT_TOUCH_CTRL, 0x83); 1052 MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83);
1031} 1053}
1032 1054
1033static void mxt_stop(struct mxt_data *data) 1055static void mxt_stop(struct mxt_data *data)
1034{ 1056{
1035 /* Touch disable */ 1057 /* Touch disable */
1036 mxt_write_object(data, 1058 mxt_write_object(data,
1037 MXT_TOUCH_MULTI, MXT_TOUCH_CTRL, 0); 1059 MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0);
1038} 1060}
1039 1061
1040static int mxt_input_open(struct input_dev *dev) 1062static int mxt_input_open(struct input_dev *dev)
@@ -1182,7 +1204,7 @@ static int mxt_resume(struct device *dev)
1182 struct input_dev *input_dev = data->input_dev; 1204 struct input_dev *input_dev = data->input_dev;
1183 1205
1184 /* Soft reset */ 1206 /* Soft reset */
1185 mxt_write_object(data, MXT_GEN_COMMAND, 1207 mxt_write_object(data, MXT_GEN_COMMAND_T6,
1186 MXT_COMMAND_RESET, 1); 1208 MXT_COMMAND_RESET, 1);
1187 1209
1188 msleep(MXT_RESET_TIME); 1210 msleep(MXT_RESET_TIME);
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index a93c5c26ab3f..d8815c5d54ad 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -84,9 +84,9 @@ static int cy8ctmg110_write_regs(struct cy8ctmg110 *tsc, unsigned char reg,
84 memcpy(i2c_data + 1, value, len); 84 memcpy(i2c_data + 1, value, len);
85 85
86 ret = i2c_master_send(client, i2c_data, len + 1); 86 ret = i2c_master_send(client, i2c_data, len + 1);
87 if (ret != 1) { 87 if (ret != len + 1) {
88 dev_err(&client->dev, "i2c write data cmd failed\n"); 88 dev_err(&client->dev, "i2c write data cmd failed\n");
89 return ret ? ret : -EIO; 89 return ret < 0 ? ret : -EIO;
90 } 90 }
91 91
92 return 0; 92 return 0;
@@ -193,6 +193,8 @@ static int __devinit cy8ctmg110_probe(struct i2c_client *client,
193 193
194 ts->client = client; 194 ts->client = client;
195 ts->input = input_dev; 195 ts->input = input_dev;
196 ts->reset_pin = pdata->reset_pin;
197 ts->irq_pin = pdata->irq_pin;
196 198
197 snprintf(ts->phys, sizeof(ts->phys), 199 snprintf(ts->phys, sizeof(ts->phys),
198 "%s/input0", dev_name(&client->dev)); 200 "%s/input0", dev_name(&client->dev));
@@ -328,7 +330,7 @@ static int __devexit cy8ctmg110_remove(struct i2c_client *client)
328 return 0; 330 return 0;
329} 331}
330 332
331static struct i2c_device_id cy8ctmg110_idtable[] = { 333static const struct i2c_device_id cy8ctmg110_idtable[] = {
332 { CY8CTMG110_DRIVER_NAME, 1 }, 334 { CY8CTMG110_DRIVER_NAME, 1 },
333 { } 335 { }
334}; 336};
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index 66c96bfc5522..327695268e06 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -448,15 +448,11 @@ static int __devinit mrstouch_read_pmic_id(uint *vendor, uint *rev)
448 */ 448 */
449static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev) 449static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev)
450{ 450{
451 int err, i, found; 451 int found = 0;
452 int err, i;
452 u8 r8; 453 u8 r8;
453 454
454 found = -1;
455
456 for (i = 0; i < MRSTOUCH_MAX_CHANNELS; i++) { 455 for (i = 0; i < MRSTOUCH_MAX_CHANNELS; i++) {
457 if (found >= 0)
458 break;
459
460 err = intel_scu_ipc_ioread8(PMICADDR0 + i, &r8); 456 err = intel_scu_ipc_ioread8(PMICADDR0 + i, &r8);
461 if (err) 457 if (err)
462 return err; 458 return err;
@@ -466,16 +462,15 @@ static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev)
466 break; 462 break;
467 } 463 }
468 } 464 }
469 if (found < 0)
470 return 0;
471 465
472 if (tsdev->vendor == PMIC_VENDOR_FS) { 466 if (tsdev->vendor == PMIC_VENDOR_FS) {
473 if (found && found > (MRSTOUCH_MAX_CHANNELS - 18)) 467 if (found > MRSTOUCH_MAX_CHANNELS - 18)
474 return -ENOSPC; 468 return -ENOSPC;
475 } else { 469 } else {
476 if (found && found > (MRSTOUCH_MAX_CHANNELS - 4)) 470 if (found > MRSTOUCH_MAX_CHANNELS - 4)
477 return -ENOSPC; 471 return -ENOSPC;
478 } 472 }
473
479 return found; 474 return found;
480} 475}
481 476
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index 3242e7076258..e966c29ff1bb 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -157,9 +157,9 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
157 x, y, p); 157 x, y, p);
158 158
159 /* are samples valid */ 159 /* are samples valid */
160 if ((x & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_X || 160 if ((x & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_X ||
161 (y & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_Y || 161 (y & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_Y ||
162 (p & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_PRES) 162 (p & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_PRES)
163 goto up; 163 goto up;
164 164
165 /* coordinate is good */ 165 /* coordinate is good */
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index 22a3411e93c5..089b0a0f3d8c 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -393,5 +393,5 @@ module_exit(tsc_exit);
393 393
394MODULE_AUTHOR("Cyril Chemparathy"); 394MODULE_AUTHOR("Cyril Chemparathy");
395MODULE_DESCRIPTION("TNETV107X Touchscreen Driver"); 395MODULE_DESCRIPTION("TNETV107X Touchscreen Driver");
396MODULE_ALIAS("platform: tnetv107x-ts"); 396MODULE_ALIAS("platform:tnetv107x-ts");
397MODULE_LICENSE("GPL"); 397MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/wm9705.c b/drivers/input/touchscreen/wm9705.c
index 98e61175d3f5..adc13a523ab5 100644
--- a/drivers/input/touchscreen/wm9705.c
+++ b/drivers/input/touchscreen/wm9705.c
@@ -215,8 +215,9 @@ static inline int is_pden(struct wm97xx *wm)
215static int wm9705_poll_sample(struct wm97xx *wm, int adcsel, int *sample) 215static int wm9705_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
216{ 216{
217 int timeout = 5 * delay; 217 int timeout = 5 * delay;
218 bool wants_pen = adcsel & WM97XX_PEN_DOWN;
218 219
219 if (!wm->pen_probably_down) { 220 if (wants_pen && !wm->pen_probably_down) {
220 u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); 221 u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
221 if (!(data & WM97XX_PEN_DOWN)) 222 if (!(data & WM97XX_PEN_DOWN))
222 return RC_PENUP; 223 return RC_PENUP;
@@ -224,13 +225,10 @@ static int wm9705_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
224 } 225 }
225 226
226 /* set up digitiser */ 227 /* set up digitiser */
227 if (adcsel & 0x8000)
228 adcsel = ((adcsel & 0x7fff) + 3) << 12;
229
230 if (wm->mach_ops && wm->mach_ops->pre_sample) 228 if (wm->mach_ops && wm->mach_ops->pre_sample)
231 wm->mach_ops->pre_sample(adcsel); 229 wm->mach_ops->pre_sample(adcsel);
232 wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, 230 wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, (adcsel & WM97XX_ADCSEL_MASK)
233 adcsel | WM97XX_POLL | WM97XX_DELAY(delay)); 231 | WM97XX_POLL | WM97XX_DELAY(delay));
234 232
235 /* wait 3 AC97 time slots + delay for conversion */ 233 /* wait 3 AC97 time slots + delay for conversion */
236 poll_delay(delay); 234 poll_delay(delay);
@@ -256,13 +254,14 @@ static int wm9705_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
256 wm->mach_ops->post_sample(adcsel); 254 wm->mach_ops->post_sample(adcsel);
257 255
258 /* check we have correct sample */ 256 /* check we have correct sample */
259 if ((*sample & WM97XX_ADCSEL_MASK) != adcsel) { 257 if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) {
260 dev_dbg(wm->dev, "adc wrong sample, read %x got %x", adcsel, 258 dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x",
261 *sample & WM97XX_ADCSEL_MASK); 259 adcsel & WM97XX_ADCSEL_MASK,
260 *sample & WM97XX_ADCSEL_MASK);
262 return RC_PENUP; 261 return RC_PENUP;
263 } 262 }
264 263
265 if (!(*sample & WM97XX_PEN_DOWN)) { 264 if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) {
266 wm->pen_probably_down = 0; 265 wm->pen_probably_down = 0;
267 return RC_PENUP; 266 return RC_PENUP;
268 } 267 }
@@ -277,14 +276,14 @@ static int wm9705_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
277{ 276{
278 int rc; 277 int rc;
279 278
280 rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_X, &data->x); 279 rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_X | WM97XX_PEN_DOWN, &data->x);
281 if (rc != RC_VALID) 280 if (rc != RC_VALID)
282 return rc; 281 return rc;
283 rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_Y, &data->y); 282 rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_Y | WM97XX_PEN_DOWN, &data->y);
284 if (rc != RC_VALID) 283 if (rc != RC_VALID)
285 return rc; 284 return rc;
286 if (pil) { 285 if (pil) {
287 rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_PRES, &data->p); 286 rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN, &data->p);
288 if (rc != RC_VALID) 287 if (rc != RC_VALID)
289 return rc; 288 return rc;
290 } else 289 } else
diff --git a/drivers/input/touchscreen/wm9712.c b/drivers/input/touchscreen/wm9712.c
index 2bc2fb801009..6e743e3dfda4 100644
--- a/drivers/input/touchscreen/wm9712.c
+++ b/drivers/input/touchscreen/wm9712.c
@@ -255,8 +255,9 @@ static inline int is_pden(struct wm97xx *wm)
255static int wm9712_poll_sample(struct wm97xx *wm, int adcsel, int *sample) 255static int wm9712_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
256{ 256{
257 int timeout = 5 * delay; 257 int timeout = 5 * delay;
258 bool wants_pen = adcsel & WM97XX_PEN_DOWN;
258 259
259 if (!wm->pen_probably_down) { 260 if (wants_pen && !wm->pen_probably_down) {
260 u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); 261 u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
261 if (!(data & WM97XX_PEN_DOWN)) 262 if (!(data & WM97XX_PEN_DOWN))
262 return RC_PENUP; 263 return RC_PENUP;
@@ -264,13 +265,10 @@ static int wm9712_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
264 } 265 }
265 266
266 /* set up digitiser */ 267 /* set up digitiser */
267 if (adcsel & 0x8000)
268 adcsel = ((adcsel & 0x7fff) + 3) << 12;
269
270 if (wm->mach_ops && wm->mach_ops->pre_sample) 268 if (wm->mach_ops && wm->mach_ops->pre_sample)
271 wm->mach_ops->pre_sample(adcsel); 269 wm->mach_ops->pre_sample(adcsel);
272 wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, 270 wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, (adcsel & WM97XX_ADCSEL_MASK)
273 adcsel | WM97XX_POLL | WM97XX_DELAY(delay)); 271 | WM97XX_POLL | WM97XX_DELAY(delay));
274 272
275 /* wait 3 AC97 time slots + delay for conversion */ 273 /* wait 3 AC97 time slots + delay for conversion */
276 poll_delay(delay); 274 poll_delay(delay);
@@ -296,13 +294,14 @@ static int wm9712_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
296 wm->mach_ops->post_sample(adcsel); 294 wm->mach_ops->post_sample(adcsel);
297 295
298 /* check we have correct sample */ 296 /* check we have correct sample */
299 if ((*sample & WM97XX_ADCSEL_MASK) != adcsel) { 297 if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) {
300 dev_dbg(wm->dev, "adc wrong sample, read %x got %x", adcsel, 298 dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x",
301 *sample & WM97XX_ADCSEL_MASK); 299 adcsel & WM97XX_ADCSEL_MASK,
300 *sample & WM97XX_ADCSEL_MASK);
302 return RC_PENUP; 301 return RC_PENUP;
303 } 302 }
304 303
305 if (!(*sample & WM97XX_PEN_DOWN)) { 304 if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) {
306 wm->pen_probably_down = 0; 305 wm->pen_probably_down = 0;
307 return RC_PENUP; 306 return RC_PENUP;
308 } 307 }
@@ -387,16 +386,18 @@ static int wm9712_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
387 if (rc != RC_VALID) 386 if (rc != RC_VALID)
388 return rc; 387 return rc;
389 } else { 388 } else {
390 rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_X, &data->x); 389 rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_X | WM97XX_PEN_DOWN,
390 &data->x);
391 if (rc != RC_VALID) 391 if (rc != RC_VALID)
392 return rc; 392 return rc;
393 393
394 rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_Y, &data->y); 394 rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_Y | WM97XX_PEN_DOWN,
395 &data->y);
395 if (rc != RC_VALID) 396 if (rc != RC_VALID)
396 return rc; 397 return rc;
397 398
398 if (pil && !five_wire) { 399 if (pil && !five_wire) {
399 rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_PRES, 400 rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN,
400 &data->p); 401 &data->p);
401 if (rc != RC_VALID) 402 if (rc != RC_VALID)
402 return rc; 403 return rc;
diff --git a/drivers/input/touchscreen/wm9713.c b/drivers/input/touchscreen/wm9713.c
index 73ec99568f12..7405353199d7 100644
--- a/drivers/input/touchscreen/wm9713.c
+++ b/drivers/input/touchscreen/wm9713.c
@@ -261,8 +261,9 @@ static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
261{ 261{
262 u16 dig1; 262 u16 dig1;
263 int timeout = 5 * delay; 263 int timeout = 5 * delay;
264 bool wants_pen = adcsel & WM97XX_PEN_DOWN;
264 265
265 if (!wm->pen_probably_down) { 266 if (wants_pen && !wm->pen_probably_down) {
266 u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); 267 u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
267 if (!(data & WM97XX_PEN_DOWN)) 268 if (!(data & WM97XX_PEN_DOWN))
268 return RC_PENUP; 269 return RC_PENUP;
@@ -270,15 +271,14 @@ static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
270 } 271 }
271 272
272 /* set up digitiser */ 273 /* set up digitiser */
273 if (adcsel & 0x8000)
274 adcsel = 1 << ((adcsel & 0x7fff) + 3);
275
276 dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1); 274 dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1);
277 dig1 &= ~WM9713_ADCSEL_MASK; 275 dig1 &= ~WM9713_ADCSEL_MASK;
276 /* WM97XX_ADCSEL_* channels need to be converted to WM9713 format */
277 dig1 |= 1 << ((adcsel & WM97XX_ADCSEL_MASK) >> 12);
278 278
279 if (wm->mach_ops && wm->mach_ops->pre_sample) 279 if (wm->mach_ops && wm->mach_ops->pre_sample)
280 wm->mach_ops->pre_sample(adcsel); 280 wm->mach_ops->pre_sample(adcsel);
281 wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1 | adcsel | WM9713_POLL); 281 wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1 | WM9713_POLL);
282 282
283 /* wait 3 AC97 time slots + delay for conversion */ 283 /* wait 3 AC97 time slots + delay for conversion */
284 poll_delay(delay); 284 poll_delay(delay);
@@ -304,13 +304,14 @@ static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
304 wm->mach_ops->post_sample(adcsel); 304 wm->mach_ops->post_sample(adcsel);
305 305
306 /* check we have correct sample */ 306 /* check we have correct sample */
307 if ((*sample & WM97XX_ADCSRC_MASK) != ffs(adcsel >> 1) << 12) { 307 if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) {
308 dev_dbg(wm->dev, "adc wrong sample, read %x got %x", adcsel, 308 dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x",
309 *sample & WM97XX_ADCSRC_MASK); 309 adcsel & WM97XX_ADCSEL_MASK,
310 *sample & WM97XX_ADCSEL_MASK);
310 return RC_PENUP; 311 return RC_PENUP;
311 } 312 }
312 313
313 if (!(*sample & WM97XX_PEN_DOWN)) { 314 if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) {
314 wm->pen_probably_down = 0; 315 wm->pen_probably_down = 0;
315 return RC_PENUP; 316 return RC_PENUP;
316 } 317 }
@@ -400,14 +401,14 @@ static int wm9713_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
400 if (rc != RC_VALID) 401 if (rc != RC_VALID)
401 return rc; 402 return rc;
402 } else { 403 } else {
403 rc = wm9713_poll_sample(wm, WM9713_ADCSEL_X, &data->x); 404 rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_X | WM97XX_PEN_DOWN, &data->x);
404 if (rc != RC_VALID) 405 if (rc != RC_VALID)
405 return rc; 406 return rc;
406 rc = wm9713_poll_sample(wm, WM9713_ADCSEL_Y, &data->y); 407 rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_Y | WM97XX_PEN_DOWN, &data->y);
407 if (rc != RC_VALID) 408 if (rc != RC_VALID)
408 return rc; 409 return rc;
409 if (pil) { 410 if (pil) {
410 rc = wm9713_poll_sample(wm, WM9713_ADCSEL_PRES, 411 rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN,
411 &data->p); 412 &data->p);
412 if (rc != RC_VALID) 413 if (rc != RC_VALID)
413 return rc; 414 return rc;
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c
index 5b0f15ec874a..f6328c0cded6 100644
--- a/drivers/input/touchscreen/zylonite-wm97xx.c
+++ b/drivers/input/touchscreen/zylonite-wm97xx.c
@@ -122,9 +122,9 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
122 x, y, p); 122 x, y, p);
123 123
124 /* are samples valid */ 124 /* are samples valid */
125 if ((x & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_X || 125 if ((x & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_X ||
126 (y & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_Y || 126 (y & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_Y ||
127 (p & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_PRES) 127 (p & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_PRES)
128 goto up; 128 goto up;
129 129
130 /* coordinate is good */ 130 /* coordinate is good */
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 6dd360734cfd..212efaf9a4e4 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -34,7 +34,7 @@
34#include <linux/tty.h> 34#include <linux/tty.h>
35#include <linux/tty_driver.h> 35#include <linux/tty_driver.h>
36#include <linux/list.h> 36#include <linux/list.h>
37#include <asm/atomic.h> 37#include <linux/atomic.h>
38 38
39#define GIG_VERSION {0, 5, 0, 0} 39#define GIG_VERSION {0, 5, 0, 0}
40#define GIG_COMPAT {0, 4, 0, 0} 40#define GIG_COMPAT {0, 4, 0, 0}
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 48e9cc0369b1..1f73d7f7e024 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2532,6 +2532,9 @@ static void _isdn_setup(struct net_device *dev)
2532 2532
2533 /* Setup the generic properties */ 2533 /* Setup the generic properties */
2534 dev->flags = IFF_NOARP|IFF_POINTOPOINT; 2534 dev->flags = IFF_NOARP|IFF_POINTOPOINT;
2535
2536 /* isdn prepends a header in the tx path, can't share skbs */
2537 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2535 dev->header_ops = NULL; 2538 dev->header_ops = NULL;
2536 dev->netdev_ops = &isdn_netdev_ops; 2539 dev->netdev_ops = &isdn_netdev_ops;
2537 2540
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 574b09afedd3..0dc6546b77a8 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -29,7 +29,6 @@
29#include "md.h" 29#include "md.h"
30#include "bitmap.h" 30#include "bitmap.h"
31 31
32#include <linux/dm-dirty-log.h>
33/* debug macros */ 32/* debug macros */
34 33
35#define DEBUG 0 34#define DEBUG 0
@@ -775,10 +774,8 @@ static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned lon
775 * 0 or page 1 774 * 0 or page 1
776 */ 775 */
777static inline struct page *filemap_get_page(struct bitmap *bitmap, 776static inline struct page *filemap_get_page(struct bitmap *bitmap,
778 unsigned long chunk) 777 unsigned long chunk)
779{ 778{
780 if (bitmap->filemap == NULL)
781 return NULL;
782 if (file_page_index(bitmap, chunk) >= bitmap->file_pages) 779 if (file_page_index(bitmap, chunk) >= bitmap->file_pages)
783 return NULL; 780 return NULL;
784 return bitmap->filemap[file_page_index(bitmap, chunk) 781 return bitmap->filemap[file_page_index(bitmap, chunk)
@@ -878,28 +875,19 @@ enum bitmap_page_attr {
878static inline void set_page_attr(struct bitmap *bitmap, struct page *page, 875static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
879 enum bitmap_page_attr attr) 876 enum bitmap_page_attr attr)
880{ 877{
881 if (page) 878 __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
882 __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
883 else
884 __set_bit(attr, &bitmap->logattrs);
885} 879}
886 880
887static inline void clear_page_attr(struct bitmap *bitmap, struct page *page, 881static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
888 enum bitmap_page_attr attr) 882 enum bitmap_page_attr attr)
889{ 883{
890 if (page) 884 __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
891 __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
892 else
893 __clear_bit(attr, &bitmap->logattrs);
894} 885}
895 886
896static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page, 887static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page,
897 enum bitmap_page_attr attr) 888 enum bitmap_page_attr attr)
898{ 889{
899 if (page) 890 return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
900 return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
901 else
902 return test_bit(attr, &bitmap->logattrs);
903} 891}
904 892
905/* 893/*
@@ -912,30 +900,26 @@ static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *p
912static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) 900static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
913{ 901{
914 unsigned long bit; 902 unsigned long bit;
915 struct page *page = NULL; 903 struct page *page;
916 void *kaddr; 904 void *kaddr;
917 unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap); 905 unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap);
918 906
919 if (!bitmap->filemap) { 907 if (!bitmap->filemap)
920 struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log; 908 return;
921 if (log)
922 log->type->mark_region(log, chunk);
923 } else {
924 909
925 page = filemap_get_page(bitmap, chunk); 910 page = filemap_get_page(bitmap, chunk);
926 if (!page) 911 if (!page)
927 return; 912 return;
928 bit = file_page_offset(bitmap, chunk); 913 bit = file_page_offset(bitmap, chunk);
929 914
930 /* set the bit */ 915 /* set the bit */
931 kaddr = kmap_atomic(page, KM_USER0); 916 kaddr = kmap_atomic(page, KM_USER0);
932 if (bitmap->flags & BITMAP_HOSTENDIAN) 917 if (bitmap->flags & BITMAP_HOSTENDIAN)
933 set_bit(bit, kaddr); 918 set_bit(bit, kaddr);
934 else 919 else
935 __test_and_set_bit_le(bit, kaddr); 920 __set_bit_le(bit, kaddr);
936 kunmap_atomic(kaddr, KM_USER0); 921 kunmap_atomic(kaddr, KM_USER0);
937 PRINTK("set file bit %lu page %lu\n", bit, page->index); 922 PRINTK("set file bit %lu page %lu\n", bit, page->index);
938 }
939 /* record page number so it gets flushed to disk when unplug occurs */ 923 /* record page number so it gets flushed to disk when unplug occurs */
940 set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); 924 set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
941} 925}
@@ -952,16 +936,6 @@ void bitmap_unplug(struct bitmap *bitmap)
952 936
953 if (!bitmap) 937 if (!bitmap)
954 return; 938 return;
955 if (!bitmap->filemap) {
956 /* Must be using a dirty_log */
957 struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log;
958 dirty = test_and_clear_bit(BITMAP_PAGE_DIRTY, &bitmap->logattrs);
959 need_write = test_and_clear_bit(BITMAP_PAGE_NEEDWRITE, &bitmap->logattrs);
960 if (dirty || need_write)
961 if (log->type->flush(log))
962 bitmap->flags |= BITMAP_WRITE_ERROR;
963 goto out;
964 }
965 939
966 /* look at each page to see if there are any set bits that need to be 940 /* look at each page to see if there are any set bits that need to be
967 * flushed out to disk */ 941 * flushed out to disk */
@@ -990,7 +964,6 @@ void bitmap_unplug(struct bitmap *bitmap)
990 else 964 else
991 md_super_wait(bitmap->mddev); 965 md_super_wait(bitmap->mddev);
992 } 966 }
993out:
994 if (bitmap->flags & BITMAP_WRITE_ERROR) 967 if (bitmap->flags & BITMAP_WRITE_ERROR)
995 bitmap_file_kick(bitmap); 968 bitmap_file_kick(bitmap);
996} 969}
@@ -1199,7 +1172,6 @@ void bitmap_daemon_work(mddev_t *mddev)
1199 struct page *page = NULL, *lastpage = NULL; 1172 struct page *page = NULL, *lastpage = NULL;
1200 sector_t blocks; 1173 sector_t blocks;
1201 void *paddr; 1174 void *paddr;
1202 struct dm_dirty_log *log = mddev->bitmap_info.log;
1203 1175
1204 /* Use a mutex to guard daemon_work against 1176 /* Use a mutex to guard daemon_work against
1205 * bitmap_destroy. 1177 * bitmap_destroy.
@@ -1224,12 +1196,11 @@ void bitmap_daemon_work(mddev_t *mddev)
1224 spin_lock_irqsave(&bitmap->lock, flags); 1196 spin_lock_irqsave(&bitmap->lock, flags);
1225 for (j = 0; j < bitmap->chunks; j++) { 1197 for (j = 0; j < bitmap->chunks; j++) {
1226 bitmap_counter_t *bmc; 1198 bitmap_counter_t *bmc;
1227 if (!bitmap->filemap) { 1199 if (!bitmap->filemap)
1228 if (!log) 1200 /* error or shutdown */
1229 /* error or shutdown */ 1201 break;
1230 break; 1202
1231 } else 1203 page = filemap_get_page(bitmap, j);
1232 page = filemap_get_page(bitmap, j);
1233 1204
1234 if (page != lastpage) { 1205 if (page != lastpage) {
1235 /* skip this page unless it's marked as needing cleaning */ 1206 /* skip this page unless it's marked as needing cleaning */
@@ -1298,17 +1269,16 @@ void bitmap_daemon_work(mddev_t *mddev)
1298 -1); 1269 -1);
1299 1270
1300 /* clear the bit */ 1271 /* clear the bit */
1301 if (page) { 1272 paddr = kmap_atomic(page, KM_USER0);
1302 paddr = kmap_atomic(page, KM_USER0); 1273 if (bitmap->flags & BITMAP_HOSTENDIAN)
1303 if (bitmap->flags & BITMAP_HOSTENDIAN) 1274 clear_bit(file_page_offset(bitmap, j),
1304 clear_bit(file_page_offset(bitmap, j), 1275 paddr);
1305 paddr); 1276 else
1306 else 1277 __clear_bit_le(
1307 __test_and_clear_bit_le(file_page_offset(bitmap, j), 1278 file_page_offset(bitmap,
1308 paddr); 1279 j),
1309 kunmap_atomic(paddr, KM_USER0); 1280 paddr);
1310 } else 1281 kunmap_atomic(paddr, KM_USER0);
1311 log->type->clear_region(log, j);
1312 } 1282 }
1313 } else 1283 } else
1314 j |= PAGE_COUNTER_MASK; 1284 j |= PAGE_COUNTER_MASK;
@@ -1316,16 +1286,12 @@ void bitmap_daemon_work(mddev_t *mddev)
1316 spin_unlock_irqrestore(&bitmap->lock, flags); 1286 spin_unlock_irqrestore(&bitmap->lock, flags);
1317 1287
1318 /* now sync the final page */ 1288 /* now sync the final page */
1319 if (lastpage != NULL || log != NULL) { 1289 if (lastpage != NULL) {
1320 spin_lock_irqsave(&bitmap->lock, flags); 1290 spin_lock_irqsave(&bitmap->lock, flags);
1321 if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) { 1291 if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
1322 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1292 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1323 spin_unlock_irqrestore(&bitmap->lock, flags); 1293 spin_unlock_irqrestore(&bitmap->lock, flags);
1324 if (lastpage) 1294 write_page(bitmap, lastpage, 0);
1325 write_page(bitmap, lastpage, 0);
1326 else
1327 if (log->type->flush(log))
1328 bitmap->flags |= BITMAP_WRITE_ERROR;
1329 } else { 1295 } else {
1330 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1296 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1331 spin_unlock_irqrestore(&bitmap->lock, flags); 1297 spin_unlock_irqrestore(&bitmap->lock, flags);
@@ -1767,12 +1733,10 @@ int bitmap_create(mddev_t *mddev)
1767 BUILD_BUG_ON(sizeof(bitmap_super_t) != 256); 1733 BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
1768 1734
1769 if (!file 1735 if (!file
1770 && !mddev->bitmap_info.offset 1736 && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */
1771 && !mddev->bitmap_info.log) /* bitmap disabled, nothing to do */
1772 return 0; 1737 return 0;
1773 1738
1774 BUG_ON(file && mddev->bitmap_info.offset); 1739 BUG_ON(file && mddev->bitmap_info.offset);
1775 BUG_ON(mddev->bitmap_info.offset && mddev->bitmap_info.log);
1776 1740
1777 bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); 1741 bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
1778 if (!bitmap) 1742 if (!bitmap)
@@ -1863,6 +1827,7 @@ int bitmap_create(mddev_t *mddev)
1863int bitmap_load(mddev_t *mddev) 1827int bitmap_load(mddev_t *mddev)
1864{ 1828{
1865 int err = 0; 1829 int err = 0;
1830 sector_t start = 0;
1866 sector_t sector = 0; 1831 sector_t sector = 0;
1867 struct bitmap *bitmap = mddev->bitmap; 1832 struct bitmap *bitmap = mddev->bitmap;
1868 1833
@@ -1881,24 +1846,14 @@ int bitmap_load(mddev_t *mddev)
1881 } 1846 }
1882 bitmap_close_sync(bitmap); 1847 bitmap_close_sync(bitmap);
1883 1848
1884 if (mddev->bitmap_info.log) { 1849 if (mddev->degraded == 0
1885 unsigned long i; 1850 || bitmap->events_cleared == mddev->events)
1886 struct dm_dirty_log *log = mddev->bitmap_info.log; 1851 /* no need to keep dirty bits to optimise a
1887 for (i = 0; i < bitmap->chunks; i++) 1852 * re-add of a missing device */
1888 if (!log->type->in_sync(log, i, 1)) 1853 start = mddev->recovery_cp;
1889 bitmap_set_memory_bits(bitmap, 1854
1890 (sector_t)i << CHUNK_BLOCK_SHIFT(bitmap), 1855 err = bitmap_init_from_disk(bitmap, start);
1891 1); 1856
1892 } else {
1893 sector_t start = 0;
1894 if (mddev->degraded == 0
1895 || bitmap->events_cleared == mddev->events)
1896 /* no need to keep dirty bits to optimise a
1897 * re-add of a missing device */
1898 start = mddev->recovery_cp;
1899
1900 err = bitmap_init_from_disk(bitmap, start);
1901 }
1902 if (err) 1857 if (err)
1903 goto out; 1858 goto out;
1904 1859
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index b2a127e891ac..a28f2e5588c6 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -212,10 +212,6 @@ struct bitmap {
212 unsigned long file_pages; /* number of pages in the file */ 212 unsigned long file_pages; /* number of pages in the file */
213 int last_page_size; /* bytes in the last page */ 213 int last_page_size; /* bytes in the last page */
214 214
215 unsigned long logattrs; /* used when filemap_attr doesn't exist
216 * because we are working with a dirty_log
217 */
218
219 unsigned long flags; 215 unsigned long flags;
220 216
221 int allclean; 217 int allclean;
@@ -237,7 +233,6 @@ struct bitmap {
237 wait_queue_head_t behind_wait; 233 wait_queue_head_t behind_wait;
238 234
239 struct sysfs_dirent *sysfs_can_clear; 235 struct sysfs_dirent *sysfs_can_clear;
240
241}; 236};
242 237
243/* the bitmap API */ 238/* the bitmap API */
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index c8827ffd85bb..bae6c4e23d3f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -19,7 +19,7 @@
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/backing-dev.h> 20#include <linux/backing-dev.h>
21#include <linux/percpu.h> 21#include <linux/percpu.h>
22#include <asm/atomic.h> 22#include <linux/atomic.h>
23#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
24#include <asm/page.h> 24#include <asm/page.h>
25#include <asm/unaligned.h> 25#include <asm/unaligned.h>
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 819e37eaaeba..320401dec104 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -10,7 +10,7 @@
10 */ 10 */
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13#include <asm/atomic.h> 13#include <linux/atomic.h>
14#include <linux/blkdev.h> 14#include <linux/blkdev.h>
15#include <linux/fs.h> 15#include <linux/fs.h>
16#include <linux/init.h> 16#include <linux/init.h>
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index aa4e570c2cb5..c3547016f0f1 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -19,7 +19,7 @@
19#include <linux/time.h> 19#include <linux/time.h>
20#include <linux/workqueue.h> 20#include <linux/workqueue.h>
21#include <scsi/scsi_dh.h> 21#include <scsi/scsi_dh.h>
22#include <asm/atomic.h> 22#include <linux/atomic.h>
23 23
24#define DM_MSG_PREFIX "multipath" 24#define DM_MSG_PREFIX "multipath"
25#define MESG_STR(x) x, sizeof(x) 25#define MESG_STR(x) x, sizeof(x)
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
index f92b6cea9d9c..03a837aa5ce6 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-queue-length.c
@@ -20,7 +20,7 @@
20#include <linux/ctype.h> 20#include <linux/ctype.h>
21#include <linux/errno.h> 21#include <linux/errno.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <asm/atomic.h> 23#include <linux/atomic.h>
24 24
25#define DM_MSG_PREFIX "multipath queue-length" 25#define DM_MSG_PREFIX "multipath queue-length"
26#define QL_MIN_IO 128 26#define QL_MIN_IO 128
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 451c3bb176d2..bfe9c2333cea 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -17,7 +17,7 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/mutex.h> 18#include <linux/mutex.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <asm/atomic.h> 20#include <linux/atomic.h>
21 21
22#define DM_MSG_PREFIX "table" 22#define DM_MSG_PREFIX "table"
23 23
diff --git a/drivers/md/md.c b/drivers/md/md.c
index dfc9425db70b..8e221a20f5d9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -215,6 +215,55 @@ struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
215} 215}
216EXPORT_SYMBOL_GPL(bio_clone_mddev); 216EXPORT_SYMBOL_GPL(bio_clone_mddev);
217 217
218void md_trim_bio(struct bio *bio, int offset, int size)
219{
220 /* 'bio' is a cloned bio which we need to trim to match
221 * the given offset and size.
222 * This requires adjusting bi_sector, bi_size, and bi_io_vec
223 */
224 int i;
225 struct bio_vec *bvec;
226 int sofar = 0;
227
228 size <<= 9;
229 if (offset == 0 && size == bio->bi_size)
230 return;
231
232 bio->bi_sector += offset;
233 bio->bi_size = size;
234 offset <<= 9;
235 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
236
237 while (bio->bi_idx < bio->bi_vcnt &&
238 bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
239 /* remove this whole bio_vec */
240 offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
241 bio->bi_idx++;
242 }
243 if (bio->bi_idx < bio->bi_vcnt) {
244 bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
245 bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
246 }
247 /* avoid any complications with bi_idx being non-zero*/
248 if (bio->bi_idx) {
249 memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
250 (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
251 bio->bi_vcnt -= bio->bi_idx;
252 bio->bi_idx = 0;
253 }
254 /* Make sure vcnt and last bv are not too big */
255 bio_for_each_segment(bvec, bio, i) {
256 if (sofar + bvec->bv_len > size)
257 bvec->bv_len = size - sofar;
258 if (bvec->bv_len == 0) {
259 bio->bi_vcnt = i;
260 break;
261 }
262 sofar += bvec->bv_len;
263 }
264}
265EXPORT_SYMBOL_GPL(md_trim_bio);
266
218/* 267/*
219 * We have a system wide 'event count' that is incremented 268 * We have a system wide 'event count' that is incremented
220 * on any 'interesting' event, and readers of /proc/mdstat 269 * on any 'interesting' event, and readers of /proc/mdstat
@@ -757,6 +806,10 @@ static void free_disk_sb(mdk_rdev_t * rdev)
757 rdev->sb_start = 0; 806 rdev->sb_start = 0;
758 rdev->sectors = 0; 807 rdev->sectors = 0;
759 } 808 }
809 if (rdev->bb_page) {
810 put_page(rdev->bb_page);
811 rdev->bb_page = NULL;
812 }
760} 813}
761 814
762 815
@@ -1025,7 +1078,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
1025 ret = -EINVAL; 1078 ret = -EINVAL;
1026 1079
1027 bdevname(rdev->bdev, b); 1080 bdevname(rdev->bdev, b);
1028 sb = (mdp_super_t*)page_address(rdev->sb_page); 1081 sb = page_address(rdev->sb_page);
1029 1082
1030 if (sb->md_magic != MD_SB_MAGIC) { 1083 if (sb->md_magic != MD_SB_MAGIC) {
1031 printk(KERN_ERR "md: invalid raid superblock magic on %s\n", 1084 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
@@ -1054,6 +1107,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
1054 rdev->preferred_minor = sb->md_minor; 1107 rdev->preferred_minor = sb->md_minor;
1055 rdev->data_offset = 0; 1108 rdev->data_offset = 0;
1056 rdev->sb_size = MD_SB_BYTES; 1109 rdev->sb_size = MD_SB_BYTES;
1110 rdev->badblocks.shift = -1;
1057 1111
1058 if (sb->level == LEVEL_MULTIPATH) 1112 if (sb->level == LEVEL_MULTIPATH)
1059 rdev->desc_nr = -1; 1113 rdev->desc_nr = -1;
@@ -1064,7 +1118,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
1064 ret = 1; 1118 ret = 1;
1065 } else { 1119 } else {
1066 __u64 ev1, ev2; 1120 __u64 ev1, ev2;
1067 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page); 1121 mdp_super_t *refsb = page_address(refdev->sb_page);
1068 if (!uuid_equal(refsb, sb)) { 1122 if (!uuid_equal(refsb, sb)) {
1069 printk(KERN_WARNING "md: %s has different UUID to %s\n", 1123 printk(KERN_WARNING "md: %s has different UUID to %s\n",
1070 b, bdevname(refdev->bdev,b2)); 1124 b, bdevname(refdev->bdev,b2));
@@ -1099,7 +1153,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
1099static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) 1153static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1100{ 1154{
1101 mdp_disk_t *desc; 1155 mdp_disk_t *desc;
1102 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page); 1156 mdp_super_t *sb = page_address(rdev->sb_page);
1103 __u64 ev1 = md_event(sb); 1157 __u64 ev1 = md_event(sb);
1104 1158
1105 rdev->raid_disk = -1; 1159 rdev->raid_disk = -1;
@@ -1230,7 +1284,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1230 1284
1231 rdev->sb_size = MD_SB_BYTES; 1285 rdev->sb_size = MD_SB_BYTES;
1232 1286
1233 sb = (mdp_super_t*)page_address(rdev->sb_page); 1287 sb = page_address(rdev->sb_page);
1234 1288
1235 memset(sb, 0, sizeof(*sb)); 1289 memset(sb, 0, sizeof(*sb));
1236 1290
@@ -1395,6 +1449,8 @@ static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
1395 return cpu_to_le32(csum); 1449 return cpu_to_le32(csum);
1396} 1450}
1397 1451
1452static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
1453 int acknowledged);
1398static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) 1454static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1399{ 1455{
1400 struct mdp_superblock_1 *sb; 1456 struct mdp_superblock_1 *sb;
@@ -1435,7 +1491,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1435 if (ret) return ret; 1491 if (ret) return ret;
1436 1492
1437 1493
1438 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 1494 sb = page_address(rdev->sb_page);
1439 1495
1440 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) || 1496 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1441 sb->major_version != cpu_to_le32(1) || 1497 sb->major_version != cpu_to_le32(1) ||
@@ -1473,12 +1529,52 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1473 else 1529 else
1474 rdev->desc_nr = le32_to_cpu(sb->dev_number); 1530 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1475 1531
1532 if (!rdev->bb_page) {
1533 rdev->bb_page = alloc_page(GFP_KERNEL);
1534 if (!rdev->bb_page)
1535 return -ENOMEM;
1536 }
1537 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1538 rdev->badblocks.count == 0) {
1539 /* need to load the bad block list.
1540 * Currently we limit it to one page.
1541 */
1542 s32 offset;
1543 sector_t bb_sector;
1544 u64 *bbp;
1545 int i;
1546 int sectors = le16_to_cpu(sb->bblog_size);
1547 if (sectors > (PAGE_SIZE / 512))
1548 return -EINVAL;
1549 offset = le32_to_cpu(sb->bblog_offset);
1550 if (offset == 0)
1551 return -EINVAL;
1552 bb_sector = (long long)offset;
1553 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1554 rdev->bb_page, READ, true))
1555 return -EIO;
1556 bbp = (u64 *)page_address(rdev->bb_page);
1557 rdev->badblocks.shift = sb->bblog_shift;
1558 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1559 u64 bb = le64_to_cpu(*bbp);
1560 int count = bb & (0x3ff);
1561 u64 sector = bb >> 10;
1562 sector <<= sb->bblog_shift;
1563 count <<= sb->bblog_shift;
1564 if (bb + 1 == 0)
1565 break;
1566 if (md_set_badblocks(&rdev->badblocks,
1567 sector, count, 1) == 0)
1568 return -EINVAL;
1569 }
1570 } else if (sb->bblog_offset == 0)
1571 rdev->badblocks.shift = -1;
1572
1476 if (!refdev) { 1573 if (!refdev) {
1477 ret = 1; 1574 ret = 1;
1478 } else { 1575 } else {
1479 __u64 ev1, ev2; 1576 __u64 ev1, ev2;
1480 struct mdp_superblock_1 *refsb = 1577 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1481 (struct mdp_superblock_1*)page_address(refdev->sb_page);
1482 1578
1483 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 || 1579 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1484 sb->level != refsb->level || 1580 sb->level != refsb->level ||
@@ -1513,7 +1609,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1513 1609
1514static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) 1610static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1515{ 1611{
1516 struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 1612 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1517 __u64 ev1 = le64_to_cpu(sb->events); 1613 __u64 ev1 = le64_to_cpu(sb->events);
1518 1614
1519 rdev->raid_disk = -1; 1615 rdev->raid_disk = -1;
@@ -1619,13 +1715,12 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1619 int max_dev, i; 1715 int max_dev, i;
1620 /* make rdev->sb match mddev and rdev data. */ 1716 /* make rdev->sb match mddev and rdev data. */
1621 1717
1622 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 1718 sb = page_address(rdev->sb_page);
1623 1719
1624 sb->feature_map = 0; 1720 sb->feature_map = 0;
1625 sb->pad0 = 0; 1721 sb->pad0 = 0;
1626 sb->recovery_offset = cpu_to_le64(0); 1722 sb->recovery_offset = cpu_to_le64(0);
1627 memset(sb->pad1, 0, sizeof(sb->pad1)); 1723 memset(sb->pad1, 0, sizeof(sb->pad1));
1628 memset(sb->pad2, 0, sizeof(sb->pad2));
1629 memset(sb->pad3, 0, sizeof(sb->pad3)); 1724 memset(sb->pad3, 0, sizeof(sb->pad3));
1630 1725
1631 sb->utime = cpu_to_le64((__u64)mddev->utime); 1726 sb->utime = cpu_to_le64((__u64)mddev->utime);
@@ -1665,6 +1760,40 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1665 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); 1760 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1666 } 1761 }
1667 1762
1763 if (rdev->badblocks.count == 0)
1764 /* Nothing to do for bad blocks*/ ;
1765 else if (sb->bblog_offset == 0)
1766 /* Cannot record bad blocks on this device */
1767 md_error(mddev, rdev);
1768 else {
1769 struct badblocks *bb = &rdev->badblocks;
1770 u64 *bbp = (u64 *)page_address(rdev->bb_page);
1771 u64 *p = bb->page;
1772 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
1773 if (bb->changed) {
1774 unsigned seq;
1775
1776retry:
1777 seq = read_seqbegin(&bb->lock);
1778
1779 memset(bbp, 0xff, PAGE_SIZE);
1780
1781 for (i = 0 ; i < bb->count ; i++) {
1782 u64 internal_bb = *p++;
1783 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
1784 | BB_LEN(internal_bb));
1785 *bbp++ = cpu_to_le64(store_bb);
1786 }
1787 if (read_seqretry(&bb->lock, seq))
1788 goto retry;
1789
1790 bb->sector = (rdev->sb_start +
1791 (int)le32_to_cpu(sb->bblog_offset));
1792 bb->size = le16_to_cpu(sb->bblog_size);
1793 bb->changed = 0;
1794 }
1795 }
1796
1668 max_dev = 0; 1797 max_dev = 0;
1669 list_for_each_entry(rdev2, &mddev->disks, same_set) 1798 list_for_each_entry(rdev2, &mddev->disks, same_set)
1670 if (rdev2->desc_nr+1 > max_dev) 1799 if (rdev2->desc_nr+1 > max_dev)
@@ -1724,7 +1853,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1724 num_sectors = max_sectors; 1853 num_sectors = max_sectors;
1725 rdev->sb_start = sb_start; 1854 rdev->sb_start = sb_start;
1726 } 1855 }
1727 sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page); 1856 sb = page_address(rdev->sb_page);
1728 sb->data_size = cpu_to_le64(num_sectors); 1857 sb->data_size = cpu_to_le64(num_sectors);
1729 sb->super_offset = rdev->sb_start; 1858 sb->super_offset = rdev->sb_start;
1730 sb->sb_csum = calc_sb_1_csum(sb); 1859 sb->sb_csum = calc_sb_1_csum(sb);
@@ -1922,7 +2051,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1922 bd_link_disk_holder(rdev->bdev, mddev->gendisk); 2051 bd_link_disk_holder(rdev->bdev, mddev->gendisk);
1923 2052
1924 /* May as well allow recovery to be retried once */ 2053 /* May as well allow recovery to be retried once */
1925 mddev->recovery_disabled = 0; 2054 mddev->recovery_disabled++;
1926 2055
1927 return 0; 2056 return 0;
1928 2057
@@ -1953,6 +2082,9 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1953 sysfs_remove_link(&rdev->kobj, "block"); 2082 sysfs_remove_link(&rdev->kobj, "block");
1954 sysfs_put(rdev->sysfs_state); 2083 sysfs_put(rdev->sysfs_state);
1955 rdev->sysfs_state = NULL; 2084 rdev->sysfs_state = NULL;
2085 kfree(rdev->badblocks.page);
2086 rdev->badblocks.count = 0;
2087 rdev->badblocks.page = NULL;
1956 /* We need to delay this, otherwise we can deadlock when 2088 /* We need to delay this, otherwise we can deadlock when
1957 * writing to 'remove' to "dev/state". We also need 2089 * writing to 'remove' to "dev/state". We also need
1958 * to delay it due to rcu usage. 2090 * to delay it due to rcu usage.
@@ -2127,10 +2259,10 @@ static void print_rdev(mdk_rdev_t *rdev, int major_version)
2127 printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version); 2259 printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
2128 switch (major_version) { 2260 switch (major_version) {
2129 case 0: 2261 case 0:
2130 print_sb_90((mdp_super_t*)page_address(rdev->sb_page)); 2262 print_sb_90(page_address(rdev->sb_page));
2131 break; 2263 break;
2132 case 1: 2264 case 1:
2133 print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page)); 2265 print_sb_1(page_address(rdev->sb_page));
2134 break; 2266 break;
2135 } 2267 }
2136 } else 2268 } else
@@ -2194,6 +2326,7 @@ static void md_update_sb(mddev_t * mddev, int force_change)
2194 mdk_rdev_t *rdev; 2326 mdk_rdev_t *rdev;
2195 int sync_req; 2327 int sync_req;
2196 int nospares = 0; 2328 int nospares = 0;
2329 int any_badblocks_changed = 0;
2197 2330
2198repeat: 2331repeat:
2199 /* First make sure individual recovery_offsets are correct */ 2332 /* First make sure individual recovery_offsets are correct */
@@ -2208,8 +2341,18 @@ repeat:
2208 if (!mddev->persistent) { 2341 if (!mddev->persistent) {
2209 clear_bit(MD_CHANGE_CLEAN, &mddev->flags); 2342 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2210 clear_bit(MD_CHANGE_DEVS, &mddev->flags); 2343 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2211 if (!mddev->external) 2344 if (!mddev->external) {
2212 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2345 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2346 list_for_each_entry(rdev, &mddev->disks, same_set) {
2347 if (rdev->badblocks.changed) {
2348 md_ack_all_badblocks(&rdev->badblocks);
2349 md_error(mddev, rdev);
2350 }
2351 clear_bit(Blocked, &rdev->flags);
2352 clear_bit(BlockedBadBlocks, &rdev->flags);
2353 wake_up(&rdev->blocked_wait);
2354 }
2355 }
2213 wake_up(&mddev->sb_wait); 2356 wake_up(&mddev->sb_wait);
2214 return; 2357 return;
2215 } 2358 }
@@ -2265,6 +2408,14 @@ repeat:
2265 MD_BUG(); 2408 MD_BUG();
2266 mddev->events --; 2409 mddev->events --;
2267 } 2410 }
2411
2412 list_for_each_entry(rdev, &mddev->disks, same_set) {
2413 if (rdev->badblocks.changed)
2414 any_badblocks_changed++;
2415 if (test_bit(Faulty, &rdev->flags))
2416 set_bit(FaultRecorded, &rdev->flags);
2417 }
2418
2268 sync_sbs(mddev, nospares); 2419 sync_sbs(mddev, nospares);
2269 spin_unlock_irq(&mddev->write_lock); 2420 spin_unlock_irq(&mddev->write_lock);
2270 2421
@@ -2290,6 +2441,13 @@ repeat:
2290 bdevname(rdev->bdev,b), 2441 bdevname(rdev->bdev,b),
2291 (unsigned long long)rdev->sb_start); 2442 (unsigned long long)rdev->sb_start);
2292 rdev->sb_events = mddev->events; 2443 rdev->sb_events = mddev->events;
2444 if (rdev->badblocks.size) {
2445 md_super_write(mddev, rdev,
2446 rdev->badblocks.sector,
2447 rdev->badblocks.size << 9,
2448 rdev->bb_page);
2449 rdev->badblocks.size = 0;
2450 }
2293 2451
2294 } else 2452 } else
2295 dprintk(")\n"); 2453 dprintk(")\n");
@@ -2313,6 +2471,15 @@ repeat:
2313 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2471 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2314 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 2472 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2315 2473
2474 list_for_each_entry(rdev, &mddev->disks, same_set) {
2475 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2476 clear_bit(Blocked, &rdev->flags);
2477
2478 if (any_badblocks_changed)
2479 md_ack_all_badblocks(&rdev->badblocks);
2480 clear_bit(BlockedBadBlocks, &rdev->flags);
2481 wake_up(&rdev->blocked_wait);
2482 }
2316} 2483}
2317 2484
2318/* words written to sysfs files may, or may not, be \n terminated. 2485/* words written to sysfs files may, or may not, be \n terminated.
@@ -2347,7 +2514,8 @@ state_show(mdk_rdev_t *rdev, char *page)
2347 char *sep = ""; 2514 char *sep = "";
2348 size_t len = 0; 2515 size_t len = 0;
2349 2516
2350 if (test_bit(Faulty, &rdev->flags)) { 2517 if (test_bit(Faulty, &rdev->flags) ||
2518 rdev->badblocks.unacked_exist) {
2351 len+= sprintf(page+len, "%sfaulty",sep); 2519 len+= sprintf(page+len, "%sfaulty",sep);
2352 sep = ","; 2520 sep = ",";
2353 } 2521 }
@@ -2359,7 +2527,8 @@ state_show(mdk_rdev_t *rdev, char *page)
2359 len += sprintf(page+len, "%swrite_mostly",sep); 2527 len += sprintf(page+len, "%swrite_mostly",sep);
2360 sep = ","; 2528 sep = ",";
2361 } 2529 }
2362 if (test_bit(Blocked, &rdev->flags)) { 2530 if (test_bit(Blocked, &rdev->flags) ||
2531 rdev->badblocks.unacked_exist) {
2363 len += sprintf(page+len, "%sblocked", sep); 2532 len += sprintf(page+len, "%sblocked", sep);
2364 sep = ","; 2533 sep = ",";
2365 } 2534 }
@@ -2368,6 +2537,10 @@ state_show(mdk_rdev_t *rdev, char *page)
2368 len += sprintf(page+len, "%sspare", sep); 2537 len += sprintf(page+len, "%sspare", sep);
2369 sep = ","; 2538 sep = ",";
2370 } 2539 }
2540 if (test_bit(WriteErrorSeen, &rdev->flags)) {
2541 len += sprintf(page+len, "%swrite_error", sep);
2542 sep = ",";
2543 }
2371 return len+sprintf(page+len, "\n"); 2544 return len+sprintf(page+len, "\n");
2372} 2545}
2373 2546
@@ -2375,13 +2548,15 @@ static ssize_t
2375state_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2548state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2376{ 2549{
2377 /* can write 2550 /* can write
2378 * faulty - simulates and error 2551 * faulty - simulates an error
2379 * remove - disconnects the device 2552 * remove - disconnects the device
2380 * writemostly - sets write_mostly 2553 * writemostly - sets write_mostly
2381 * -writemostly - clears write_mostly 2554 * -writemostly - clears write_mostly
2382 * blocked - sets the Blocked flag 2555 * blocked - sets the Blocked flags
2383 * -blocked - clears the Blocked flag 2556 * -blocked - clears the Blocked and possibly simulates an error
2384 * insync - sets Insync providing device isn't active 2557 * insync - sets Insync providing device isn't active
2558 * write_error - sets WriteErrorSeen
2559 * -write_error - clears WriteErrorSeen
2385 */ 2560 */
2386 int err = -EINVAL; 2561 int err = -EINVAL;
2387 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2562 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
@@ -2408,7 +2583,15 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2408 set_bit(Blocked, &rdev->flags); 2583 set_bit(Blocked, &rdev->flags);
2409 err = 0; 2584 err = 0;
2410 } else if (cmd_match(buf, "-blocked")) { 2585 } else if (cmd_match(buf, "-blocked")) {
2586 if (!test_bit(Faulty, &rdev->flags) &&
2587 test_bit(BlockedBadBlocks, &rdev->flags)) {
2588 /* metadata handler doesn't understand badblocks,
2589 * so we need to fail the device
2590 */
2591 md_error(rdev->mddev, rdev);
2592 }
2411 clear_bit(Blocked, &rdev->flags); 2593 clear_bit(Blocked, &rdev->flags);
2594 clear_bit(BlockedBadBlocks, &rdev->flags);
2412 wake_up(&rdev->blocked_wait); 2595 wake_up(&rdev->blocked_wait);
2413 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2596 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2414 md_wakeup_thread(rdev->mddev->thread); 2597 md_wakeup_thread(rdev->mddev->thread);
@@ -2417,6 +2600,12 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2417 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 2600 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2418 set_bit(In_sync, &rdev->flags); 2601 set_bit(In_sync, &rdev->flags);
2419 err = 0; 2602 err = 0;
2603 } else if (cmd_match(buf, "write_error")) {
2604 set_bit(WriteErrorSeen, &rdev->flags);
2605 err = 0;
2606 } else if (cmd_match(buf, "-write_error")) {
2607 clear_bit(WriteErrorSeen, &rdev->flags);
2608 err = 0;
2420 } 2609 }
2421 if (!err) 2610 if (!err)
2422 sysfs_notify_dirent_safe(rdev->sysfs_state); 2611 sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -2459,7 +2648,6 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2459{ 2648{
2460 char *e; 2649 char *e;
2461 int err; 2650 int err;
2462 char nm[20];
2463 int slot = simple_strtoul(buf, &e, 10); 2651 int slot = simple_strtoul(buf, &e, 10);
2464 if (strncmp(buf, "none", 4)==0) 2652 if (strncmp(buf, "none", 4)==0)
2465 slot = -1; 2653 slot = -1;
@@ -2482,8 +2670,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2482 hot_remove_disk(rdev->mddev, rdev->raid_disk); 2670 hot_remove_disk(rdev->mddev, rdev->raid_disk);
2483 if (err) 2671 if (err)
2484 return err; 2672 return err;
2485 sprintf(nm, "rd%d", rdev->raid_disk); 2673 sysfs_unlink_rdev(rdev->mddev, rdev);
2486 sysfs_remove_link(&rdev->mddev->kobj, nm);
2487 rdev->raid_disk = -1; 2674 rdev->raid_disk = -1;
2488 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2675 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2489 md_wakeup_thread(rdev->mddev->thread); 2676 md_wakeup_thread(rdev->mddev->thread);
@@ -2522,8 +2709,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2522 return err; 2709 return err;
2523 } else 2710 } else
2524 sysfs_notify_dirent_safe(rdev->sysfs_state); 2711 sysfs_notify_dirent_safe(rdev->sysfs_state);
2525 sprintf(nm, "rd%d", rdev->raid_disk); 2712 if (sysfs_link_rdev(rdev->mddev, rdev))
2526 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
2527 /* failure here is OK */; 2713 /* failure here is OK */;
2528 /* don't wakeup anyone, leave that to userspace. */ 2714 /* don't wakeup anyone, leave that to userspace. */
2529 } else { 2715 } else {
@@ -2712,6 +2898,39 @@ static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t le
2712static struct rdev_sysfs_entry rdev_recovery_start = 2898static struct rdev_sysfs_entry rdev_recovery_start =
2713__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); 2899__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
2714 2900
2901
2902static ssize_t
2903badblocks_show(struct badblocks *bb, char *page, int unack);
2904static ssize_t
2905badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);
2906
2907static ssize_t bb_show(mdk_rdev_t *rdev, char *page)
2908{
2909 return badblocks_show(&rdev->badblocks, page, 0);
2910}
2911static ssize_t bb_store(mdk_rdev_t *rdev, const char *page, size_t len)
2912{
2913 int rv = badblocks_store(&rdev->badblocks, page, len, 0);
2914 /* Maybe that ack was all we needed */
2915 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
2916 wake_up(&rdev->blocked_wait);
2917 return rv;
2918}
2919static struct rdev_sysfs_entry rdev_bad_blocks =
2920__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
2921
2922
2923static ssize_t ubb_show(mdk_rdev_t *rdev, char *page)
2924{
2925 return badblocks_show(&rdev->badblocks, page, 1);
2926}
2927static ssize_t ubb_store(mdk_rdev_t *rdev, const char *page, size_t len)
2928{
2929 return badblocks_store(&rdev->badblocks, page, len, 1);
2930}
2931static struct rdev_sysfs_entry rdev_unack_bad_blocks =
2932__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
2933
2715static struct attribute *rdev_default_attrs[] = { 2934static struct attribute *rdev_default_attrs[] = {
2716 &rdev_state.attr, 2935 &rdev_state.attr,
2717 &rdev_errors.attr, 2936 &rdev_errors.attr,
@@ -2719,6 +2938,8 @@ static struct attribute *rdev_default_attrs[] = {
2719 &rdev_offset.attr, 2938 &rdev_offset.attr,
2720 &rdev_size.attr, 2939 &rdev_size.attr,
2721 &rdev_recovery_start.attr, 2940 &rdev_recovery_start.attr,
2941 &rdev_bad_blocks.attr,
2942 &rdev_unack_bad_blocks.attr,
2722 NULL, 2943 NULL,
2723}; 2944};
2724static ssize_t 2945static ssize_t
@@ -2782,7 +3003,7 @@ static struct kobj_type rdev_ktype = {
2782 .default_attrs = rdev_default_attrs, 3003 .default_attrs = rdev_default_attrs,
2783}; 3004};
2784 3005
2785void md_rdev_init(mdk_rdev_t *rdev) 3006int md_rdev_init(mdk_rdev_t *rdev)
2786{ 3007{
2787 rdev->desc_nr = -1; 3008 rdev->desc_nr = -1;
2788 rdev->saved_raid_disk = -1; 3009 rdev->saved_raid_disk = -1;
@@ -2792,12 +3013,27 @@ void md_rdev_init(mdk_rdev_t *rdev)
2792 rdev->sb_events = 0; 3013 rdev->sb_events = 0;
2793 rdev->last_read_error.tv_sec = 0; 3014 rdev->last_read_error.tv_sec = 0;
2794 rdev->last_read_error.tv_nsec = 0; 3015 rdev->last_read_error.tv_nsec = 0;
3016 rdev->sb_loaded = 0;
3017 rdev->bb_page = NULL;
2795 atomic_set(&rdev->nr_pending, 0); 3018 atomic_set(&rdev->nr_pending, 0);
2796 atomic_set(&rdev->read_errors, 0); 3019 atomic_set(&rdev->read_errors, 0);
2797 atomic_set(&rdev->corrected_errors, 0); 3020 atomic_set(&rdev->corrected_errors, 0);
2798 3021
2799 INIT_LIST_HEAD(&rdev->same_set); 3022 INIT_LIST_HEAD(&rdev->same_set);
2800 init_waitqueue_head(&rdev->blocked_wait); 3023 init_waitqueue_head(&rdev->blocked_wait);
3024
3025 /* Add space to store bad block list.
3026 * This reserves the space even on arrays where it cannot
3027 * be used - I wonder if that matters
3028 */
3029 rdev->badblocks.count = 0;
3030 rdev->badblocks.shift = 0;
3031 rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
3032 seqlock_init(&rdev->badblocks.lock);
3033 if (rdev->badblocks.page == NULL)
3034 return -ENOMEM;
3035
3036 return 0;
2801} 3037}
2802EXPORT_SYMBOL_GPL(md_rdev_init); 3038EXPORT_SYMBOL_GPL(md_rdev_init);
2803/* 3039/*
@@ -2823,8 +3059,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
2823 return ERR_PTR(-ENOMEM); 3059 return ERR_PTR(-ENOMEM);
2824 } 3060 }
2825 3061
2826 md_rdev_init(rdev); 3062 err = md_rdev_init(rdev);
2827 if ((err = alloc_disk_sb(rdev))) 3063 if (err)
3064 goto abort_free;
3065 err = alloc_disk_sb(rdev);
3066 if (err)
2828 goto abort_free; 3067 goto abort_free;
2829 3068
2830 err = lock_rdev(rdev, newdev, super_format == -2); 3069 err = lock_rdev(rdev, newdev, super_format == -2);
@@ -2860,15 +3099,17 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
2860 goto abort_free; 3099 goto abort_free;
2861 } 3100 }
2862 } 3101 }
3102 if (super_format == -1)
3103 /* hot-add for 0.90, or non-persistent: so no badblocks */
3104 rdev->badblocks.shift = -1;
2863 3105
2864 return rdev; 3106 return rdev;
2865 3107
2866abort_free: 3108abort_free:
2867 if (rdev->sb_page) { 3109 if (rdev->bdev)
2868 if (rdev->bdev) 3110 unlock_rdev(rdev);
2869 unlock_rdev(rdev); 3111 free_disk_sb(rdev);
2870 free_disk_sb(rdev); 3112 kfree(rdev->badblocks.page);
2871 }
2872 kfree(rdev); 3113 kfree(rdev);
2873 return ERR_PTR(err); 3114 return ERR_PTR(err);
2874} 3115}
@@ -3149,15 +3390,13 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
3149 } 3390 }
3150 3391
3151 list_for_each_entry(rdev, &mddev->disks, same_set) { 3392 list_for_each_entry(rdev, &mddev->disks, same_set) {
3152 char nm[20];
3153 if (rdev->raid_disk < 0) 3393 if (rdev->raid_disk < 0)
3154 continue; 3394 continue;
3155 if (rdev->new_raid_disk >= mddev->raid_disks) 3395 if (rdev->new_raid_disk >= mddev->raid_disks)
3156 rdev->new_raid_disk = -1; 3396 rdev->new_raid_disk = -1;
3157 if (rdev->new_raid_disk == rdev->raid_disk) 3397 if (rdev->new_raid_disk == rdev->raid_disk)
3158 continue; 3398 continue;
3159 sprintf(nm, "rd%d", rdev->raid_disk); 3399 sysfs_unlink_rdev(mddev, rdev);
3160 sysfs_remove_link(&mddev->kobj, nm);
3161 } 3400 }
3162 list_for_each_entry(rdev, &mddev->disks, same_set) { 3401 list_for_each_entry(rdev, &mddev->disks, same_set) {
3163 if (rdev->raid_disk < 0) 3402 if (rdev->raid_disk < 0)
@@ -3168,11 +3407,10 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
3168 if (rdev->raid_disk < 0) 3407 if (rdev->raid_disk < 0)
3169 clear_bit(In_sync, &rdev->flags); 3408 clear_bit(In_sync, &rdev->flags);
3170 else { 3409 else {
3171 char nm[20]; 3410 if (sysfs_link_rdev(mddev, rdev))
3172 sprintf(nm, "rd%d", rdev->raid_disk); 3411 printk(KERN_WARNING "md: cannot register rd%d"
3173 if(sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) 3412 " for %s after level change\n",
3174 printk("md: cannot register %s for %s after level change\n", 3413 rdev->raid_disk, mdname(mddev));
3175 nm, mdname(mddev));
3176 } 3414 }
3177 } 3415 }
3178 3416
@@ -4504,7 +4742,8 @@ int md_run(mddev_t *mddev)
4504 } 4742 }
4505 4743
4506 if (mddev->bio_set == NULL) 4744 if (mddev->bio_set == NULL)
4507 mddev->bio_set = bioset_create(BIO_POOL_SIZE, sizeof(mddev)); 4745 mddev->bio_set = bioset_create(BIO_POOL_SIZE,
4746 sizeof(mddev_t *));
4508 4747
4509 spin_lock(&pers_lock); 4748 spin_lock(&pers_lock);
4510 pers = find_pers(mddev->level, mddev->clevel); 4749 pers = find_pers(mddev->level, mddev->clevel);
@@ -4621,12 +4860,9 @@ int md_run(mddev_t *mddev)
4621 smp_wmb(); 4860 smp_wmb();
4622 mddev->ready = 1; 4861 mddev->ready = 1;
4623 list_for_each_entry(rdev, &mddev->disks, same_set) 4862 list_for_each_entry(rdev, &mddev->disks, same_set)
4624 if (rdev->raid_disk >= 0) { 4863 if (rdev->raid_disk >= 0)
4625 char nm[20]; 4864 if (sysfs_link_rdev(mddev, rdev))
4626 sprintf(nm, "rd%d", rdev->raid_disk);
4627 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
4628 /* failure here is OK */; 4865 /* failure here is OK */;
4629 }
4630 4866
4631 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4867 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4632 4868
@@ -4854,11 +5090,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4854 sysfs_notify_dirent_safe(mddev->sysfs_state); 5090 sysfs_notify_dirent_safe(mddev->sysfs_state);
4855 5091
4856 list_for_each_entry(rdev, &mddev->disks, same_set) 5092 list_for_each_entry(rdev, &mddev->disks, same_set)
4857 if (rdev->raid_disk >= 0) { 5093 if (rdev->raid_disk >= 0)
4858 char nm[20]; 5094 sysfs_unlink_rdev(mddev, rdev);
4859 sprintf(nm, "rd%d", rdev->raid_disk);
4860 sysfs_remove_link(&mddev->kobj, nm);
4861 }
4862 5095
4863 set_capacity(disk, 0); 5096 set_capacity(disk, 0);
4864 mutex_unlock(&mddev->open_mutex); 5097 mutex_unlock(&mddev->open_mutex);
@@ -6198,18 +6431,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
6198 if (!rdev || test_bit(Faulty, &rdev->flags)) 6431 if (!rdev || test_bit(Faulty, &rdev->flags))
6199 return; 6432 return;
6200 6433
6201 if (mddev->external) 6434 if (!mddev->pers || !mddev->pers->error_handler)
6202 set_bit(Blocked, &rdev->flags);
6203/*
6204 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
6205 mdname(mddev),
6206 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
6207 __builtin_return_address(0),__builtin_return_address(1),
6208 __builtin_return_address(2),__builtin_return_address(3));
6209*/
6210 if (!mddev->pers)
6211 return;
6212 if (!mddev->pers->error_handler)
6213 return; 6435 return;
6214 mddev->pers->error_handler(mddev,rdev); 6436 mddev->pers->error_handler(mddev,rdev);
6215 if (mddev->degraded) 6437 if (mddev->degraded)
@@ -6933,11 +7155,14 @@ void md_do_sync(mddev_t *mddev)
6933 atomic_add(sectors, &mddev->recovery_active); 7155 atomic_add(sectors, &mddev->recovery_active);
6934 } 7156 }
6935 7157
7158 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7159 break;
7160
6936 j += sectors; 7161 j += sectors;
6937 if (j>1) mddev->curr_resync = j; 7162 if (j>1) mddev->curr_resync = j;
6938 mddev->curr_mark_cnt = io_sectors; 7163 mddev->curr_mark_cnt = io_sectors;
6939 if (last_check == 0) 7164 if (last_check == 0)
6940 /* this is the earliers that rebuilt will be 7165 /* this is the earliest that rebuild will be
6941 * visible in /proc/mdstat 7166 * visible in /proc/mdstat
6942 */ 7167 */
6943 md_new_event(mddev); 7168 md_new_event(mddev);
@@ -6946,10 +7171,6 @@ void md_do_sync(mddev_t *mddev)
6946 continue; 7171 continue;
6947 7172
6948 last_check = io_sectors; 7173 last_check = io_sectors;
6949
6950 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6951 break;
6952
6953 repeat: 7174 repeat:
6954 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 7175 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
6955 /* step marks */ 7176 /* step marks */
@@ -7067,29 +7288,23 @@ static int remove_and_add_spares(mddev_t *mddev)
7067 atomic_read(&rdev->nr_pending)==0) { 7288 atomic_read(&rdev->nr_pending)==0) {
7068 if (mddev->pers->hot_remove_disk( 7289 if (mddev->pers->hot_remove_disk(
7069 mddev, rdev->raid_disk)==0) { 7290 mddev, rdev->raid_disk)==0) {
7070 char nm[20]; 7291 sysfs_unlink_rdev(mddev, rdev);
7071 sprintf(nm,"rd%d", rdev->raid_disk);
7072 sysfs_remove_link(&mddev->kobj, nm);
7073 rdev->raid_disk = -1; 7292 rdev->raid_disk = -1;
7074 } 7293 }
7075 } 7294 }
7076 7295
7077 if (mddev->degraded && !mddev->recovery_disabled) { 7296 if (mddev->degraded) {
7078 list_for_each_entry(rdev, &mddev->disks, same_set) { 7297 list_for_each_entry(rdev, &mddev->disks, same_set) {
7079 if (rdev->raid_disk >= 0 && 7298 if (rdev->raid_disk >= 0 &&
7080 !test_bit(In_sync, &rdev->flags) && 7299 !test_bit(In_sync, &rdev->flags) &&
7081 !test_bit(Faulty, &rdev->flags) && 7300 !test_bit(Faulty, &rdev->flags))
7082 !test_bit(Blocked, &rdev->flags))
7083 spares++; 7301 spares++;
7084 if (rdev->raid_disk < 0 7302 if (rdev->raid_disk < 0
7085 && !test_bit(Faulty, &rdev->flags)) { 7303 && !test_bit(Faulty, &rdev->flags)) {
7086 rdev->recovery_offset = 0; 7304 rdev->recovery_offset = 0;
7087 if (mddev->pers-> 7305 if (mddev->pers->
7088 hot_add_disk(mddev, rdev) == 0) { 7306 hot_add_disk(mddev, rdev) == 0) {
7089 char nm[20]; 7307 if (sysfs_link_rdev(mddev, rdev))
7090 sprintf(nm, "rd%d", rdev->raid_disk);
7091 if (sysfs_create_link(&mddev->kobj,
7092 &rdev->kobj, nm))
7093 /* failure here is OK */; 7308 /* failure here is OK */;
7094 spares++; 7309 spares++;
7095 md_new_event(mddev); 7310 md_new_event(mddev);
@@ -7138,6 +7353,8 @@ static void reap_sync_thread(mddev_t *mddev)
7138 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7353 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7139 sysfs_notify_dirent_safe(mddev->sysfs_action); 7354 sysfs_notify_dirent_safe(mddev->sysfs_action);
7140 md_new_event(mddev); 7355 md_new_event(mddev);
7356 if (mddev->event_work.func)
7357 queue_work(md_misc_wq, &mddev->event_work);
7141} 7358}
7142 7359
7143/* 7360/*
@@ -7170,9 +7387,6 @@ void md_check_recovery(mddev_t *mddev)
7170 if (mddev->bitmap) 7387 if (mddev->bitmap)
7171 bitmap_daemon_work(mddev); 7388 bitmap_daemon_work(mddev);
7172 7389
7173 if (mddev->ro)
7174 return;
7175
7176 if (signal_pending(current)) { 7390 if (signal_pending(current)) {
7177 if (mddev->pers->sync_request && !mddev->external) { 7391 if (mddev->pers->sync_request && !mddev->external) {
7178 printk(KERN_INFO "md: %s in immediate safe mode\n", 7392 printk(KERN_INFO "md: %s in immediate safe mode\n",
@@ -7209,9 +7423,7 @@ void md_check_recovery(mddev_t *mddev)
7209 atomic_read(&rdev->nr_pending)==0) { 7423 atomic_read(&rdev->nr_pending)==0) {
7210 if (mddev->pers->hot_remove_disk( 7424 if (mddev->pers->hot_remove_disk(
7211 mddev, rdev->raid_disk)==0) { 7425 mddev, rdev->raid_disk)==0) {
7212 char nm[20]; 7426 sysfs_unlink_rdev(mddev, rdev);
7213 sprintf(nm,"rd%d", rdev->raid_disk);
7214 sysfs_remove_link(&mddev->kobj, nm);
7215 rdev->raid_disk = -1; 7427 rdev->raid_disk = -1;
7216 } 7428 }
7217 } 7429 }
@@ -7331,12 +7543,499 @@ void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
7331{ 7543{
7332 sysfs_notify_dirent_safe(rdev->sysfs_state); 7544 sysfs_notify_dirent_safe(rdev->sysfs_state);
7333 wait_event_timeout(rdev->blocked_wait, 7545 wait_event_timeout(rdev->blocked_wait,
7334 !test_bit(Blocked, &rdev->flags), 7546 !test_bit(Blocked, &rdev->flags) &&
7547 !test_bit(BlockedBadBlocks, &rdev->flags),
7335 msecs_to_jiffies(5000)); 7548 msecs_to_jiffies(5000));
7336 rdev_dec_pending(rdev, mddev); 7549 rdev_dec_pending(rdev, mddev);
7337} 7550}
7338EXPORT_SYMBOL(md_wait_for_blocked_rdev); 7551EXPORT_SYMBOL(md_wait_for_blocked_rdev);
7339 7552
7553
7554/* Bad block management.
7555 * We can record which blocks on each device are 'bad' and so just
7556 * fail those blocks, or that stripe, rather than the whole device.
7557 * Entries in the bad-block table are 64bits wide. This comprises:
7558 * Length of bad-range, in sectors: 0-511 for lengths 1-512
7559 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
7560 * A 'shift' can be set so that larger blocks are tracked and
7561 * consequently larger devices can be covered.
7562 * 'Acknowledged' flag - 1 bit. - the most significant bit.
7563 *
7564 * Locking of the bad-block table uses a seqlock so md_is_badblock
7565 * might need to retry if it is very unlucky.
7566 * We will sometimes want to check for bad blocks in a bi_end_io function,
7567 * so we use the write_seqlock_irq variant.
7568 *
7569 * When looking for a bad block we specify a range and want to
7570 * know if any block in the range is bad. So we binary-search
7571 * to the last range that starts at-or-before the given endpoint,
7572 * (or "before the sector after the target range")
7573 * then see if it ends after the given start.
7574 * We return
7575 * 0 if there are no known bad blocks in the range
7576 * 1 if there are known bad block which are all acknowledged
7577 * -1 if there are bad blocks which have not yet been acknowledged in metadata.
7578 * plus the start/length of the first bad section we overlap.
7579 */
7580int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
7581 sector_t *first_bad, int *bad_sectors)
7582{
7583 int hi;
7584 int lo = 0;
7585 u64 *p = bb->page;
7586 int rv = 0;
7587 sector_t target = s + sectors;
7588 unsigned seq;
7589
7590 if (bb->shift > 0) {
7591 /* round the start down, and the end up */
7592 s >>= bb->shift;
7593 target += (1<<bb->shift) - 1;
7594 target >>= bb->shift;
7595 sectors = target - s;
7596 }
7597 /* 'target' is now the first block after the bad range */
7598
7599retry:
7600 seq = read_seqbegin(&bb->lock);
7601
7602 hi = bb->count;
7603
7604 /* Binary search between lo and hi for 'target'
7605 * i.e. for the last range that starts before 'target'
7606 */
7607 /* INVARIANT: ranges before 'lo' and at-or-after 'hi'
7608 * are known not to be the last range before target.
7609 * VARIANT: hi-lo is the number of possible
7610 * ranges, and decreases until it reaches 1
7611 */
7612 while (hi - lo > 1) {
7613 int mid = (lo + hi) / 2;
7614 sector_t a = BB_OFFSET(p[mid]);
7615 if (a < target)
7616 /* This could still be the one, earlier ranges
7617 * could not. */
7618 lo = mid;
7619 else
7620 /* This and later ranges are definitely out. */
7621 hi = mid;
7622 }
7623 /* 'lo' might be the last that started before target, but 'hi' isn't */
7624 if (hi > lo) {
7625 /* need to check all range that end after 's' to see if
7626 * any are unacknowledged.
7627 */
7628 while (lo >= 0 &&
7629 BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
7630 if (BB_OFFSET(p[lo]) < target) {
7631 /* starts before the end, and finishes after
7632 * the start, so they must overlap
7633 */
7634 if (rv != -1 && BB_ACK(p[lo]))
7635 rv = 1;
7636 else
7637 rv = -1;
7638 *first_bad = BB_OFFSET(p[lo]);
7639 *bad_sectors = BB_LEN(p[lo]);
7640 }
7641 lo--;
7642 }
7643 }
7644
7645 if (read_seqretry(&bb->lock, seq))
7646 goto retry;
7647
7648 return rv;
7649}
7650EXPORT_SYMBOL_GPL(md_is_badblock);
7651
7652/*
7653 * Add a range of bad blocks to the table.
7654 * This might extend the table, or might contract it
7655 * if two adjacent ranges can be merged.
7656 * We binary-search to find the 'insertion' point, then
7657 * decide how best to handle it.
7658 */
7659static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
7660 int acknowledged)
7661{
7662 u64 *p;
7663 int lo, hi;
7664 int rv = 1;
7665
7666 if (bb->shift < 0)
7667 /* badblocks are disabled */
7668 return 0;
7669
7670 if (bb->shift) {
7671 /* round the start down, and the end up */
7672 sector_t next = s + sectors;
7673 s >>= bb->shift;
7674 next += (1<<bb->shift) - 1;
7675 next >>= bb->shift;
7676 sectors = next - s;
7677 }
7678
7679 write_seqlock_irq(&bb->lock);
7680
7681 p = bb->page;
7682 lo = 0;
7683 hi = bb->count;
7684 /* Find the last range that starts at-or-before 's' */
7685 while (hi - lo > 1) {
7686 int mid = (lo + hi) / 2;
7687 sector_t a = BB_OFFSET(p[mid]);
7688 if (a <= s)
7689 lo = mid;
7690 else
7691 hi = mid;
7692 }
7693 if (hi > lo && BB_OFFSET(p[lo]) > s)
7694 hi = lo;
7695
7696 if (hi > lo) {
7697 /* we found a range that might merge with the start
7698 * of our new range
7699 */
7700 sector_t a = BB_OFFSET(p[lo]);
7701 sector_t e = a + BB_LEN(p[lo]);
7702 int ack = BB_ACK(p[lo]);
7703 if (e >= s) {
7704 /* Yes, we can merge with a previous range */
7705 if (s == a && s + sectors >= e)
7706 /* new range covers old */
7707 ack = acknowledged;
7708 else
7709 ack = ack && acknowledged;
7710
7711 if (e < s + sectors)
7712 e = s + sectors;
7713 if (e - a <= BB_MAX_LEN) {
7714 p[lo] = BB_MAKE(a, e-a, ack);
7715 s = e;
7716 } else {
7717 /* does not all fit in one range,
7718 * make p[lo] maximal
7719 */
7720 if (BB_LEN(p[lo]) != BB_MAX_LEN)
7721 p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
7722 s = a + BB_MAX_LEN;
7723 }
7724 sectors = e - s;
7725 }
7726 }
7727 if (sectors && hi < bb->count) {
7728 /* 'hi' points to the first range that starts after 's'.
7729 * Maybe we can merge with the start of that range */
7730 sector_t a = BB_OFFSET(p[hi]);
7731 sector_t e = a + BB_LEN(p[hi]);
7732 int ack = BB_ACK(p[hi]);
7733 if (a <= s + sectors) {
7734 /* merging is possible */
7735 if (e <= s + sectors) {
7736 /* full overlap */
7737 e = s + sectors;
7738 ack = acknowledged;
7739 } else
7740 ack = ack && acknowledged;
7741
7742 a = s;
7743 if (e - a <= BB_MAX_LEN) {
7744 p[hi] = BB_MAKE(a, e-a, ack);
7745 s = e;
7746 } else {
7747 p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
7748 s = a + BB_MAX_LEN;
7749 }
7750 sectors = e - s;
7751 lo = hi;
7752 hi++;
7753 }
7754 }
7755 if (sectors == 0 && hi < bb->count) {
7756 /* we might be able to combine lo and hi */
7757 /* Note: 's' is at the end of 'lo' */
7758 sector_t a = BB_OFFSET(p[hi]);
7759 int lolen = BB_LEN(p[lo]);
7760 int hilen = BB_LEN(p[hi]);
7761 int newlen = lolen + hilen - (s - a);
7762 if (s >= a && newlen < BB_MAX_LEN) {
7763 /* yes, we can combine them */
7764 int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
7765 p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
7766 memmove(p + hi, p + hi + 1,
7767 (bb->count - hi - 1) * 8);
7768 bb->count--;
7769 }
7770 }
7771 while (sectors) {
7772 /* didn't merge (it all).
7773 * Need to add a range just before 'hi' */
7774 if (bb->count >= MD_MAX_BADBLOCKS) {
7775 /* No room for more */
7776 rv = 0;
7777 break;
7778 } else {
7779 int this_sectors = sectors;
7780 memmove(p + hi + 1, p + hi,
7781 (bb->count - hi) * 8);
7782 bb->count++;
7783
7784 if (this_sectors > BB_MAX_LEN)
7785 this_sectors = BB_MAX_LEN;
7786 p[hi] = BB_MAKE(s, this_sectors, acknowledged);
7787 sectors -= this_sectors;
7788 s += this_sectors;
7789 }
7790 }
7791
7792 bb->changed = 1;
7793 if (!acknowledged)
7794 bb->unacked_exist = 1;
7795 write_sequnlock_irq(&bb->lock);
7796
7797 return rv;
7798}
7799
7800int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors,
7801 int acknowledged)
7802{
7803 int rv = md_set_badblocks(&rdev->badblocks,
7804 s + rdev->data_offset, sectors, acknowledged);
7805 if (rv) {
7806 /* Make sure they get written out promptly */
7807 set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
7808 md_wakeup_thread(rdev->mddev->thread);
7809 }
7810 return rv;
7811}
7812EXPORT_SYMBOL_GPL(rdev_set_badblocks);
7813
7814/*
7815 * Remove a range of bad blocks from the table.
7816 * This may involve extending the table if we spilt a region,
7817 * but it must not fail. So if the table becomes full, we just
7818 * drop the remove request.
7819 */
7820static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
7821{
7822 u64 *p;
7823 int lo, hi;
7824 sector_t target = s + sectors;
7825 int rv = 0;
7826
7827 if (bb->shift > 0) {
7828 /* When clearing we round the start up and the end down.
7829 * This should not matter as the shift should align with
7830 * the block size and no rounding should ever be needed.
7831 * However it is better the think a block is bad when it
7832 * isn't than to think a block is not bad when it is.
7833 */
7834 s += (1<<bb->shift) - 1;
7835 s >>= bb->shift;
7836 target >>= bb->shift;
7837 sectors = target - s;
7838 }
7839
7840 write_seqlock_irq(&bb->lock);
7841
7842 p = bb->page;
7843 lo = 0;
7844 hi = bb->count;
7845 /* Find the last range that starts before 'target' */
7846 while (hi - lo > 1) {
7847 int mid = (lo + hi) / 2;
7848 sector_t a = BB_OFFSET(p[mid]);
7849 if (a < target)
7850 lo = mid;
7851 else
7852 hi = mid;
7853 }
7854 if (hi > lo) {
7855 /* p[lo] is the last range that could overlap the
7856 * current range. Earlier ranges could also overlap,
7857 * but only this one can overlap the end of the range.
7858 */
7859 if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
7860 /* Partial overlap, leave the tail of this range */
7861 int ack = BB_ACK(p[lo]);
7862 sector_t a = BB_OFFSET(p[lo]);
7863 sector_t end = a + BB_LEN(p[lo]);
7864
7865 if (a < s) {
7866 /* we need to split this range */
7867 if (bb->count >= MD_MAX_BADBLOCKS) {
7868 rv = 0;
7869 goto out;
7870 }
7871 memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
7872 bb->count++;
7873 p[lo] = BB_MAKE(a, s-a, ack);
7874 lo++;
7875 }
7876 p[lo] = BB_MAKE(target, end - target, ack);
7877 /* there is no longer an overlap */
7878 hi = lo;
7879 lo--;
7880 }
7881 while (lo >= 0 &&
7882 BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
7883 /* This range does overlap */
7884 if (BB_OFFSET(p[lo]) < s) {
7885 /* Keep the early parts of this range. */
7886 int ack = BB_ACK(p[lo]);
7887 sector_t start = BB_OFFSET(p[lo]);
7888 p[lo] = BB_MAKE(start, s - start, ack);
7889 /* now low doesn't overlap, so.. */
7890 break;
7891 }
7892 lo--;
7893 }
7894 /* 'lo' is strictly before, 'hi' is strictly after,
7895 * anything between needs to be discarded
7896 */
7897 if (hi - lo > 1) {
7898 memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
7899 bb->count -= (hi - lo - 1);
7900 }
7901 }
7902
7903 bb->changed = 1;
7904out:
7905 write_sequnlock_irq(&bb->lock);
7906 return rv;
7907}
7908
7909int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors)
7910{
7911 return md_clear_badblocks(&rdev->badblocks,
7912 s + rdev->data_offset,
7913 sectors);
7914}
7915EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
7916
7917/*
7918 * Acknowledge all bad blocks in a list.
7919 * This only succeeds if ->changed is clear. It is used by
7920 * in-kernel metadata updates
7921 */
7922void md_ack_all_badblocks(struct badblocks *bb)
7923{
7924 if (bb->page == NULL || bb->changed)
7925 /* no point even trying */
7926 return;
7927 write_seqlock_irq(&bb->lock);
7928
7929 if (bb->changed == 0) {
7930 u64 *p = bb->page;
7931 int i;
7932 for (i = 0; i < bb->count ; i++) {
7933 if (!BB_ACK(p[i])) {
7934 sector_t start = BB_OFFSET(p[i]);
7935 int len = BB_LEN(p[i]);
7936 p[i] = BB_MAKE(start, len, 1);
7937 }
7938 }
7939 bb->unacked_exist = 0;
7940 }
7941 write_sequnlock_irq(&bb->lock);
7942}
7943EXPORT_SYMBOL_GPL(md_ack_all_badblocks);
7944
7945/* sysfs access to bad-blocks list.
7946 * We present two files.
7947 * 'bad-blocks' lists sector numbers and lengths of ranges that
7948 * are recorded as bad. The list is truncated to fit within
7949 * the one-page limit of sysfs.
7950 * Writing "sector length" to this file adds an acknowledged
7951 * bad block list.
7952 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
7953 * been acknowledged. Writing to this file adds bad blocks
7954 * without acknowledging them. This is largely for testing.
7955 */
7956
7957static ssize_t
7958badblocks_show(struct badblocks *bb, char *page, int unack)
7959{
7960 size_t len;
7961 int i;
7962 u64 *p = bb->page;
7963 unsigned seq;
7964
7965 if (bb->shift < 0)
7966 return 0;
7967
7968retry:
7969 seq = read_seqbegin(&bb->lock);
7970
7971 len = 0;
7972 i = 0;
7973
7974 while (len < PAGE_SIZE && i < bb->count) {
7975 sector_t s = BB_OFFSET(p[i]);
7976 unsigned int length = BB_LEN(p[i]);
7977 int ack = BB_ACK(p[i]);
7978 i++;
7979
7980 if (unack && ack)
7981 continue;
7982
7983 len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
7984 (unsigned long long)s << bb->shift,
7985 length << bb->shift);
7986 }
7987 if (unack && len == 0)
7988 bb->unacked_exist = 0;
7989
7990 if (read_seqretry(&bb->lock, seq))
7991 goto retry;
7992
7993 return len;
7994}
7995
7996#define DO_DEBUG 1
7997
7998static ssize_t
7999badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
8000{
8001 unsigned long long sector;
8002 int length;
8003 char newline;
8004#ifdef DO_DEBUG
8005 /* Allow clearing via sysfs *only* for testing/debugging.
8006 * Normally only a successful write may clear a badblock
8007 */
8008 int clear = 0;
8009 if (page[0] == '-') {
8010 clear = 1;
8011 page++;
8012 }
8013#endif /* DO_DEBUG */
8014
8015 switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
8016 case 3:
8017 if (newline != '\n')
8018 return -EINVAL;
8019 case 2:
8020 if (length <= 0)
8021 return -EINVAL;
8022 break;
8023 default:
8024 return -EINVAL;
8025 }
8026
8027#ifdef DO_DEBUG
8028 if (clear) {
8029 md_clear_badblocks(bb, sector, length);
8030 return len;
8031 }
8032#endif /* DO_DEBUG */
8033 if (md_set_badblocks(bb, sector, length, !unack))
8034 return len;
8035 else
8036 return -ENOSPC;
8037}
8038
7340static int md_notify_reboot(struct notifier_block *this, 8039static int md_notify_reboot(struct notifier_block *this,
7341 unsigned long code, void *x) 8040 unsigned long code, void *x)
7342{ 8041{
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1c26c7a08ae6..1e586bb4452e 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -29,6 +29,13 @@
29typedef struct mddev_s mddev_t; 29typedef struct mddev_s mddev_t;
30typedef struct mdk_rdev_s mdk_rdev_t; 30typedef struct mdk_rdev_s mdk_rdev_t;
31 31
32/* Bad block numbers are stored sorted in a single page.
33 * 64bits is used for each block or extent.
34 * 54 bits are sector number, 9 bits are extent size,
35 * 1 bit is an 'acknowledged' flag.
36 */
37#define MD_MAX_BADBLOCKS (PAGE_SIZE/8)
38
32/* 39/*
33 * MD's 'extended' device 40 * MD's 'extended' device
34 */ 41 */
@@ -48,7 +55,7 @@ struct mdk_rdev_s
48 struct block_device *meta_bdev; 55 struct block_device *meta_bdev;
49 struct block_device *bdev; /* block device handle */ 56 struct block_device *bdev; /* block device handle */
50 57
51 struct page *sb_page; 58 struct page *sb_page, *bb_page;
52 int sb_loaded; 59 int sb_loaded;
53 __u64 sb_events; 60 __u64 sb_events;
54 sector_t data_offset; /* start of data in array */ 61 sector_t data_offset; /* start of data in array */
@@ -74,9 +81,29 @@ struct mdk_rdev_s
74#define In_sync 2 /* device is in_sync with rest of array */ 81#define In_sync 2 /* device is in_sync with rest of array */
75#define WriteMostly 4 /* Avoid reading if at all possible */ 82#define WriteMostly 4 /* Avoid reading if at all possible */
76#define AutoDetected 7 /* added by auto-detect */ 83#define AutoDetected 7 /* added by auto-detect */
77#define Blocked 8 /* An error occurred on an externally 84#define Blocked 8 /* An error occurred but has not yet
78 * managed array, don't allow writes 85 * been acknowledged by the metadata
86 * handler, so don't allow writes
79 * until it is cleared */ 87 * until it is cleared */
88#define WriteErrorSeen 9 /* A write error has been seen on this
89 * device
90 */
91#define FaultRecorded 10 /* Intermediate state for clearing
92 * Blocked. The Fault is/will-be
93 * recorded in the metadata, but that
94 * metadata hasn't been stored safely
95 * on disk yet.
96 */
97#define BlockedBadBlocks 11 /* A writer is blocked because they
98 * found an unacknowledged bad-block.
99 * This can safely be cleared at any
100 * time, and the writer will re-check.
101 * It may be set at any time, and at
102 * worst the writer will timeout and
103 * re-check. So setting it as
104 * accurately as possible is good, but
105 * not absolutely critical.
106 */
80 wait_queue_head_t blocked_wait; 107 wait_queue_head_t blocked_wait;
81 108
82 int desc_nr; /* descriptor index in the superblock */ 109 int desc_nr; /* descriptor index in the superblock */
@@ -111,8 +138,54 @@ struct mdk_rdev_s
111 138
112 struct sysfs_dirent *sysfs_state; /* handle for 'state' 139 struct sysfs_dirent *sysfs_state; /* handle for 'state'
113 * sysfs entry */ 140 * sysfs entry */
141
142 struct badblocks {
143 int count; /* count of bad blocks */
144 int unacked_exist; /* there probably are unacknowledged
145 * bad blocks. This is only cleared
146 * when a read discovers none
147 */
148 int shift; /* shift from sectors to block size
149 * a -ve shift means badblocks are
150 * disabled.*/
151 u64 *page; /* badblock list */
152 int changed;
153 seqlock_t lock;
154
155 sector_t sector;
156 sector_t size; /* in sectors */
157 } badblocks;
114}; 158};
115 159
160#define BB_LEN_MASK (0x00000000000001FFULL)
161#define BB_OFFSET_MASK (0x7FFFFFFFFFFFFE00ULL)
162#define BB_ACK_MASK (0x8000000000000000ULL)
163#define BB_MAX_LEN 512
164#define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9)
165#define BB_LEN(x) (((x) & BB_LEN_MASK) + 1)
166#define BB_ACK(x) (!!((x) & BB_ACK_MASK))
167#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
168
169extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
170 sector_t *first_bad, int *bad_sectors);
171static inline int is_badblock(mdk_rdev_t *rdev, sector_t s, int sectors,
172 sector_t *first_bad, int *bad_sectors)
173{
174 if (unlikely(rdev->badblocks.count)) {
175 int rv = md_is_badblock(&rdev->badblocks, rdev->data_offset + s,
176 sectors,
177 first_bad, bad_sectors);
178 if (rv)
179 *first_bad -= rdev->data_offset;
180 return rv;
181 }
182 return 0;
183}
184extern int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors,
185 int acknowledged);
186extern int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors);
187extern void md_ack_all_badblocks(struct badblocks *bb);
188
116struct mddev_s 189struct mddev_s
117{ 190{
118 void *private; 191 void *private;
@@ -239,9 +312,12 @@ struct mddev_s
239#define MD_RECOVERY_FROZEN 9 312#define MD_RECOVERY_FROZEN 9
240 313
241 unsigned long recovery; 314 unsigned long recovery;
242 int recovery_disabled; /* if we detect that recovery 315 /* If a RAID personality determines that recovery (of a particular
243 * will always fail, set this 316 * device) will fail due to a read error on the source device, it
244 * so we don't loop trying */ 317 * takes a copy of this number and does not attempt recovery again
318 * until this number changes.
319 */
320 int recovery_disabled;
245 321
246 int in_sync; /* know to not need resync */ 322 int in_sync; /* know to not need resync */
247 /* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so 323 /* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
@@ -304,11 +380,6 @@ struct mddev_s
304 * hot-adding a bitmap. It should 380 * hot-adding a bitmap. It should
305 * eventually be settable by sysfs. 381 * eventually be settable by sysfs.
306 */ 382 */
307 /* When md is serving under dm, it might use a
308 * dirty_log to store the bits.
309 */
310 struct dm_dirty_log *log;
311
312 struct mutex mutex; 383 struct mutex mutex;
313 unsigned long chunksize; 384 unsigned long chunksize;
314 unsigned long daemon_sleep; /* how many jiffies between updates? */ 385 unsigned long daemon_sleep; /* how many jiffies between updates? */
@@ -413,6 +484,20 @@ static inline char * mdname (mddev_t * mddev)
413 return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; 484 return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
414} 485}
415 486
487static inline int sysfs_link_rdev(mddev_t *mddev, mdk_rdev_t *rdev)
488{
489 char nm[20];
490 sprintf(nm, "rd%d", rdev->raid_disk);
491 return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
492}
493
494static inline void sysfs_unlink_rdev(mddev_t *mddev, mdk_rdev_t *rdev)
495{
496 char nm[20];
497 sprintf(nm, "rd%d", rdev->raid_disk);
498 sysfs_remove_link(&mddev->kobj, nm);
499}
500
416/* 501/*
417 * iterates through some rdev ringlist. It's safe to remove the 502 * iterates through some rdev ringlist. It's safe to remove the
418 * current 'rdev'. Dont touch 'tmp' though. 503 * current 'rdev'. Dont touch 'tmp' though.
@@ -505,7 +590,7 @@ extern void mddev_init(mddev_t *mddev);
505extern int md_run(mddev_t *mddev); 590extern int md_run(mddev_t *mddev);
506extern void md_stop(mddev_t *mddev); 591extern void md_stop(mddev_t *mddev);
507extern void md_stop_writes(mddev_t *mddev); 592extern void md_stop_writes(mddev_t *mddev);
508extern void md_rdev_init(mdk_rdev_t *rdev); 593extern int md_rdev_init(mdk_rdev_t *rdev);
509 594
510extern void mddev_suspend(mddev_t *mddev); 595extern void mddev_suspend(mddev_t *mddev);
511extern void mddev_resume(mddev_t *mddev); 596extern void mddev_resume(mddev_t *mddev);
@@ -514,4 +599,5 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
514extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, 599extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
515 mddev_t *mddev); 600 mddev_t *mddev);
516extern int mddev_check_plugged(mddev_t *mddev); 601extern int mddev_check_plugged(mddev_t *mddev);
602extern void md_trim_bio(struct bio *bio, int offset, int size);
517#endif /* _MD_MD_H */ 603#endif /* _MD_MD_H */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f7431b6d8447..32323f0afd89 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -35,16 +35,13 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/blkdev.h> 36#include <linux/blkdev.h>
37#include <linux/seq_file.h> 37#include <linux/seq_file.h>
38#include <linux/ratelimit.h>
38#include "md.h" 39#include "md.h"
39#include "raid1.h" 40#include "raid1.h"
40#include "bitmap.h" 41#include "bitmap.h"
41 42
42#define DEBUG 0 43#define DEBUG 0
43#if DEBUG 44#define PRINTK(x...) do { if (DEBUG) printk(x); } while (0)
44#define PRINTK(x...) printk(x)
45#else
46#define PRINTK(x...)
47#endif
48 45
49/* 46/*
50 * Number of guaranteed r1bios in case of extreme VM load: 47 * Number of guaranteed r1bios in case of extreme VM load:
@@ -166,7 +163,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
166 163
167 for (i = 0; i < conf->raid_disks; i++) { 164 for (i = 0; i < conf->raid_disks; i++) {
168 struct bio **bio = r1_bio->bios + i; 165 struct bio **bio = r1_bio->bios + i;
169 if (*bio && *bio != IO_BLOCKED) 166 if (!BIO_SPECIAL(*bio))
170 bio_put(*bio); 167 bio_put(*bio);
171 *bio = NULL; 168 *bio = NULL;
172 } 169 }
@@ -176,12 +173,6 @@ static void free_r1bio(r1bio_t *r1_bio)
176{ 173{
177 conf_t *conf = r1_bio->mddev->private; 174 conf_t *conf = r1_bio->mddev->private;
178 175
179 /*
180 * Wake up any possible resync thread that waits for the device
181 * to go idle.
182 */
183 allow_barrier(conf);
184
185 put_all_bios(conf, r1_bio); 176 put_all_bios(conf, r1_bio);
186 mempool_free(r1_bio, conf->r1bio_pool); 177 mempool_free(r1_bio, conf->r1bio_pool);
187} 178}
@@ -222,6 +213,33 @@ static void reschedule_retry(r1bio_t *r1_bio)
222 * operation and are ready to return a success/failure code to the buffer 213 * operation and are ready to return a success/failure code to the buffer
223 * cache layer. 214 * cache layer.
224 */ 215 */
216static void call_bio_endio(r1bio_t *r1_bio)
217{
218 struct bio *bio = r1_bio->master_bio;
219 int done;
220 conf_t *conf = r1_bio->mddev->private;
221
222 if (bio->bi_phys_segments) {
223 unsigned long flags;
224 spin_lock_irqsave(&conf->device_lock, flags);
225 bio->bi_phys_segments--;
226 done = (bio->bi_phys_segments == 0);
227 spin_unlock_irqrestore(&conf->device_lock, flags);
228 } else
229 done = 1;
230
231 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
232 clear_bit(BIO_UPTODATE, &bio->bi_flags);
233 if (done) {
234 bio_endio(bio, 0);
235 /*
236 * Wake up any possible resync thread that waits for the device
237 * to go idle.
238 */
239 allow_barrier(conf);
240 }
241}
242
225static void raid_end_bio_io(r1bio_t *r1_bio) 243static void raid_end_bio_io(r1bio_t *r1_bio)
226{ 244{
227 struct bio *bio = r1_bio->master_bio; 245 struct bio *bio = r1_bio->master_bio;
@@ -234,8 +252,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
234 (unsigned long long) bio->bi_sector + 252 (unsigned long long) bio->bi_sector +
235 (bio->bi_size >> 9) - 1); 253 (bio->bi_size >> 9) - 1);
236 254
237 bio_endio(bio, 255 call_bio_endio(r1_bio);
238 test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
239 } 256 }
240 free_r1bio(r1_bio); 257 free_r1bio(r1_bio);
241} 258}
@@ -287,36 +304,52 @@ static void raid1_end_read_request(struct bio *bio, int error)
287 * oops, read error: 304 * oops, read error:
288 */ 305 */
289 char b[BDEVNAME_SIZE]; 306 char b[BDEVNAME_SIZE];
290 if (printk_ratelimit()) 307 printk_ratelimited(
291 printk(KERN_ERR "md/raid1:%s: %s: rescheduling sector %llu\n", 308 KERN_ERR "md/raid1:%s: %s: "
292 mdname(conf->mddev), 309 "rescheduling sector %llu\n",
293 bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector); 310 mdname(conf->mddev),
311 bdevname(conf->mirrors[mirror].rdev->bdev,
312 b),
313 (unsigned long long)r1_bio->sector);
314 set_bit(R1BIO_ReadError, &r1_bio->state);
294 reschedule_retry(r1_bio); 315 reschedule_retry(r1_bio);
295 } 316 }
296 317
297 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); 318 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
298} 319}
299 320
321static void close_write(r1bio_t *r1_bio)
322{
323 /* it really is the end of this request */
324 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
325 /* free extra copy of the data pages */
326 int i = r1_bio->behind_page_count;
327 while (i--)
328 safe_put_page(r1_bio->behind_bvecs[i].bv_page);
329 kfree(r1_bio->behind_bvecs);
330 r1_bio->behind_bvecs = NULL;
331 }
332 /* clear the bitmap if all writes complete successfully */
333 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
334 r1_bio->sectors,
335 !test_bit(R1BIO_Degraded, &r1_bio->state),
336 test_bit(R1BIO_BehindIO, &r1_bio->state));
337 md_write_end(r1_bio->mddev);
338}
339
300static void r1_bio_write_done(r1bio_t *r1_bio) 340static void r1_bio_write_done(r1bio_t *r1_bio)
301{ 341{
302 if (atomic_dec_and_test(&r1_bio->remaining)) 342 if (!atomic_dec_and_test(&r1_bio->remaining))
303 { 343 return;
304 /* it really is the end of this request */ 344
305 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { 345 if (test_bit(R1BIO_WriteError, &r1_bio->state))
306 /* free extra copy of the data pages */ 346 reschedule_retry(r1_bio);
307 int i = r1_bio->behind_page_count; 347 else {
308 while (i--) 348 close_write(r1_bio);
309 safe_put_page(r1_bio->behind_pages[i]); 349 if (test_bit(R1BIO_MadeGood, &r1_bio->state))
310 kfree(r1_bio->behind_pages); 350 reschedule_retry(r1_bio);
311 r1_bio->behind_pages = NULL; 351 else
312 } 352 raid_end_bio_io(r1_bio);
313 /* clear the bitmap if all writes complete successfully */
314 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
315 r1_bio->sectors,
316 !test_bit(R1BIO_Degraded, &r1_bio->state),
317 test_bit(R1BIO_BehindIO, &r1_bio->state));
318 md_write_end(r1_bio->mddev);
319 raid_end_bio_io(r1_bio);
320 } 353 }
321} 354}
322 355
@@ -336,13 +369,11 @@ static void raid1_end_write_request(struct bio *bio, int error)
336 /* 369 /*
337 * 'one mirror IO has finished' event handler: 370 * 'one mirror IO has finished' event handler:
338 */ 371 */
339 r1_bio->bios[mirror] = NULL;
340 to_put = bio;
341 if (!uptodate) { 372 if (!uptodate) {
342 md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); 373 set_bit(WriteErrorSeen,
343 /* an I/O failed, we can't clear the bitmap */ 374 &conf->mirrors[mirror].rdev->flags);
344 set_bit(R1BIO_Degraded, &r1_bio->state); 375 set_bit(R1BIO_WriteError, &r1_bio->state);
345 } else 376 } else {
346 /* 377 /*
347 * Set R1BIO_Uptodate in our master bio, so that we 378 * Set R1BIO_Uptodate in our master bio, so that we
348 * will return a good error code for to the higher 379 * will return a good error code for to the higher
@@ -353,8 +384,22 @@ static void raid1_end_write_request(struct bio *bio, int error)
353 * to user-side. So if something waits for IO, then it 384 * to user-side. So if something waits for IO, then it
354 * will wait for the 'master' bio. 385 * will wait for the 'master' bio.
355 */ 386 */
387 sector_t first_bad;
388 int bad_sectors;
389
390 r1_bio->bios[mirror] = NULL;
391 to_put = bio;
356 set_bit(R1BIO_Uptodate, &r1_bio->state); 392 set_bit(R1BIO_Uptodate, &r1_bio->state);
357 393
394 /* Maybe we can clear some bad blocks. */
395 if (is_badblock(conf->mirrors[mirror].rdev,
396 r1_bio->sector, r1_bio->sectors,
397 &first_bad, &bad_sectors)) {
398 r1_bio->bios[mirror] = IO_MADE_GOOD;
399 set_bit(R1BIO_MadeGood, &r1_bio->state);
400 }
401 }
402
358 update_head_pos(mirror, r1_bio); 403 update_head_pos(mirror, r1_bio);
359 404
360 if (behind) { 405 if (behind) {
@@ -377,11 +422,13 @@ static void raid1_end_write_request(struct bio *bio, int error)
377 (unsigned long long) mbio->bi_sector, 422 (unsigned long long) mbio->bi_sector,
378 (unsigned long long) mbio->bi_sector + 423 (unsigned long long) mbio->bi_sector +
379 (mbio->bi_size >> 9) - 1); 424 (mbio->bi_size >> 9) - 1);
380 bio_endio(mbio, 0); 425 call_bio_endio(r1_bio);
381 } 426 }
382 } 427 }
383 } 428 }
384 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); 429 if (r1_bio->bios[mirror] == NULL)
430 rdev_dec_pending(conf->mirrors[mirror].rdev,
431 conf->mddev);
385 432
386 /* 433 /*
387 * Let's see if all mirrored write operations have finished 434 * Let's see if all mirrored write operations have finished
@@ -408,10 +455,11 @@ static void raid1_end_write_request(struct bio *bio, int error)
408 * 455 *
409 * The rdev for the device selected will have nr_pending incremented. 456 * The rdev for the device selected will have nr_pending incremented.
410 */ 457 */
411static int read_balance(conf_t *conf, r1bio_t *r1_bio) 458static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors)
412{ 459{
413 const sector_t this_sector = r1_bio->sector; 460 const sector_t this_sector = r1_bio->sector;
414 const int sectors = r1_bio->sectors; 461 int sectors;
462 int best_good_sectors;
415 int start_disk; 463 int start_disk;
416 int best_disk; 464 int best_disk;
417 int i; 465 int i;
@@ -426,8 +474,11 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
426 * We take the first readable disk when above the resync window. 474 * We take the first readable disk when above the resync window.
427 */ 475 */
428 retry: 476 retry:
477 sectors = r1_bio->sectors;
429 best_disk = -1; 478 best_disk = -1;
430 best_dist = MaxSector; 479 best_dist = MaxSector;
480 best_good_sectors = 0;
481
431 if (conf->mddev->recovery_cp < MaxSector && 482 if (conf->mddev->recovery_cp < MaxSector &&
432 (this_sector + sectors >= conf->next_resync)) { 483 (this_sector + sectors >= conf->next_resync)) {
433 choose_first = 1; 484 choose_first = 1;
@@ -439,6 +490,9 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
439 490
440 for (i = 0 ; i < conf->raid_disks ; i++) { 491 for (i = 0 ; i < conf->raid_disks ; i++) {
441 sector_t dist; 492 sector_t dist;
493 sector_t first_bad;
494 int bad_sectors;
495
442 int disk = start_disk + i; 496 int disk = start_disk + i;
443 if (disk >= conf->raid_disks) 497 if (disk >= conf->raid_disks)
444 disk -= conf->raid_disks; 498 disk -= conf->raid_disks;
@@ -461,6 +515,35 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
461 /* This is a reasonable device to use. It might 515 /* This is a reasonable device to use. It might
462 * even be best. 516 * even be best.
463 */ 517 */
518 if (is_badblock(rdev, this_sector, sectors,
519 &first_bad, &bad_sectors)) {
520 if (best_dist < MaxSector)
521 /* already have a better device */
522 continue;
523 if (first_bad <= this_sector) {
524 /* cannot read here. If this is the 'primary'
525 * device, then we must not read beyond
526 * bad_sectors from another device..
527 */
528 bad_sectors -= (this_sector - first_bad);
529 if (choose_first && sectors > bad_sectors)
530 sectors = bad_sectors;
531 if (best_good_sectors > sectors)
532 best_good_sectors = sectors;
533
534 } else {
535 sector_t good_sectors = first_bad - this_sector;
536 if (good_sectors > best_good_sectors) {
537 best_good_sectors = good_sectors;
538 best_disk = disk;
539 }
540 if (choose_first)
541 break;
542 }
543 continue;
544 } else
545 best_good_sectors = sectors;
546
464 dist = abs(this_sector - conf->mirrors[disk].head_position); 547 dist = abs(this_sector - conf->mirrors[disk].head_position);
465 if (choose_first 548 if (choose_first
466 /* Don't change to another disk for sequential reads */ 549 /* Don't change to another disk for sequential reads */
@@ -489,10 +572,12 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
489 rdev_dec_pending(rdev, conf->mddev); 572 rdev_dec_pending(rdev, conf->mddev);
490 goto retry; 573 goto retry;
491 } 574 }
575 sectors = best_good_sectors;
492 conf->next_seq_sect = this_sector + sectors; 576 conf->next_seq_sect = this_sector + sectors;
493 conf->last_used = best_disk; 577 conf->last_used = best_disk;
494 } 578 }
495 rcu_read_unlock(); 579 rcu_read_unlock();
580 *max_sectors = sectors;
496 581
497 return best_disk; 582 return best_disk;
498} 583}
@@ -672,30 +757,31 @@ static void alloc_behind_pages(struct bio *bio, r1bio_t *r1_bio)
672{ 757{
673 int i; 758 int i;
674 struct bio_vec *bvec; 759 struct bio_vec *bvec;
675 struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page*), 760 struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
676 GFP_NOIO); 761 GFP_NOIO);
677 if (unlikely(!pages)) 762 if (unlikely(!bvecs))
678 return; 763 return;
679 764
680 bio_for_each_segment(bvec, bio, i) { 765 bio_for_each_segment(bvec, bio, i) {
681 pages[i] = alloc_page(GFP_NOIO); 766 bvecs[i] = *bvec;
682 if (unlikely(!pages[i])) 767 bvecs[i].bv_page = alloc_page(GFP_NOIO);
768 if (unlikely(!bvecs[i].bv_page))
683 goto do_sync_io; 769 goto do_sync_io;
684 memcpy(kmap(pages[i]) + bvec->bv_offset, 770 memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
685 kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len); 771 kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
686 kunmap(pages[i]); 772 kunmap(bvecs[i].bv_page);
687 kunmap(bvec->bv_page); 773 kunmap(bvec->bv_page);
688 } 774 }
689 r1_bio->behind_pages = pages; 775 r1_bio->behind_bvecs = bvecs;
690 r1_bio->behind_page_count = bio->bi_vcnt; 776 r1_bio->behind_page_count = bio->bi_vcnt;
691 set_bit(R1BIO_BehindIO, &r1_bio->state); 777 set_bit(R1BIO_BehindIO, &r1_bio->state);
692 return; 778 return;
693 779
694do_sync_io: 780do_sync_io:
695 for (i = 0; i < bio->bi_vcnt; i++) 781 for (i = 0; i < bio->bi_vcnt; i++)
696 if (pages[i]) 782 if (bvecs[i].bv_page)
697 put_page(pages[i]); 783 put_page(bvecs[i].bv_page);
698 kfree(pages); 784 kfree(bvecs);
699 PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); 785 PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
700} 786}
701 787
@@ -705,7 +791,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
705 mirror_info_t *mirror; 791 mirror_info_t *mirror;
706 r1bio_t *r1_bio; 792 r1bio_t *r1_bio;
707 struct bio *read_bio; 793 struct bio *read_bio;
708 int i, targets = 0, disks; 794 int i, disks;
709 struct bitmap *bitmap; 795 struct bitmap *bitmap;
710 unsigned long flags; 796 unsigned long flags;
711 const int rw = bio_data_dir(bio); 797 const int rw = bio_data_dir(bio);
@@ -713,6 +799,9 @@ static int make_request(mddev_t *mddev, struct bio * bio)
713 const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); 799 const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
714 mdk_rdev_t *blocked_rdev; 800 mdk_rdev_t *blocked_rdev;
715 int plugged; 801 int plugged;
802 int first_clone;
803 int sectors_handled;
804 int max_sectors;
716 805
717 /* 806 /*
718 * Register the new request and wait if the reconstruction 807 * Register the new request and wait if the reconstruction
@@ -759,11 +848,24 @@ static int make_request(mddev_t *mddev, struct bio * bio)
759 r1_bio->mddev = mddev; 848 r1_bio->mddev = mddev;
760 r1_bio->sector = bio->bi_sector; 849 r1_bio->sector = bio->bi_sector;
761 850
851 /* We might need to issue multiple reads to different
852 * devices if there are bad blocks around, so we keep
853 * track of the number of reads in bio->bi_phys_segments.
854 * If this is 0, there is only one r1_bio and no locking
855 * will be needed when requests complete. If it is
856 * non-zero, then it is the number of not-completed requests.
857 */
858 bio->bi_phys_segments = 0;
859 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
860
762 if (rw == READ) { 861 if (rw == READ) {
763 /* 862 /*
764 * read balancing logic: 863 * read balancing logic:
765 */ 864 */
766 int rdisk = read_balance(conf, r1_bio); 865 int rdisk;
866
867read_again:
868 rdisk = read_balance(conf, r1_bio, &max_sectors);
767 869
768 if (rdisk < 0) { 870 if (rdisk < 0) {
769 /* couldn't find anywhere to read from */ 871 /* couldn't find anywhere to read from */
@@ -784,6 +886,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
784 r1_bio->read_disk = rdisk; 886 r1_bio->read_disk = rdisk;
785 887
786 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); 888 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
889 md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
890 max_sectors);
787 891
788 r1_bio->bios[rdisk] = read_bio; 892 r1_bio->bios[rdisk] = read_bio;
789 893
@@ -793,16 +897,52 @@ static int make_request(mddev_t *mddev, struct bio * bio)
793 read_bio->bi_rw = READ | do_sync; 897 read_bio->bi_rw = READ | do_sync;
794 read_bio->bi_private = r1_bio; 898 read_bio->bi_private = r1_bio;
795 899
796 generic_make_request(read_bio); 900 if (max_sectors < r1_bio->sectors) {
901 /* could not read all from this device, so we will
902 * need another r1_bio.
903 */
904
905 sectors_handled = (r1_bio->sector + max_sectors
906 - bio->bi_sector);
907 r1_bio->sectors = max_sectors;
908 spin_lock_irq(&conf->device_lock);
909 if (bio->bi_phys_segments == 0)
910 bio->bi_phys_segments = 2;
911 else
912 bio->bi_phys_segments++;
913 spin_unlock_irq(&conf->device_lock);
914 /* Cannot call generic_make_request directly
915 * as that will be queued in __make_request
916 * and subsequent mempool_alloc might block waiting
917 * for it. So hand bio over to raid1d.
918 */
919 reschedule_retry(r1_bio);
920
921 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
922
923 r1_bio->master_bio = bio;
924 r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
925 r1_bio->state = 0;
926 r1_bio->mddev = mddev;
927 r1_bio->sector = bio->bi_sector + sectors_handled;
928 goto read_again;
929 } else
930 generic_make_request(read_bio);
797 return 0; 931 return 0;
798 } 932 }
799 933
800 /* 934 /*
801 * WRITE: 935 * WRITE:
802 */ 936 */
803 /* first select target devices under spinlock and 937 /* first select target devices under rcu_lock and
804 * inc refcount on their rdev. Record them by setting 938 * inc refcount on their rdev. Record them by setting
805 * bios[x] to bio 939 * bios[x] to bio
940 * If there are known/acknowledged bad blocks on any device on
941 * which we have seen a write error, we want to avoid writing those
942 * blocks.
943 * This potentially requires several writes to write around
944 * the bad blocks. Each set of writes gets it's own r1bio
945 * with a set of bios attached.
806 */ 946 */
807 plugged = mddev_check_plugged(mddev); 947 plugged = mddev_check_plugged(mddev);
808 948
@@ -810,6 +950,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
810 retry_write: 950 retry_write:
811 blocked_rdev = NULL; 951 blocked_rdev = NULL;
812 rcu_read_lock(); 952 rcu_read_lock();
953 max_sectors = r1_bio->sectors;
813 for (i = 0; i < disks; i++) { 954 for (i = 0; i < disks; i++) {
814 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 955 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
815 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 956 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
@@ -817,17 +958,56 @@ static int make_request(mddev_t *mddev, struct bio * bio)
817 blocked_rdev = rdev; 958 blocked_rdev = rdev;
818 break; 959 break;
819 } 960 }
820 if (rdev && !test_bit(Faulty, &rdev->flags)) { 961 r1_bio->bios[i] = NULL;
821 atomic_inc(&rdev->nr_pending); 962 if (!rdev || test_bit(Faulty, &rdev->flags)) {
822 if (test_bit(Faulty, &rdev->flags)) { 963 set_bit(R1BIO_Degraded, &r1_bio->state);
964 continue;
965 }
966
967 atomic_inc(&rdev->nr_pending);
968 if (test_bit(WriteErrorSeen, &rdev->flags)) {
969 sector_t first_bad;
970 int bad_sectors;
971 int is_bad;
972
973 is_bad = is_badblock(rdev, r1_bio->sector,
974 max_sectors,
975 &first_bad, &bad_sectors);
976 if (is_bad < 0) {
977 /* mustn't write here until the bad block is
978 * acknowledged*/
979 set_bit(BlockedBadBlocks, &rdev->flags);
980 blocked_rdev = rdev;
981 break;
982 }
983 if (is_bad && first_bad <= r1_bio->sector) {
984 /* Cannot write here at all */
985 bad_sectors -= (r1_bio->sector - first_bad);
986 if (bad_sectors < max_sectors)
987 /* mustn't write more than bad_sectors
988 * to other devices yet
989 */
990 max_sectors = bad_sectors;
823 rdev_dec_pending(rdev, mddev); 991 rdev_dec_pending(rdev, mddev);
824 r1_bio->bios[i] = NULL; 992 /* We don't set R1BIO_Degraded as that
825 } else { 993 * only applies if the disk is
826 r1_bio->bios[i] = bio; 994 * missing, so it might be re-added,
827 targets++; 995 * and we want to know to recover this
996 * chunk.
997 * In this case the device is here,
998 * and the fact that this chunk is not
999 * in-sync is recorded in the bad
1000 * block log
1001 */
1002 continue;
828 } 1003 }
829 } else 1004 if (is_bad) {
830 r1_bio->bios[i] = NULL; 1005 int good_sectors = first_bad - r1_bio->sector;
1006 if (good_sectors < max_sectors)
1007 max_sectors = good_sectors;
1008 }
1009 }
1010 r1_bio->bios[i] = bio;
831 } 1011 }
832 rcu_read_unlock(); 1012 rcu_read_unlock();
833 1013
@@ -838,51 +1018,57 @@ static int make_request(mddev_t *mddev, struct bio * bio)
838 for (j = 0; j < i; j++) 1018 for (j = 0; j < i; j++)
839 if (r1_bio->bios[j]) 1019 if (r1_bio->bios[j])
840 rdev_dec_pending(conf->mirrors[j].rdev, mddev); 1020 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
841 1021 r1_bio->state = 0;
842 allow_barrier(conf); 1022 allow_barrier(conf);
843 md_wait_for_blocked_rdev(blocked_rdev, mddev); 1023 md_wait_for_blocked_rdev(blocked_rdev, mddev);
844 wait_barrier(conf); 1024 wait_barrier(conf);
845 goto retry_write; 1025 goto retry_write;
846 } 1026 }
847 1027
848 BUG_ON(targets == 0); /* we never fail the last device */ 1028 if (max_sectors < r1_bio->sectors) {
849 1029 /* We are splitting this write into multiple parts, so
850 if (targets < conf->raid_disks) { 1030 * we need to prepare for allocating another r1_bio.
851 /* array is degraded, we will not clear the bitmap 1031 */
852 * on I/O completion (see raid1_end_write_request) */ 1032 r1_bio->sectors = max_sectors;
853 set_bit(R1BIO_Degraded, &r1_bio->state); 1033 spin_lock_irq(&conf->device_lock);
1034 if (bio->bi_phys_segments == 0)
1035 bio->bi_phys_segments = 2;
1036 else
1037 bio->bi_phys_segments++;
1038 spin_unlock_irq(&conf->device_lock);
854 } 1039 }
855 1040 sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
856 /* do behind I/O ?
857 * Not if there are too many, or cannot allocate memory,
858 * or a reader on WriteMostly is waiting for behind writes
859 * to flush */
860 if (bitmap &&
861 (atomic_read(&bitmap->behind_writes)
862 < mddev->bitmap_info.max_write_behind) &&
863 !waitqueue_active(&bitmap->behind_wait))
864 alloc_behind_pages(bio, r1_bio);
865 1041
866 atomic_set(&r1_bio->remaining, 1); 1042 atomic_set(&r1_bio->remaining, 1);
867 atomic_set(&r1_bio->behind_remaining, 0); 1043 atomic_set(&r1_bio->behind_remaining, 0);
868 1044
869 bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors, 1045 first_clone = 1;
870 test_bit(R1BIO_BehindIO, &r1_bio->state));
871 for (i = 0; i < disks; i++) { 1046 for (i = 0; i < disks; i++) {
872 struct bio *mbio; 1047 struct bio *mbio;
873 if (!r1_bio->bios[i]) 1048 if (!r1_bio->bios[i])
874 continue; 1049 continue;
875 1050
876 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1051 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
877 r1_bio->bios[i] = mbio; 1052 md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
878 1053
879 mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; 1054 if (first_clone) {
880 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1055 /* do behind I/O ?
881 mbio->bi_end_io = raid1_end_write_request; 1056 * Not if there are too many, or cannot
882 mbio->bi_rw = WRITE | do_flush_fua | do_sync; 1057 * allocate memory, or a reader on WriteMostly
883 mbio->bi_private = r1_bio; 1058 * is waiting for behind writes to flush */
884 1059 if (bitmap &&
885 if (r1_bio->behind_pages) { 1060 (atomic_read(&bitmap->behind_writes)
1061 < mddev->bitmap_info.max_write_behind) &&
1062 !waitqueue_active(&bitmap->behind_wait))
1063 alloc_behind_pages(mbio, r1_bio);
1064
1065 bitmap_startwrite(bitmap, r1_bio->sector,
1066 r1_bio->sectors,
1067 test_bit(R1BIO_BehindIO,
1068 &r1_bio->state));
1069 first_clone = 0;
1070 }
1071 if (r1_bio->behind_bvecs) {
886 struct bio_vec *bvec; 1072 struct bio_vec *bvec;
887 int j; 1073 int j;
888 1074
@@ -894,11 +1080,20 @@ static int make_request(mddev_t *mddev, struct bio * bio)
894 * them all 1080 * them all
895 */ 1081 */
896 __bio_for_each_segment(bvec, mbio, j, 0) 1082 __bio_for_each_segment(bvec, mbio, j, 0)
897 bvec->bv_page = r1_bio->behind_pages[j]; 1083 bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
898 if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) 1084 if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
899 atomic_inc(&r1_bio->behind_remaining); 1085 atomic_inc(&r1_bio->behind_remaining);
900 } 1086 }
901 1087
1088 r1_bio->bios[i] = mbio;
1089
1090 mbio->bi_sector = (r1_bio->sector +
1091 conf->mirrors[i].rdev->data_offset);
1092 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1093 mbio->bi_end_io = raid1_end_write_request;
1094 mbio->bi_rw = WRITE | do_flush_fua | do_sync;
1095 mbio->bi_private = r1_bio;
1096
902 atomic_inc(&r1_bio->remaining); 1097 atomic_inc(&r1_bio->remaining);
903 spin_lock_irqsave(&conf->device_lock, flags); 1098 spin_lock_irqsave(&conf->device_lock, flags);
904 bio_list_add(&conf->pending_bio_list, mbio); 1099 bio_list_add(&conf->pending_bio_list, mbio);
@@ -909,6 +1104,19 @@ static int make_request(mddev_t *mddev, struct bio * bio)
909 /* In case raid1d snuck in to freeze_array */ 1104 /* In case raid1d snuck in to freeze_array */
910 wake_up(&conf->wait_barrier); 1105 wake_up(&conf->wait_barrier);
911 1106
1107 if (sectors_handled < (bio->bi_size >> 9)) {
1108 /* We need another r1_bio. It has already been counted
1109 * in bio->bi_phys_segments
1110 */
1111 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1112 r1_bio->master_bio = bio;
1113 r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
1114 r1_bio->state = 0;
1115 r1_bio->mddev = mddev;
1116 r1_bio->sector = bio->bi_sector + sectors_handled;
1117 goto retry_write;
1118 }
1119
912 if (do_sync || !bitmap || !plugged) 1120 if (do_sync || !bitmap || !plugged)
913 md_wakeup_thread(mddev->thread); 1121 md_wakeup_thread(mddev->thread);
914 1122
@@ -952,9 +1160,10 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
952 * However don't try a recovery from this drive as 1160 * However don't try a recovery from this drive as
953 * it is very likely to fail. 1161 * it is very likely to fail.
954 */ 1162 */
955 mddev->recovery_disabled = 1; 1163 conf->recovery_disabled = mddev->recovery_disabled;
956 return; 1164 return;
957 } 1165 }
1166 set_bit(Blocked, &rdev->flags);
958 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1167 if (test_and_clear_bit(In_sync, &rdev->flags)) {
959 unsigned long flags; 1168 unsigned long flags;
960 spin_lock_irqsave(&conf->device_lock, flags); 1169 spin_lock_irqsave(&conf->device_lock, flags);
@@ -1027,7 +1236,7 @@ static int raid1_spare_active(mddev_t *mddev)
1027 && !test_bit(Faulty, &rdev->flags) 1236 && !test_bit(Faulty, &rdev->flags)
1028 && !test_and_set_bit(In_sync, &rdev->flags)) { 1237 && !test_and_set_bit(In_sync, &rdev->flags)) {
1029 count++; 1238 count++;
1030 sysfs_notify_dirent(rdev->sysfs_state); 1239 sysfs_notify_dirent_safe(rdev->sysfs_state);
1031 } 1240 }
1032 } 1241 }
1033 spin_lock_irqsave(&conf->device_lock, flags); 1242 spin_lock_irqsave(&conf->device_lock, flags);
@@ -1048,6 +1257,9 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1048 int first = 0; 1257 int first = 0;
1049 int last = mddev->raid_disks - 1; 1258 int last = mddev->raid_disks - 1;
1050 1259
1260 if (mddev->recovery_disabled == conf->recovery_disabled)
1261 return -EBUSY;
1262
1051 if (rdev->raid_disk >= 0) 1263 if (rdev->raid_disk >= 0)
1052 first = last = rdev->raid_disk; 1264 first = last = rdev->raid_disk;
1053 1265
@@ -1103,7 +1315,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
1103 * is not possible. 1315 * is not possible.
1104 */ 1316 */
1105 if (!test_bit(Faulty, &rdev->flags) && 1317 if (!test_bit(Faulty, &rdev->flags) &&
1106 !mddev->recovery_disabled && 1318 mddev->recovery_disabled != conf->recovery_disabled &&
1107 mddev->degraded < conf->raid_disks) { 1319 mddev->degraded < conf->raid_disks) {
1108 err = -EBUSY; 1320 err = -EBUSY;
1109 goto abort; 1321 goto abort;
@@ -1155,6 +1367,8 @@ static void end_sync_write(struct bio *bio, int error)
1155 conf_t *conf = mddev->private; 1367 conf_t *conf = mddev->private;
1156 int i; 1368 int i;
1157 int mirror=0; 1369 int mirror=0;
1370 sector_t first_bad;
1371 int bad_sectors;
1158 1372
1159 for (i = 0; i < conf->raid_disks; i++) 1373 for (i = 0; i < conf->raid_disks; i++)
1160 if (r1_bio->bios[i] == bio) { 1374 if (r1_bio->bios[i] == bio) {
@@ -1172,18 +1386,48 @@ static void end_sync_write(struct bio *bio, int error)
1172 s += sync_blocks; 1386 s += sync_blocks;
1173 sectors_to_go -= sync_blocks; 1387 sectors_to_go -= sync_blocks;
1174 } while (sectors_to_go > 0); 1388 } while (sectors_to_go > 0);
1175 md_error(mddev, conf->mirrors[mirror].rdev); 1389 set_bit(WriteErrorSeen,
1176 } 1390 &conf->mirrors[mirror].rdev->flags);
1391 set_bit(R1BIO_WriteError, &r1_bio->state);
1392 } else if (is_badblock(conf->mirrors[mirror].rdev,
1393 r1_bio->sector,
1394 r1_bio->sectors,
1395 &first_bad, &bad_sectors) &&
1396 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1397 r1_bio->sector,
1398 r1_bio->sectors,
1399 &first_bad, &bad_sectors)
1400 )
1401 set_bit(R1BIO_MadeGood, &r1_bio->state);
1177 1402
1178 update_head_pos(mirror, r1_bio); 1403 update_head_pos(mirror, r1_bio);
1179 1404
1180 if (atomic_dec_and_test(&r1_bio->remaining)) { 1405 if (atomic_dec_and_test(&r1_bio->remaining)) {
1181 sector_t s = r1_bio->sectors; 1406 int s = r1_bio->sectors;
1182 put_buf(r1_bio); 1407 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1183 md_done_sync(mddev, s, uptodate); 1408 test_bit(R1BIO_WriteError, &r1_bio->state))
1409 reschedule_retry(r1_bio);
1410 else {
1411 put_buf(r1_bio);
1412 md_done_sync(mddev, s, uptodate);
1413 }
1184 } 1414 }
1185} 1415}
1186 1416
1417static int r1_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
1418 int sectors, struct page *page, int rw)
1419{
1420 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
1421 /* success */
1422 return 1;
1423 if (rw == WRITE)
1424 set_bit(WriteErrorSeen, &rdev->flags);
1425 /* need to record an error - either for the block or the device */
1426 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1427 md_error(rdev->mddev, rdev);
1428 return 0;
1429}
1430
1187static int fix_sync_read_error(r1bio_t *r1_bio) 1431static int fix_sync_read_error(r1bio_t *r1_bio)
1188{ 1432{
1189 /* Try some synchronous reads of other devices to get 1433 /* Try some synchronous reads of other devices to get
@@ -1193,6 +1437,9 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
1193 * We don't need to freeze the array, because being in an 1437 * We don't need to freeze the array, because being in an
1194 * active sync request, there is no normal IO, and 1438 * active sync request, there is no normal IO, and
1195 * no overlapping syncs. 1439 * no overlapping syncs.
1440 * We don't need to check is_badblock() again as we
1441 * made sure that anything with a bad block in range
1442 * will have bi_end_io clear.
1196 */ 1443 */
1197 mddev_t *mddev = r1_bio->mddev; 1444 mddev_t *mddev = r1_bio->mddev;
1198 conf_t *conf = mddev->private; 1445 conf_t *conf = mddev->private;
@@ -1217,9 +1464,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
1217 * active, and resync is currently active 1464 * active, and resync is currently active
1218 */ 1465 */
1219 rdev = conf->mirrors[d].rdev; 1466 rdev = conf->mirrors[d].rdev;
1220 if (sync_page_io(rdev, 1467 if (sync_page_io(rdev, sect, s<<9,
1221 sect,
1222 s<<9,
1223 bio->bi_io_vec[idx].bv_page, 1468 bio->bi_io_vec[idx].bv_page,
1224 READ, false)) { 1469 READ, false)) {
1225 success = 1; 1470 success = 1;
@@ -1233,16 +1478,36 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
1233 1478
1234 if (!success) { 1479 if (!success) {
1235 char b[BDEVNAME_SIZE]; 1480 char b[BDEVNAME_SIZE];
1236 /* Cannot read from anywhere, array is toast */ 1481 int abort = 0;
1237 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); 1482 /* Cannot read from anywhere, this block is lost.
1483 * Record a bad block on each device. If that doesn't
1484 * work just disable and interrupt the recovery.
1485 * Don't fail devices as that won't really help.
1486 */
1238 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error" 1487 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
1239 " for block %llu\n", 1488 " for block %llu\n",
1240 mdname(mddev), 1489 mdname(mddev),
1241 bdevname(bio->bi_bdev, b), 1490 bdevname(bio->bi_bdev, b),
1242 (unsigned long long)r1_bio->sector); 1491 (unsigned long long)r1_bio->sector);
1243 md_done_sync(mddev, r1_bio->sectors, 0); 1492 for (d = 0; d < conf->raid_disks; d++) {
1244 put_buf(r1_bio); 1493 rdev = conf->mirrors[d].rdev;
1245 return 0; 1494 if (!rdev || test_bit(Faulty, &rdev->flags))
1495 continue;
1496 if (!rdev_set_badblocks(rdev, sect, s, 0))
1497 abort = 1;
1498 }
1499 if (abort) {
1500 mddev->recovery_disabled = 1;
1501 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1502 md_done_sync(mddev, r1_bio->sectors, 0);
1503 put_buf(r1_bio);
1504 return 0;
1505 }
1506 /* Try next page */
1507 sectors -= s;
1508 sect += s;
1509 idx++;
1510 continue;
1246 } 1511 }
1247 1512
1248 start = d; 1513 start = d;
@@ -1254,16 +1519,12 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
1254 if (r1_bio->bios[d]->bi_end_io != end_sync_read) 1519 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1255 continue; 1520 continue;
1256 rdev = conf->mirrors[d].rdev; 1521 rdev = conf->mirrors[d].rdev;
1257 if (sync_page_io(rdev, 1522 if (r1_sync_page_io(rdev, sect, s,
1258 sect, 1523 bio->bi_io_vec[idx].bv_page,
1259 s<<9, 1524 WRITE) == 0) {
1260 bio->bi_io_vec[idx].bv_page,
1261 WRITE, false) == 0) {
1262 r1_bio->bios[d]->bi_end_io = NULL; 1525 r1_bio->bios[d]->bi_end_io = NULL;
1263 rdev_dec_pending(rdev, mddev); 1526 rdev_dec_pending(rdev, mddev);
1264 md_error(mddev, rdev); 1527 }
1265 } else
1266 atomic_add(s, &rdev->corrected_errors);
1267 } 1528 }
1268 d = start; 1529 d = start;
1269 while (d != r1_bio->read_disk) { 1530 while (d != r1_bio->read_disk) {
@@ -1273,12 +1534,10 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
1273 if (r1_bio->bios[d]->bi_end_io != end_sync_read) 1534 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1274 continue; 1535 continue;
1275 rdev = conf->mirrors[d].rdev; 1536 rdev = conf->mirrors[d].rdev;
1276 if (sync_page_io(rdev, 1537 if (r1_sync_page_io(rdev, sect, s,
1277 sect, 1538 bio->bi_io_vec[idx].bv_page,
1278 s<<9, 1539 READ) != 0)
1279 bio->bi_io_vec[idx].bv_page, 1540 atomic_add(s, &rdev->corrected_errors);
1280 READ, false) == 0)
1281 md_error(mddev, rdev);
1282 } 1541 }
1283 sectors -= s; 1542 sectors -= s;
1284 sect += s; 1543 sect += s;
@@ -1420,7 +1679,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
1420 * 1679 *
1421 * 1. Retries failed read operations on working mirrors. 1680 * 1. Retries failed read operations on working mirrors.
1422 * 2. Updates the raid superblock when problems encounter. 1681 * 2. Updates the raid superblock when problems encounter.
1423 * 3. Performs writes following reads for array syncronising. 1682 * 3. Performs writes following reads for array synchronising.
1424 */ 1683 */
1425 1684
1426static void fix_read_error(conf_t *conf, int read_disk, 1685static void fix_read_error(conf_t *conf, int read_disk,
@@ -1443,9 +1702,14 @@ static void fix_read_error(conf_t *conf, int read_disk,
1443 * which is the thread that might remove 1702 * which is the thread that might remove
1444 * a device. If raid1d ever becomes multi-threaded.... 1703 * a device. If raid1d ever becomes multi-threaded....
1445 */ 1704 */
1705 sector_t first_bad;
1706 int bad_sectors;
1707
1446 rdev = conf->mirrors[d].rdev; 1708 rdev = conf->mirrors[d].rdev;
1447 if (rdev && 1709 if (rdev &&
1448 test_bit(In_sync, &rdev->flags) && 1710 test_bit(In_sync, &rdev->flags) &&
1711 is_badblock(rdev, sect, s,
1712 &first_bad, &bad_sectors) == 0 &&
1449 sync_page_io(rdev, sect, s<<9, 1713 sync_page_io(rdev, sect, s<<9,
1450 conf->tmppage, READ, false)) 1714 conf->tmppage, READ, false))
1451 success = 1; 1715 success = 1;
@@ -1457,8 +1721,10 @@ static void fix_read_error(conf_t *conf, int read_disk,
1457 } while (!success && d != read_disk); 1721 } while (!success && d != read_disk);
1458 1722
1459 if (!success) { 1723 if (!success) {
1460 /* Cannot read from anywhere -- bye bye array */ 1724 /* Cannot read from anywhere - mark it bad */
1461 md_error(mddev, conf->mirrors[read_disk].rdev); 1725 mdk_rdev_t *rdev = conf->mirrors[read_disk].rdev;
1726 if (!rdev_set_badblocks(rdev, sect, s, 0))
1727 md_error(mddev, rdev);
1462 break; 1728 break;
1463 } 1729 }
1464 /* write it back and re-read */ 1730 /* write it back and re-read */
@@ -1469,13 +1735,9 @@ static void fix_read_error(conf_t *conf, int read_disk,
1469 d--; 1735 d--;
1470 rdev = conf->mirrors[d].rdev; 1736 rdev = conf->mirrors[d].rdev;
1471 if (rdev && 1737 if (rdev &&
1472 test_bit(In_sync, &rdev->flags)) { 1738 test_bit(In_sync, &rdev->flags))
1473 if (sync_page_io(rdev, sect, s<<9, 1739 r1_sync_page_io(rdev, sect, s,
1474 conf->tmppage, WRITE, false) 1740 conf->tmppage, WRITE);
1475 == 0)
1476 /* Well, this device is dead */
1477 md_error(mddev, rdev);
1478 }
1479 } 1741 }
1480 d = start; 1742 d = start;
1481 while (d != read_disk) { 1743 while (d != read_disk) {
@@ -1486,12 +1748,8 @@ static void fix_read_error(conf_t *conf, int read_disk,
1486 rdev = conf->mirrors[d].rdev; 1748 rdev = conf->mirrors[d].rdev;
1487 if (rdev && 1749 if (rdev &&
1488 test_bit(In_sync, &rdev->flags)) { 1750 test_bit(In_sync, &rdev->flags)) {
1489 if (sync_page_io(rdev, sect, s<<9, 1751 if (r1_sync_page_io(rdev, sect, s,
1490 conf->tmppage, READ, false) 1752 conf->tmppage, READ)) {
1491 == 0)
1492 /* Well, this device is dead */
1493 md_error(mddev, rdev);
1494 else {
1495 atomic_add(s, &rdev->corrected_errors); 1753 atomic_add(s, &rdev->corrected_errors);
1496 printk(KERN_INFO 1754 printk(KERN_INFO
1497 "md/raid1:%s: read error corrected " 1755 "md/raid1:%s: read error corrected "
@@ -1508,21 +1766,255 @@ static void fix_read_error(conf_t *conf, int read_disk,
1508 } 1766 }
1509} 1767}
1510 1768
1769static void bi_complete(struct bio *bio, int error)
1770{
1771 complete((struct completion *)bio->bi_private);
1772}
1773
1774static int submit_bio_wait(int rw, struct bio *bio)
1775{
1776 struct completion event;
1777 rw |= REQ_SYNC;
1778
1779 init_completion(&event);
1780 bio->bi_private = &event;
1781 bio->bi_end_io = bi_complete;
1782 submit_bio(rw, bio);
1783 wait_for_completion(&event);
1784
1785 return test_bit(BIO_UPTODATE, &bio->bi_flags);
1786}
1787
1788static int narrow_write_error(r1bio_t *r1_bio, int i)
1789{
1790 mddev_t *mddev = r1_bio->mddev;
1791 conf_t *conf = mddev->private;
1792 mdk_rdev_t *rdev = conf->mirrors[i].rdev;
1793 int vcnt, idx;
1794 struct bio_vec *vec;
1795
1796 /* bio has the data to be written to device 'i' where
1797 * we just recently had a write error.
1798 * We repeatedly clone the bio and trim down to one block,
1799 * then try the write. Where the write fails we record
1800 * a bad block.
1801 * It is conceivable that the bio doesn't exactly align with
1802 * blocks. We must handle this somehow.
1803 *
1804 * We currently own a reference on the rdev.
1805 */
1806
1807 int block_sectors;
1808 sector_t sector;
1809 int sectors;
1810 int sect_to_write = r1_bio->sectors;
1811 int ok = 1;
1812
1813 if (rdev->badblocks.shift < 0)
1814 return 0;
1815
1816 block_sectors = 1 << rdev->badblocks.shift;
1817 sector = r1_bio->sector;
1818 sectors = ((sector + block_sectors)
1819 & ~(sector_t)(block_sectors - 1))
1820 - sector;
1821
1822 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
1823 vcnt = r1_bio->behind_page_count;
1824 vec = r1_bio->behind_bvecs;
1825 idx = 0;
1826 while (vec[idx].bv_page == NULL)
1827 idx++;
1828 } else {
1829 vcnt = r1_bio->master_bio->bi_vcnt;
1830 vec = r1_bio->master_bio->bi_io_vec;
1831 idx = r1_bio->master_bio->bi_idx;
1832 }
1833 while (sect_to_write) {
1834 struct bio *wbio;
1835 if (sectors > sect_to_write)
1836 sectors = sect_to_write;
1837 /* Write at 'sector' for 'sectors'*/
1838
1839 wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
1840 memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
1841 wbio->bi_sector = r1_bio->sector;
1842 wbio->bi_rw = WRITE;
1843 wbio->bi_vcnt = vcnt;
1844 wbio->bi_size = r1_bio->sectors << 9;
1845 wbio->bi_idx = idx;
1846
1847 md_trim_bio(wbio, sector - r1_bio->sector, sectors);
1848 wbio->bi_sector += rdev->data_offset;
1849 wbio->bi_bdev = rdev->bdev;
1850 if (submit_bio_wait(WRITE, wbio) == 0)
1851 /* failure! */
1852 ok = rdev_set_badblocks(rdev, sector,
1853 sectors, 0)
1854 && ok;
1855
1856 bio_put(wbio);
1857 sect_to_write -= sectors;
1858 sector += sectors;
1859 sectors = block_sectors;
1860 }
1861 return ok;
1862}
1863
1864static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio)
1865{
1866 int m;
1867 int s = r1_bio->sectors;
1868 for (m = 0; m < conf->raid_disks ; m++) {
1869 mdk_rdev_t *rdev = conf->mirrors[m].rdev;
1870 struct bio *bio = r1_bio->bios[m];
1871 if (bio->bi_end_io == NULL)
1872 continue;
1873 if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
1874 test_bit(R1BIO_MadeGood, &r1_bio->state)) {
1875 rdev_clear_badblocks(rdev, r1_bio->sector, s);
1876 }
1877 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
1878 test_bit(R1BIO_WriteError, &r1_bio->state)) {
1879 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
1880 md_error(conf->mddev, rdev);
1881 }
1882 }
1883 put_buf(r1_bio);
1884 md_done_sync(conf->mddev, s, 1);
1885}
1886
1887static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio)
1888{
1889 int m;
1890 for (m = 0; m < conf->raid_disks ; m++)
1891 if (r1_bio->bios[m] == IO_MADE_GOOD) {
1892 mdk_rdev_t *rdev = conf->mirrors[m].rdev;
1893 rdev_clear_badblocks(rdev,
1894 r1_bio->sector,
1895 r1_bio->sectors);
1896 rdev_dec_pending(rdev, conf->mddev);
1897 } else if (r1_bio->bios[m] != NULL) {
1898 /* This drive got a write error. We need to
1899 * narrow down and record precise write
1900 * errors.
1901 */
1902 if (!narrow_write_error(r1_bio, m)) {
1903 md_error(conf->mddev,
1904 conf->mirrors[m].rdev);
1905 /* an I/O failed, we can't clear the bitmap */
1906 set_bit(R1BIO_Degraded, &r1_bio->state);
1907 }
1908 rdev_dec_pending(conf->mirrors[m].rdev,
1909 conf->mddev);
1910 }
1911 if (test_bit(R1BIO_WriteError, &r1_bio->state))
1912 close_write(r1_bio);
1913 raid_end_bio_io(r1_bio);
1914}
1915
1916static void handle_read_error(conf_t *conf, r1bio_t *r1_bio)
1917{
1918 int disk;
1919 int max_sectors;
1920 mddev_t *mddev = conf->mddev;
1921 struct bio *bio;
1922 char b[BDEVNAME_SIZE];
1923 mdk_rdev_t *rdev;
1924
1925 clear_bit(R1BIO_ReadError, &r1_bio->state);
1926 /* we got a read error. Maybe the drive is bad. Maybe just
1927 * the block and we can fix it.
1928 * We freeze all other IO, and try reading the block from
1929 * other devices. When we find one, we re-write
1930 * and check it that fixes the read error.
1931 * This is all done synchronously while the array is
1932 * frozen
1933 */
1934 if (mddev->ro == 0) {
1935 freeze_array(conf);
1936 fix_read_error(conf, r1_bio->read_disk,
1937 r1_bio->sector, r1_bio->sectors);
1938 unfreeze_array(conf);
1939 } else
1940 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
1941
1942 bio = r1_bio->bios[r1_bio->read_disk];
1943 bdevname(bio->bi_bdev, b);
1944read_more:
1945 disk = read_balance(conf, r1_bio, &max_sectors);
1946 if (disk == -1) {
1947 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
1948 " read error for block %llu\n",
1949 mdname(mddev), b, (unsigned long long)r1_bio->sector);
1950 raid_end_bio_io(r1_bio);
1951 } else {
1952 const unsigned long do_sync
1953 = r1_bio->master_bio->bi_rw & REQ_SYNC;
1954 if (bio) {
1955 r1_bio->bios[r1_bio->read_disk] =
1956 mddev->ro ? IO_BLOCKED : NULL;
1957 bio_put(bio);
1958 }
1959 r1_bio->read_disk = disk;
1960 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
1961 md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
1962 r1_bio->bios[r1_bio->read_disk] = bio;
1963 rdev = conf->mirrors[disk].rdev;
1964 printk_ratelimited(KERN_ERR
1965 "md/raid1:%s: redirecting sector %llu"
1966 " to other mirror: %s\n",
1967 mdname(mddev),
1968 (unsigned long long)r1_bio->sector,
1969 bdevname(rdev->bdev, b));
1970 bio->bi_sector = r1_bio->sector + rdev->data_offset;
1971 bio->bi_bdev = rdev->bdev;
1972 bio->bi_end_io = raid1_end_read_request;
1973 bio->bi_rw = READ | do_sync;
1974 bio->bi_private = r1_bio;
1975 if (max_sectors < r1_bio->sectors) {
1976 /* Drat - have to split this up more */
1977 struct bio *mbio = r1_bio->master_bio;
1978 int sectors_handled = (r1_bio->sector + max_sectors
1979 - mbio->bi_sector);
1980 r1_bio->sectors = max_sectors;
1981 spin_lock_irq(&conf->device_lock);
1982 if (mbio->bi_phys_segments == 0)
1983 mbio->bi_phys_segments = 2;
1984 else
1985 mbio->bi_phys_segments++;
1986 spin_unlock_irq(&conf->device_lock);
1987 generic_make_request(bio);
1988 bio = NULL;
1989
1990 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1991
1992 r1_bio->master_bio = mbio;
1993 r1_bio->sectors = (mbio->bi_size >> 9)
1994 - sectors_handled;
1995 r1_bio->state = 0;
1996 set_bit(R1BIO_ReadError, &r1_bio->state);
1997 r1_bio->mddev = mddev;
1998 r1_bio->sector = mbio->bi_sector + sectors_handled;
1999
2000 goto read_more;
2001 } else
2002 generic_make_request(bio);
2003 }
2004}
2005
1511static void raid1d(mddev_t *mddev) 2006static void raid1d(mddev_t *mddev)
1512{ 2007{
1513 r1bio_t *r1_bio; 2008 r1bio_t *r1_bio;
1514 struct bio *bio;
1515 unsigned long flags; 2009 unsigned long flags;
1516 conf_t *conf = mddev->private; 2010 conf_t *conf = mddev->private;
1517 struct list_head *head = &conf->retry_list; 2011 struct list_head *head = &conf->retry_list;
1518 mdk_rdev_t *rdev;
1519 struct blk_plug plug; 2012 struct blk_plug plug;
1520 2013
1521 md_check_recovery(mddev); 2014 md_check_recovery(mddev);
1522 2015
1523 blk_start_plug(&plug); 2016 blk_start_plug(&plug);
1524 for (;;) { 2017 for (;;) {
1525 char b[BDEVNAME_SIZE];
1526 2018
1527 if (atomic_read(&mddev->plug_cnt) == 0) 2019 if (atomic_read(&mddev->plug_cnt) == 0)
1528 flush_pending_writes(conf); 2020 flush_pending_writes(conf);
@@ -1539,62 +2031,26 @@ static void raid1d(mddev_t *mddev)
1539 2031
1540 mddev = r1_bio->mddev; 2032 mddev = r1_bio->mddev;
1541 conf = mddev->private; 2033 conf = mddev->private;
1542 if (test_bit(R1BIO_IsSync, &r1_bio->state)) 2034 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
1543 sync_request_write(mddev, r1_bio); 2035 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1544 else { 2036 test_bit(R1BIO_WriteError, &r1_bio->state))
1545 int disk; 2037 handle_sync_write_finished(conf, r1_bio);
1546 2038 else
1547 /* we got a read error. Maybe the drive is bad. Maybe just 2039 sync_request_write(mddev, r1_bio);
1548 * the block and we can fix it. 2040 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1549 * We freeze all other IO, and try reading the block from 2041 test_bit(R1BIO_WriteError, &r1_bio->state))
1550 * other devices. When we find one, we re-write 2042 handle_write_finished(conf, r1_bio);
1551 * and check it that fixes the read error. 2043 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
1552 * This is all done synchronously while the array is 2044 handle_read_error(conf, r1_bio);
1553 * frozen 2045 else
2046 /* just a partial read to be scheduled from separate
2047 * context
1554 */ 2048 */
1555 if (mddev->ro == 0) { 2049 generic_make_request(r1_bio->bios[r1_bio->read_disk]);
1556 freeze_array(conf); 2050
1557 fix_read_error(conf, r1_bio->read_disk,
1558 r1_bio->sector,
1559 r1_bio->sectors);
1560 unfreeze_array(conf);
1561 } else
1562 md_error(mddev,
1563 conf->mirrors[r1_bio->read_disk].rdev);
1564
1565 bio = r1_bio->bios[r1_bio->read_disk];
1566 if ((disk=read_balance(conf, r1_bio)) == -1) {
1567 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
1568 " read error for block %llu\n",
1569 mdname(mddev),
1570 bdevname(bio->bi_bdev,b),
1571 (unsigned long long)r1_bio->sector);
1572 raid_end_bio_io(r1_bio);
1573 } else {
1574 const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
1575 r1_bio->bios[r1_bio->read_disk] =
1576 mddev->ro ? IO_BLOCKED : NULL;
1577 r1_bio->read_disk = disk;
1578 bio_put(bio);
1579 bio = bio_clone_mddev(r1_bio->master_bio,
1580 GFP_NOIO, mddev);
1581 r1_bio->bios[r1_bio->read_disk] = bio;
1582 rdev = conf->mirrors[disk].rdev;
1583 if (printk_ratelimit())
1584 printk(KERN_ERR "md/raid1:%s: redirecting sector %llu to"
1585 " other mirror: %s\n",
1586 mdname(mddev),
1587 (unsigned long long)r1_bio->sector,
1588 bdevname(rdev->bdev,b));
1589 bio->bi_sector = r1_bio->sector + rdev->data_offset;
1590 bio->bi_bdev = rdev->bdev;
1591 bio->bi_end_io = raid1_end_read_request;
1592 bio->bi_rw = READ | do_sync;
1593 bio->bi_private = r1_bio;
1594 generic_make_request(bio);
1595 }
1596 }
1597 cond_resched(); 2051 cond_resched();
2052 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2053 md_check_recovery(mddev);
1598 } 2054 }
1599 blk_finish_plug(&plug); 2055 blk_finish_plug(&plug);
1600} 2056}
@@ -1636,6 +2092,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1636 int write_targets = 0, read_targets = 0; 2092 int write_targets = 0, read_targets = 0;
1637 sector_t sync_blocks; 2093 sector_t sync_blocks;
1638 int still_degraded = 0; 2094 int still_degraded = 0;
2095 int good_sectors = RESYNC_SECTORS;
2096 int min_bad = 0; /* number of sectors that are bad in all devices */
1639 2097
1640 if (!conf->r1buf_pool) 2098 if (!conf->r1buf_pool)
1641 if (init_resync(conf)) 2099 if (init_resync(conf))
@@ -1723,36 +2181,89 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1723 2181
1724 rdev = rcu_dereference(conf->mirrors[i].rdev); 2182 rdev = rcu_dereference(conf->mirrors[i].rdev);
1725 if (rdev == NULL || 2183 if (rdev == NULL ||
1726 test_bit(Faulty, &rdev->flags)) { 2184 test_bit(Faulty, &rdev->flags)) {
1727 still_degraded = 1; 2185 still_degraded = 1;
1728 continue;
1729 } else if (!test_bit(In_sync, &rdev->flags)) { 2186 } else if (!test_bit(In_sync, &rdev->flags)) {
1730 bio->bi_rw = WRITE; 2187 bio->bi_rw = WRITE;
1731 bio->bi_end_io = end_sync_write; 2188 bio->bi_end_io = end_sync_write;
1732 write_targets ++; 2189 write_targets ++;
1733 } else { 2190 } else {
1734 /* may need to read from here */ 2191 /* may need to read from here */
1735 bio->bi_rw = READ; 2192 sector_t first_bad = MaxSector;
1736 bio->bi_end_io = end_sync_read; 2193 int bad_sectors;
1737 if (test_bit(WriteMostly, &rdev->flags)) { 2194
1738 if (wonly < 0) 2195 if (is_badblock(rdev, sector_nr, good_sectors,
1739 wonly = i; 2196 &first_bad, &bad_sectors)) {
1740 } else { 2197 if (first_bad > sector_nr)
1741 if (disk < 0) 2198 good_sectors = first_bad - sector_nr;
1742 disk = i; 2199 else {
2200 bad_sectors -= (sector_nr - first_bad);
2201 if (min_bad == 0 ||
2202 min_bad > bad_sectors)
2203 min_bad = bad_sectors;
2204 }
2205 }
2206 if (sector_nr < first_bad) {
2207 if (test_bit(WriteMostly, &rdev->flags)) {
2208 if (wonly < 0)
2209 wonly = i;
2210 } else {
2211 if (disk < 0)
2212 disk = i;
2213 }
2214 bio->bi_rw = READ;
2215 bio->bi_end_io = end_sync_read;
2216 read_targets++;
1743 } 2217 }
1744 read_targets++;
1745 } 2218 }
1746 atomic_inc(&rdev->nr_pending); 2219 if (bio->bi_end_io) {
1747 bio->bi_sector = sector_nr + rdev->data_offset; 2220 atomic_inc(&rdev->nr_pending);
1748 bio->bi_bdev = rdev->bdev; 2221 bio->bi_sector = sector_nr + rdev->data_offset;
1749 bio->bi_private = r1_bio; 2222 bio->bi_bdev = rdev->bdev;
2223 bio->bi_private = r1_bio;
2224 }
1750 } 2225 }
1751 rcu_read_unlock(); 2226 rcu_read_unlock();
1752 if (disk < 0) 2227 if (disk < 0)
1753 disk = wonly; 2228 disk = wonly;
1754 r1_bio->read_disk = disk; 2229 r1_bio->read_disk = disk;
1755 2230
2231 if (read_targets == 0 && min_bad > 0) {
2232 /* These sectors are bad on all InSync devices, so we
2233 * need to mark them bad on all write targets
2234 */
2235 int ok = 1;
2236 for (i = 0 ; i < conf->raid_disks ; i++)
2237 if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2238 mdk_rdev_t *rdev =
2239 rcu_dereference(conf->mirrors[i].rdev);
2240 ok = rdev_set_badblocks(rdev, sector_nr,
2241 min_bad, 0
2242 ) && ok;
2243 }
2244 set_bit(MD_CHANGE_DEVS, &mddev->flags);
2245 *skipped = 1;
2246 put_buf(r1_bio);
2247
2248 if (!ok) {
2249 /* Cannot record the badblocks, so need to
2250 * abort the resync.
2251 * If there are multiple read targets, could just
2252 * fail the really bad ones ???
2253 */
2254 conf->recovery_disabled = mddev->recovery_disabled;
2255 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2256 return 0;
2257 } else
2258 return min_bad;
2259
2260 }
2261 if (min_bad > 0 && min_bad < good_sectors) {
2262 /* only resync enough to reach the next bad->good
2263 * transition */
2264 good_sectors = min_bad;
2265 }
2266
1756 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) 2267 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
1757 /* extra read targets are also write targets */ 2268 /* extra read targets are also write targets */
1758 write_targets += read_targets-1; 2269 write_targets += read_targets-1;
@@ -1769,6 +2280,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1769 2280
1770 if (max_sector > mddev->resync_max) 2281 if (max_sector > mddev->resync_max)
1771 max_sector = mddev->resync_max; /* Don't do IO beyond here */ 2282 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2283 if (max_sector > sector_nr + good_sectors)
2284 max_sector = sector_nr + good_sectors;
1772 nr_sectors = 0; 2285 nr_sectors = 0;
1773 sync_blocks = 0; 2286 sync_blocks = 0;
1774 do { 2287 do {
@@ -2154,18 +2667,13 @@ static int raid1_reshape(mddev_t *mddev)
2154 for (d = d2 = 0; d < conf->raid_disks; d++) { 2667 for (d = d2 = 0; d < conf->raid_disks; d++) {
2155 mdk_rdev_t *rdev = conf->mirrors[d].rdev; 2668 mdk_rdev_t *rdev = conf->mirrors[d].rdev;
2156 if (rdev && rdev->raid_disk != d2) { 2669 if (rdev && rdev->raid_disk != d2) {
2157 char nm[20]; 2670 sysfs_unlink_rdev(mddev, rdev);
2158 sprintf(nm, "rd%d", rdev->raid_disk);
2159 sysfs_remove_link(&mddev->kobj, nm);
2160 rdev->raid_disk = d2; 2671 rdev->raid_disk = d2;
2161 sprintf(nm, "rd%d", rdev->raid_disk); 2672 sysfs_unlink_rdev(mddev, rdev);
2162 sysfs_remove_link(&mddev->kobj, nm); 2673 if (sysfs_link_rdev(mddev, rdev))
2163 if (sysfs_create_link(&mddev->kobj,
2164 &rdev->kobj, nm))
2165 printk(KERN_WARNING 2674 printk(KERN_WARNING
2166 "md/raid1:%s: cannot register " 2675 "md/raid1:%s: cannot register rd%d\n",
2167 "%s\n", 2676 mdname(mddev), rdev->raid_disk);
2168 mdname(mddev), nm);
2169 } 2677 }
2170 if (rdev) 2678 if (rdev)
2171 newmirrors[d2++].rdev = rdev; 2679 newmirrors[d2++].rdev = rdev;
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index e743a64fac4f..e0d676b48974 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -48,6 +48,12 @@ struct r1_private_data_s {
48 * (fresh device added). 48 * (fresh device added).
49 * Cleared when a sync completes. 49 * Cleared when a sync completes.
50 */ 50 */
51 int recovery_disabled; /* when the same as
52 * mddev->recovery_disabled
53 * we don't allow recovery
54 * to be attempted as we
55 * expect a read error
56 */
51 57
52 wait_queue_head_t wait_barrier; 58 wait_queue_head_t wait_barrier;
53 59
@@ -95,7 +101,7 @@ struct r1bio_s {
95 101
96 struct list_head retry_list; 102 struct list_head retry_list;
97 /* Next two are only valid when R1BIO_BehindIO is set */ 103 /* Next two are only valid when R1BIO_BehindIO is set */
98 struct page **behind_pages; 104 struct bio_vec *behind_bvecs;
99 int behind_page_count; 105 int behind_page_count;
100 /* 106 /*
101 * if the IO is in WRITE direction, then multiple bios are used. 107 * if the IO is in WRITE direction, then multiple bios are used.
@@ -110,13 +116,24 @@ struct r1bio_s {
110 * correct the read error. To keep track of bad blocks on a per-bio 116 * correct the read error. To keep track of bad blocks on a per-bio
111 * level, we store IO_BLOCKED in the appropriate 'bios' pointer 117 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
112 */ 118 */
113#define IO_BLOCKED ((struct bio*)1) 119#define IO_BLOCKED ((struct bio *)1)
120/* When we successfully write to a known bad-block, we need to remove the
121 * bad-block marking which must be done from process context. So we record
122 * the success by setting bios[n] to IO_MADE_GOOD
123 */
124#define IO_MADE_GOOD ((struct bio *)2)
125
126#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
114 127
115/* bits for r1bio.state */ 128/* bits for r1bio.state */
116#define R1BIO_Uptodate 0 129#define R1BIO_Uptodate 0
117#define R1BIO_IsSync 1 130#define R1BIO_IsSync 1
118#define R1BIO_Degraded 2 131#define R1BIO_Degraded 2
119#define R1BIO_BehindIO 3 132#define R1BIO_BehindIO 3
133/* Set ReadError on bios that experience a readerror so that
134 * raid1d knows what to do with them.
135 */
136#define R1BIO_ReadError 4
120/* For write-behind requests, we call bi_end_io when 137/* For write-behind requests, we call bi_end_io when
121 * the last non-write-behind device completes, providing 138 * the last non-write-behind device completes, providing
122 * any write was successful. Otherwise we call when 139 * any write was successful. Otherwise we call when
@@ -125,6 +142,11 @@ struct r1bio_s {
125 * Record that bi_end_io was called with this flag... 142 * Record that bi_end_io was called with this flag...
126 */ 143 */
127#define R1BIO_Returned 6 144#define R1BIO_Returned 6
145/* If a write for this request means we can clear some
146 * known-bad-block records, we set this flag
147 */
148#define R1BIO_MadeGood 7
149#define R1BIO_WriteError 8
128 150
129extern int md_raid1_congested(mddev_t *mddev, int bits); 151extern int md_raid1_congested(mddev_t *mddev, int bits);
130 152
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6e846688962f..8b29cd4f01c8 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -22,6 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/blkdev.h> 23#include <linux/blkdev.h>
24#include <linux/seq_file.h> 24#include <linux/seq_file.h>
25#include <linux/ratelimit.h>
25#include "md.h" 26#include "md.h"
26#include "raid10.h" 27#include "raid10.h"
27#include "raid0.h" 28#include "raid0.h"
@@ -123,7 +124,14 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
123 for (j = 0 ; j < nalloc; j++) { 124 for (j = 0 ; j < nalloc; j++) {
124 bio = r10_bio->devs[j].bio; 125 bio = r10_bio->devs[j].bio;
125 for (i = 0; i < RESYNC_PAGES; i++) { 126 for (i = 0; i < RESYNC_PAGES; i++) {
126 page = alloc_page(gfp_flags); 127 if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
128 &conf->mddev->recovery)) {
129 /* we can share bv_page's during recovery */
130 struct bio *rbio = r10_bio->devs[0].bio;
131 page = rbio->bi_io_vec[i].bv_page;
132 get_page(page);
133 } else
134 page = alloc_page(gfp_flags);
127 if (unlikely(!page)) 135 if (unlikely(!page))
128 goto out_free_pages; 136 goto out_free_pages;
129 137
@@ -173,7 +181,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
173 181
174 for (i = 0; i < conf->copies; i++) { 182 for (i = 0; i < conf->copies; i++) {
175 struct bio **bio = & r10_bio->devs[i].bio; 183 struct bio **bio = & r10_bio->devs[i].bio;
176 if (*bio && *bio != IO_BLOCKED) 184 if (!BIO_SPECIAL(*bio))
177 bio_put(*bio); 185 bio_put(*bio);
178 *bio = NULL; 186 *bio = NULL;
179 } 187 }
@@ -183,12 +191,6 @@ static void free_r10bio(r10bio_t *r10_bio)
183{ 191{
184 conf_t *conf = r10_bio->mddev->private; 192 conf_t *conf = r10_bio->mddev->private;
185 193
186 /*
187 * Wake up any possible resync thread that waits for the device
188 * to go idle.
189 */
190 allow_barrier(conf);
191
192 put_all_bios(conf, r10_bio); 194 put_all_bios(conf, r10_bio);
193 mempool_free(r10_bio, conf->r10bio_pool); 195 mempool_free(r10_bio, conf->r10bio_pool);
194} 196}
@@ -227,9 +229,27 @@ static void reschedule_retry(r10bio_t *r10_bio)
227static void raid_end_bio_io(r10bio_t *r10_bio) 229static void raid_end_bio_io(r10bio_t *r10_bio)
228{ 230{
229 struct bio *bio = r10_bio->master_bio; 231 struct bio *bio = r10_bio->master_bio;
232 int done;
233 conf_t *conf = r10_bio->mddev->private;
230 234
231 bio_endio(bio, 235 if (bio->bi_phys_segments) {
232 test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO); 236 unsigned long flags;
237 spin_lock_irqsave(&conf->device_lock, flags);
238 bio->bi_phys_segments--;
239 done = (bio->bi_phys_segments == 0);
240 spin_unlock_irqrestore(&conf->device_lock, flags);
241 } else
242 done = 1;
243 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
244 clear_bit(BIO_UPTODATE, &bio->bi_flags);
245 if (done) {
246 bio_endio(bio, 0);
247 /*
248 * Wake up any possible resync thread that waits for the device
249 * to go idle.
250 */
251 allow_barrier(conf);
252 }
233 free_r10bio(r10_bio); 253 free_r10bio(r10_bio);
234} 254}
235 255
@@ -244,6 +264,26 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
244 r10_bio->devs[slot].addr + (r10_bio->sectors); 264 r10_bio->devs[slot].addr + (r10_bio->sectors);
245} 265}
246 266
267/*
268 * Find the disk number which triggered given bio
269 */
270static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio,
271 struct bio *bio, int *slotp)
272{
273 int slot;
274
275 for (slot = 0; slot < conf->copies; slot++)
276 if (r10_bio->devs[slot].bio == bio)
277 break;
278
279 BUG_ON(slot == conf->copies);
280 update_head_pos(slot, r10_bio);
281
282 if (slotp)
283 *slotp = slot;
284 return r10_bio->devs[slot].devnum;
285}
286
247static void raid10_end_read_request(struct bio *bio, int error) 287static void raid10_end_read_request(struct bio *bio, int error)
248{ 288{
249 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 289 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -277,34 +317,45 @@ static void raid10_end_read_request(struct bio *bio, int error)
277 * oops, read error - keep the refcount on the rdev 317 * oops, read error - keep the refcount on the rdev
278 */ 318 */
279 char b[BDEVNAME_SIZE]; 319 char b[BDEVNAME_SIZE];
280 if (printk_ratelimit()) 320 printk_ratelimited(KERN_ERR
281 printk(KERN_ERR "md/raid10:%s: %s: rescheduling sector %llu\n", 321 "md/raid10:%s: %s: rescheduling sector %llu\n",
282 mdname(conf->mddev), 322 mdname(conf->mddev),
283 bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector); 323 bdevname(conf->mirrors[dev].rdev->bdev, b),
324 (unsigned long long)r10_bio->sector);
325 set_bit(R10BIO_ReadError, &r10_bio->state);
284 reschedule_retry(r10_bio); 326 reschedule_retry(r10_bio);
285 } 327 }
286} 328}
287 329
330static void close_write(r10bio_t *r10_bio)
331{
332 /* clear the bitmap if all writes complete successfully */
333 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
334 r10_bio->sectors,
335 !test_bit(R10BIO_Degraded, &r10_bio->state),
336 0);
337 md_write_end(r10_bio->mddev);
338}
339
288static void raid10_end_write_request(struct bio *bio, int error) 340static void raid10_end_write_request(struct bio *bio, int error)
289{ 341{
290 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 342 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
291 r10bio_t *r10_bio = bio->bi_private; 343 r10bio_t *r10_bio = bio->bi_private;
292 int slot, dev; 344 int dev;
345 int dec_rdev = 1;
293 conf_t *conf = r10_bio->mddev->private; 346 conf_t *conf = r10_bio->mddev->private;
347 int slot;
294 348
295 for (slot = 0; slot < conf->copies; slot++) 349 dev = find_bio_disk(conf, r10_bio, bio, &slot);
296 if (r10_bio->devs[slot].bio == bio)
297 break;
298 dev = r10_bio->devs[slot].devnum;
299 350
300 /* 351 /*
301 * this branch is our 'one mirror IO has finished' event handler: 352 * this branch is our 'one mirror IO has finished' event handler:
302 */ 353 */
303 if (!uptodate) { 354 if (!uptodate) {
304 md_error(r10_bio->mddev, conf->mirrors[dev].rdev); 355 set_bit(WriteErrorSeen, &conf->mirrors[dev].rdev->flags);
305 /* an I/O failed, we can't clear the bitmap */ 356 set_bit(R10BIO_WriteError, &r10_bio->state);
306 set_bit(R10BIO_Degraded, &r10_bio->state); 357 dec_rdev = 0;
307 } else 358 } else {
308 /* 359 /*
309 * Set R10BIO_Uptodate in our master bio, so that 360 * Set R10BIO_Uptodate in our master bio, so that
310 * we will return a good error code for to the higher 361 * we will return a good error code for to the higher
@@ -314,9 +365,22 @@ static void raid10_end_write_request(struct bio *bio, int error)
314 * user-side. So if something waits for IO, then it will 365 * user-side. So if something waits for IO, then it will
315 * wait for the 'master' bio. 366 * wait for the 'master' bio.
316 */ 367 */
368 sector_t first_bad;
369 int bad_sectors;
370
317 set_bit(R10BIO_Uptodate, &r10_bio->state); 371 set_bit(R10BIO_Uptodate, &r10_bio->state);
318 372
319 update_head_pos(slot, r10_bio); 373 /* Maybe we can clear some bad blocks. */
374 if (is_badblock(conf->mirrors[dev].rdev,
375 r10_bio->devs[slot].addr,
376 r10_bio->sectors,
377 &first_bad, &bad_sectors)) {
378 bio_put(bio);
379 r10_bio->devs[slot].bio = IO_MADE_GOOD;
380 dec_rdev = 0;
381 set_bit(R10BIO_MadeGood, &r10_bio->state);
382 }
383 }
320 384
321 /* 385 /*
322 * 386 *
@@ -324,16 +388,18 @@ static void raid10_end_write_request(struct bio *bio, int error)
324 * already. 388 * already.
325 */ 389 */
326 if (atomic_dec_and_test(&r10_bio->remaining)) { 390 if (atomic_dec_and_test(&r10_bio->remaining)) {
327 /* clear the bitmap if all writes complete successfully */ 391 if (test_bit(R10BIO_WriteError, &r10_bio->state))
328 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, 392 reschedule_retry(r10_bio);
329 r10_bio->sectors, 393 else {
330 !test_bit(R10BIO_Degraded, &r10_bio->state), 394 close_write(r10_bio);
331 0); 395 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
332 md_write_end(r10_bio->mddev); 396 reschedule_retry(r10_bio);
333 raid_end_bio_io(r10_bio); 397 else
398 raid_end_bio_io(r10_bio);
399 }
334 } 400 }
335 401 if (dec_rdev)
336 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); 402 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
337} 403}
338 404
339 405
@@ -484,11 +550,12 @@ static int raid10_mergeable_bvec(struct request_queue *q,
484 * FIXME: possibly should rethink readbalancing and do it differently 550 * FIXME: possibly should rethink readbalancing and do it differently
485 * depending on near_copies / far_copies geometry. 551 * depending on near_copies / far_copies geometry.
486 */ 552 */
487static int read_balance(conf_t *conf, r10bio_t *r10_bio) 553static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors)
488{ 554{
489 const sector_t this_sector = r10_bio->sector; 555 const sector_t this_sector = r10_bio->sector;
490 int disk, slot; 556 int disk, slot;
491 const int sectors = r10_bio->sectors; 557 int sectors = r10_bio->sectors;
558 int best_good_sectors;
492 sector_t new_distance, best_dist; 559 sector_t new_distance, best_dist;
493 mdk_rdev_t *rdev; 560 mdk_rdev_t *rdev;
494 int do_balance; 561 int do_balance;
@@ -497,8 +564,10 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
497 raid10_find_phys(conf, r10_bio); 564 raid10_find_phys(conf, r10_bio);
498 rcu_read_lock(); 565 rcu_read_lock();
499retry: 566retry:
567 sectors = r10_bio->sectors;
500 best_slot = -1; 568 best_slot = -1;
501 best_dist = MaxSector; 569 best_dist = MaxSector;
570 best_good_sectors = 0;
502 do_balance = 1; 571 do_balance = 1;
503 /* 572 /*
504 * Check if we can balance. We can balance on the whole 573 * Check if we can balance. We can balance on the whole
@@ -511,6 +580,10 @@ retry:
511 do_balance = 0; 580 do_balance = 0;
512 581
513 for (slot = 0; slot < conf->copies ; slot++) { 582 for (slot = 0; slot < conf->copies ; slot++) {
583 sector_t first_bad;
584 int bad_sectors;
585 sector_t dev_sector;
586
514 if (r10_bio->devs[slot].bio == IO_BLOCKED) 587 if (r10_bio->devs[slot].bio == IO_BLOCKED)
515 continue; 588 continue;
516 disk = r10_bio->devs[slot].devnum; 589 disk = r10_bio->devs[slot].devnum;
@@ -520,6 +593,37 @@ retry:
520 if (!test_bit(In_sync, &rdev->flags)) 593 if (!test_bit(In_sync, &rdev->flags))
521 continue; 594 continue;
522 595
596 dev_sector = r10_bio->devs[slot].addr;
597 if (is_badblock(rdev, dev_sector, sectors,
598 &first_bad, &bad_sectors)) {
599 if (best_dist < MaxSector)
600 /* Already have a better slot */
601 continue;
602 if (first_bad <= dev_sector) {
603 /* Cannot read here. If this is the
604 * 'primary' device, then we must not read
605 * beyond 'bad_sectors' from another device.
606 */
607 bad_sectors -= (dev_sector - first_bad);
608 if (!do_balance && sectors > bad_sectors)
609 sectors = bad_sectors;
610 if (best_good_sectors > sectors)
611 best_good_sectors = sectors;
612 } else {
613 sector_t good_sectors =
614 first_bad - dev_sector;
615 if (good_sectors > best_good_sectors) {
616 best_good_sectors = good_sectors;
617 best_slot = slot;
618 }
619 if (!do_balance)
620 /* Must read from here */
621 break;
622 }
623 continue;
624 } else
625 best_good_sectors = sectors;
626
523 if (!do_balance) 627 if (!do_balance)
524 break; 628 break;
525 629
@@ -561,6 +665,7 @@ retry:
561 } else 665 } else
562 disk = -1; 666 disk = -1;
563 rcu_read_unlock(); 667 rcu_read_unlock();
668 *max_sectors = best_good_sectors;
564 669
565 return disk; 670 return disk;
566} 671}
@@ -734,6 +839,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
734 unsigned long flags; 839 unsigned long flags;
735 mdk_rdev_t *blocked_rdev; 840 mdk_rdev_t *blocked_rdev;
736 int plugged; 841 int plugged;
842 int sectors_handled;
843 int max_sectors;
737 844
738 if (unlikely(bio->bi_rw & REQ_FLUSH)) { 845 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
739 md_flush_request(mddev, bio); 846 md_flush_request(mddev, bio);
@@ -808,12 +915,26 @@ static int make_request(mddev_t *mddev, struct bio * bio)
808 r10_bio->sector = bio->bi_sector; 915 r10_bio->sector = bio->bi_sector;
809 r10_bio->state = 0; 916 r10_bio->state = 0;
810 917
918 /* We might need to issue multiple reads to different
919 * devices if there are bad blocks around, so we keep
920 * track of the number of reads in bio->bi_phys_segments.
921 * If this is 0, there is only one r10_bio and no locking
922 * will be needed when the request completes. If it is
923 * non-zero, then it is the number of not-completed requests.
924 */
925 bio->bi_phys_segments = 0;
926 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
927
811 if (rw == READ) { 928 if (rw == READ) {
812 /* 929 /*
813 * read balancing logic: 930 * read balancing logic:
814 */ 931 */
815 int disk = read_balance(conf, r10_bio); 932 int disk;
816 int slot = r10_bio->read_slot; 933 int slot;
934
935read_again:
936 disk = read_balance(conf, r10_bio, &max_sectors);
937 slot = r10_bio->read_slot;
817 if (disk < 0) { 938 if (disk < 0) {
818 raid_end_bio_io(r10_bio); 939 raid_end_bio_io(r10_bio);
819 return 0; 940 return 0;
@@ -821,6 +942,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
821 mirror = conf->mirrors + disk; 942 mirror = conf->mirrors + disk;
822 943
823 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); 944 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
945 md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
946 max_sectors);
824 947
825 r10_bio->devs[slot].bio = read_bio; 948 r10_bio->devs[slot].bio = read_bio;
826 949
@@ -831,7 +954,37 @@ static int make_request(mddev_t *mddev, struct bio * bio)
831 read_bio->bi_rw = READ | do_sync; 954 read_bio->bi_rw = READ | do_sync;
832 read_bio->bi_private = r10_bio; 955 read_bio->bi_private = r10_bio;
833 956
834 generic_make_request(read_bio); 957 if (max_sectors < r10_bio->sectors) {
958 /* Could not read all from this device, so we will
959 * need another r10_bio.
960 */
961 sectors_handled = (r10_bio->sectors + max_sectors
962 - bio->bi_sector);
963 r10_bio->sectors = max_sectors;
964 spin_lock_irq(&conf->device_lock);
965 if (bio->bi_phys_segments == 0)
966 bio->bi_phys_segments = 2;
967 else
968 bio->bi_phys_segments++;
969 spin_unlock(&conf->device_lock);
970 /* Cannot call generic_make_request directly
971 * as that will be queued in __generic_make_request
972 * and subsequent mempool_alloc might block
973 * waiting for it. so hand bio over to raid10d.
974 */
975 reschedule_retry(r10_bio);
976
977 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
978
979 r10_bio->master_bio = bio;
980 r10_bio->sectors = ((bio->bi_size >> 9)
981 - sectors_handled);
982 r10_bio->state = 0;
983 r10_bio->mddev = mddev;
984 r10_bio->sector = bio->bi_sector + sectors_handled;
985 goto read_again;
986 } else
987 generic_make_request(read_bio);
835 return 0; 988 return 0;
836 } 989 }
837 990
@@ -841,13 +994,22 @@ static int make_request(mddev_t *mddev, struct bio * bio)
841 /* first select target devices under rcu_lock and 994 /* first select target devices under rcu_lock and
842 * inc refcount on their rdev. Record them by setting 995 * inc refcount on their rdev. Record them by setting
843 * bios[x] to bio 996 * bios[x] to bio
997 * If there are known/acknowledged bad blocks on any device
998 * on which we have seen a write error, we want to avoid
999 * writing to those blocks. This potentially requires several
1000 * writes to write around the bad blocks. Each set of writes
1001 * gets its own r10_bio with a set of bios attached. The number
1002 * of r10_bios is recored in bio->bi_phys_segments just as with
1003 * the read case.
844 */ 1004 */
845 plugged = mddev_check_plugged(mddev); 1005 plugged = mddev_check_plugged(mddev);
846 1006
847 raid10_find_phys(conf, r10_bio); 1007 raid10_find_phys(conf, r10_bio);
848 retry_write: 1008retry_write:
849 blocked_rdev = NULL; 1009 blocked_rdev = NULL;
850 rcu_read_lock(); 1010 rcu_read_lock();
1011 max_sectors = r10_bio->sectors;
1012
851 for (i = 0; i < conf->copies; i++) { 1013 for (i = 0; i < conf->copies; i++) {
852 int d = r10_bio->devs[i].devnum; 1014 int d = r10_bio->devs[i].devnum;
853 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev); 1015 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
@@ -856,13 +1018,55 @@ static int make_request(mddev_t *mddev, struct bio * bio)
856 blocked_rdev = rdev; 1018 blocked_rdev = rdev;
857 break; 1019 break;
858 } 1020 }
859 if (rdev && !test_bit(Faulty, &rdev->flags)) { 1021 r10_bio->devs[i].bio = NULL;
860 atomic_inc(&rdev->nr_pending); 1022 if (!rdev || test_bit(Faulty, &rdev->flags)) {
861 r10_bio->devs[i].bio = bio;
862 } else {
863 r10_bio->devs[i].bio = NULL;
864 set_bit(R10BIO_Degraded, &r10_bio->state); 1023 set_bit(R10BIO_Degraded, &r10_bio->state);
1024 continue;
865 } 1025 }
1026 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1027 sector_t first_bad;
1028 sector_t dev_sector = r10_bio->devs[i].addr;
1029 int bad_sectors;
1030 int is_bad;
1031
1032 is_bad = is_badblock(rdev, dev_sector,
1033 max_sectors,
1034 &first_bad, &bad_sectors);
1035 if (is_bad < 0) {
1036 /* Mustn't write here until the bad block
1037 * is acknowledged
1038 */
1039 atomic_inc(&rdev->nr_pending);
1040 set_bit(BlockedBadBlocks, &rdev->flags);
1041 blocked_rdev = rdev;
1042 break;
1043 }
1044 if (is_bad && first_bad <= dev_sector) {
1045 /* Cannot write here at all */
1046 bad_sectors -= (dev_sector - first_bad);
1047 if (bad_sectors < max_sectors)
1048 /* Mustn't write more than bad_sectors
1049 * to other devices yet
1050 */
1051 max_sectors = bad_sectors;
1052 /* We don't set R10BIO_Degraded as that
1053 * only applies if the disk is missing,
1054 * so it might be re-added, and we want to
1055 * know to recover this chunk.
1056 * In this case the device is here, and the
1057 * fact that this chunk is not in-sync is
1058 * recorded in the bad block log.
1059 */
1060 continue;
1061 }
1062 if (is_bad) {
1063 int good_sectors = first_bad - dev_sector;
1064 if (good_sectors < max_sectors)
1065 max_sectors = good_sectors;
1066 }
1067 }
1068 r10_bio->devs[i].bio = bio;
1069 atomic_inc(&rdev->nr_pending);
866 } 1070 }
867 rcu_read_unlock(); 1071 rcu_read_unlock();
868 1072
@@ -882,8 +1086,22 @@ static int make_request(mddev_t *mddev, struct bio * bio)
882 goto retry_write; 1086 goto retry_write;
883 } 1087 }
884 1088
1089 if (max_sectors < r10_bio->sectors) {
1090 /* We are splitting this into multiple parts, so
1091 * we need to prepare for allocating another r10_bio.
1092 */
1093 r10_bio->sectors = max_sectors;
1094 spin_lock_irq(&conf->device_lock);
1095 if (bio->bi_phys_segments == 0)
1096 bio->bi_phys_segments = 2;
1097 else
1098 bio->bi_phys_segments++;
1099 spin_unlock_irq(&conf->device_lock);
1100 }
1101 sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
1102
885 atomic_set(&r10_bio->remaining, 1); 1103 atomic_set(&r10_bio->remaining, 1);
886 bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0); 1104 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
887 1105
888 for (i = 0; i < conf->copies; i++) { 1106 for (i = 0; i < conf->copies; i++) {
889 struct bio *mbio; 1107 struct bio *mbio;
@@ -892,10 +1110,12 @@ static int make_request(mddev_t *mddev, struct bio * bio)
892 continue; 1110 continue;
893 1111
894 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1112 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1113 md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1114 max_sectors);
895 r10_bio->devs[i].bio = mbio; 1115 r10_bio->devs[i].bio = mbio;
896 1116
897 mbio->bi_sector = r10_bio->devs[i].addr+ 1117 mbio->bi_sector = (r10_bio->devs[i].addr+
898 conf->mirrors[d].rdev->data_offset; 1118 conf->mirrors[d].rdev->data_offset);
899 mbio->bi_bdev = conf->mirrors[d].rdev->bdev; 1119 mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
900 mbio->bi_end_io = raid10_end_write_request; 1120 mbio->bi_end_io = raid10_end_write_request;
901 mbio->bi_rw = WRITE | do_sync | do_fua; 1121 mbio->bi_rw = WRITE | do_sync | do_fua;
@@ -920,6 +1140,21 @@ static int make_request(mddev_t *mddev, struct bio * bio)
920 /* In case raid10d snuck in to freeze_array */ 1140 /* In case raid10d snuck in to freeze_array */
921 wake_up(&conf->wait_barrier); 1141 wake_up(&conf->wait_barrier);
922 1142
1143 if (sectors_handled < (bio->bi_size >> 9)) {
1144 /* We need another r10_bio. It has already been counted
1145 * in bio->bi_phys_segments.
1146 */
1147 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1148
1149 r10_bio->master_bio = bio;
1150 r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
1151
1152 r10_bio->mddev = mddev;
1153 r10_bio->sector = bio->bi_sector + sectors_handled;
1154 r10_bio->state = 0;
1155 goto retry_write;
1156 }
1157
923 if (do_sync || !mddev->bitmap || !plugged) 1158 if (do_sync || !mddev->bitmap || !plugged)
924 md_wakeup_thread(mddev->thread); 1159 md_wakeup_thread(mddev->thread);
925 return 0; 1160 return 0;
@@ -949,6 +1184,30 @@ static void status(struct seq_file *seq, mddev_t *mddev)
949 seq_printf(seq, "]"); 1184 seq_printf(seq, "]");
950} 1185}
951 1186
1187/* check if there are enough drives for
1188 * every block to appear on atleast one.
1189 * Don't consider the device numbered 'ignore'
1190 * as we might be about to remove it.
1191 */
1192static int enough(conf_t *conf, int ignore)
1193{
1194 int first = 0;
1195
1196 do {
1197 int n = conf->copies;
1198 int cnt = 0;
1199 while (n--) {
1200 if (conf->mirrors[first].rdev &&
1201 first != ignore)
1202 cnt++;
1203 first = (first+1) % conf->raid_disks;
1204 }
1205 if (cnt == 0)
1206 return 0;
1207 } while (first != 0);
1208 return 1;
1209}
1210
952static void error(mddev_t *mddev, mdk_rdev_t *rdev) 1211static void error(mddev_t *mddev, mdk_rdev_t *rdev)
953{ 1212{
954 char b[BDEVNAME_SIZE]; 1213 char b[BDEVNAME_SIZE];
@@ -961,13 +1220,9 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
961 * else mark the drive as failed 1220 * else mark the drive as failed
962 */ 1221 */
963 if (test_bit(In_sync, &rdev->flags) 1222 if (test_bit(In_sync, &rdev->flags)
964 && conf->raid_disks-mddev->degraded == 1) 1223 && !enough(conf, rdev->raid_disk))
965 /* 1224 /*
966 * Don't fail the drive, just return an IO error. 1225 * Don't fail the drive, just return an IO error.
967 * The test should really be more sophisticated than
968 * "working_disks == 1", but it isn't critical, and
969 * can wait until we do more sophisticated "is the drive
970 * really dead" tests...
971 */ 1226 */
972 return; 1227 return;
973 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1228 if (test_and_clear_bit(In_sync, &rdev->flags)) {
@@ -980,6 +1235,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
980 */ 1235 */
981 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1236 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
982 } 1237 }
1238 set_bit(Blocked, &rdev->flags);
983 set_bit(Faulty, &rdev->flags); 1239 set_bit(Faulty, &rdev->flags);
984 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1240 set_bit(MD_CHANGE_DEVS, &mddev->flags);
985 printk(KERN_ALERT 1241 printk(KERN_ALERT
@@ -1022,27 +1278,6 @@ static void close_sync(conf_t *conf)
1022 conf->r10buf_pool = NULL; 1278 conf->r10buf_pool = NULL;
1023} 1279}
1024 1280
1025/* check if there are enough drives for
1026 * every block to appear on atleast one
1027 */
1028static int enough(conf_t *conf)
1029{
1030 int first = 0;
1031
1032 do {
1033 int n = conf->copies;
1034 int cnt = 0;
1035 while (n--) {
1036 if (conf->mirrors[first].rdev)
1037 cnt++;
1038 first = (first+1) % conf->raid_disks;
1039 }
1040 if (cnt == 0)
1041 return 0;
1042 } while (first != 0);
1043 return 1;
1044}
1045
1046static int raid10_spare_active(mddev_t *mddev) 1281static int raid10_spare_active(mddev_t *mddev)
1047{ 1282{
1048 int i; 1283 int i;
@@ -1078,7 +1313,6 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1078 conf_t *conf = mddev->private; 1313 conf_t *conf = mddev->private;
1079 int err = -EEXIST; 1314 int err = -EEXIST;
1080 int mirror; 1315 int mirror;
1081 mirror_info_t *p;
1082 int first = 0; 1316 int first = 0;
1083 int last = conf->raid_disks - 1; 1317 int last = conf->raid_disks - 1;
1084 1318
@@ -1087,44 +1321,47 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1087 * very different from resync 1321 * very different from resync
1088 */ 1322 */
1089 return -EBUSY; 1323 return -EBUSY;
1090 if (!enough(conf)) 1324 if (!enough(conf, -1))
1091 return -EINVAL; 1325 return -EINVAL;
1092 1326
1093 if (rdev->raid_disk >= 0) 1327 if (rdev->raid_disk >= 0)
1094 first = last = rdev->raid_disk; 1328 first = last = rdev->raid_disk;
1095 1329
1096 if (rdev->saved_raid_disk >= 0 && 1330 if (rdev->saved_raid_disk >= first &&
1097 rdev->saved_raid_disk >= first &&
1098 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) 1331 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1099 mirror = rdev->saved_raid_disk; 1332 mirror = rdev->saved_raid_disk;
1100 else 1333 else
1101 mirror = first; 1334 mirror = first;
1102 for ( ; mirror <= last ; mirror++) 1335 for ( ; mirror <= last ; mirror++) {
1103 if ( !(p=conf->mirrors+mirror)->rdev) { 1336 mirror_info_t *p = &conf->mirrors[mirror];
1104 1337 if (p->recovery_disabled == mddev->recovery_disabled)
1105 disk_stack_limits(mddev->gendisk, rdev->bdev, 1338 continue;
1106 rdev->data_offset << 9); 1339 if (!p->rdev)
1107 /* as we don't honour merge_bvec_fn, we must 1340 continue;
1108 * never risk violating it, so limit
1109 * ->max_segments to one lying with a single
1110 * page, as a one page request is never in
1111 * violation.
1112 */
1113 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
1114 blk_queue_max_segments(mddev->queue, 1);
1115 blk_queue_segment_boundary(mddev->queue,
1116 PAGE_CACHE_SIZE - 1);
1117 }
1118 1341
1119 p->head_position = 0; 1342 disk_stack_limits(mddev->gendisk, rdev->bdev,
1120 rdev->raid_disk = mirror; 1343 rdev->data_offset << 9);
1121 err = 0; 1344 /* as we don't honour merge_bvec_fn, we must
1122 if (rdev->saved_raid_disk != mirror) 1345 * never risk violating it, so limit
1123 conf->fullsync = 1; 1346 * ->max_segments to one lying with a single
1124 rcu_assign_pointer(p->rdev, rdev); 1347 * page, as a one page request is never in
1125 break; 1348 * violation.
1349 */
1350 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
1351 blk_queue_max_segments(mddev->queue, 1);
1352 blk_queue_segment_boundary(mddev->queue,
1353 PAGE_CACHE_SIZE - 1);
1126 } 1354 }
1127 1355
1356 p->head_position = 0;
1357 rdev->raid_disk = mirror;
1358 err = 0;
1359 if (rdev->saved_raid_disk != mirror)
1360 conf->fullsync = 1;
1361 rcu_assign_pointer(p->rdev, rdev);
1362 break;
1363 }
1364
1128 md_integrity_add_rdev(rdev, mddev); 1365 md_integrity_add_rdev(rdev, mddev);
1129 print_conf(conf); 1366 print_conf(conf);
1130 return err; 1367 return err;
@@ -1149,7 +1386,8 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
1149 * is not possible. 1386 * is not possible.
1150 */ 1387 */
1151 if (!test_bit(Faulty, &rdev->flags) && 1388 if (!test_bit(Faulty, &rdev->flags) &&
1152 enough(conf)) { 1389 mddev->recovery_disabled != p->recovery_disabled &&
1390 enough(conf, -1)) {
1153 err = -EBUSY; 1391 err = -EBUSY;
1154 goto abort; 1392 goto abort;
1155 } 1393 }
@@ -1174,24 +1412,18 @@ static void end_sync_read(struct bio *bio, int error)
1174{ 1412{
1175 r10bio_t *r10_bio = bio->bi_private; 1413 r10bio_t *r10_bio = bio->bi_private;
1176 conf_t *conf = r10_bio->mddev->private; 1414 conf_t *conf = r10_bio->mddev->private;
1177 int i,d; 1415 int d;
1178 1416
1179 for (i=0; i<conf->copies; i++) 1417 d = find_bio_disk(conf, r10_bio, bio, NULL);
1180 if (r10_bio->devs[i].bio == bio)
1181 break;
1182 BUG_ON(i == conf->copies);
1183 update_head_pos(i, r10_bio);
1184 d = r10_bio->devs[i].devnum;
1185 1418
1186 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 1419 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1187 set_bit(R10BIO_Uptodate, &r10_bio->state); 1420 set_bit(R10BIO_Uptodate, &r10_bio->state);
1188 else { 1421 else
1422 /* The write handler will notice the lack of
1423 * R10BIO_Uptodate and record any errors etc
1424 */
1189 atomic_add(r10_bio->sectors, 1425 atomic_add(r10_bio->sectors,
1190 &conf->mirrors[d].rdev->corrected_errors); 1426 &conf->mirrors[d].rdev->corrected_errors);
1191 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
1192 md_error(r10_bio->mddev,
1193 conf->mirrors[d].rdev);
1194 }
1195 1427
1196 /* for reconstruct, we always reschedule after a read. 1428 /* for reconstruct, we always reschedule after a read.
1197 * for resync, only after all reads 1429 * for resync, only after all reads
@@ -1206,40 +1438,60 @@ static void end_sync_read(struct bio *bio, int error)
1206 } 1438 }
1207} 1439}
1208 1440
1209static void end_sync_write(struct bio *bio, int error) 1441static void end_sync_request(r10bio_t *r10_bio)
1210{ 1442{
1211 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1212 r10bio_t *r10_bio = bio->bi_private;
1213 mddev_t *mddev = r10_bio->mddev; 1443 mddev_t *mddev = r10_bio->mddev;
1214 conf_t *conf = mddev->private;
1215 int i,d;
1216
1217 for (i = 0; i < conf->copies; i++)
1218 if (r10_bio->devs[i].bio == bio)
1219 break;
1220 d = r10_bio->devs[i].devnum;
1221 1444
1222 if (!uptodate)
1223 md_error(mddev, conf->mirrors[d].rdev);
1224
1225 update_head_pos(i, r10_bio);
1226
1227 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1228 while (atomic_dec_and_test(&r10_bio->remaining)) { 1445 while (atomic_dec_and_test(&r10_bio->remaining)) {
1229 if (r10_bio->master_bio == NULL) { 1446 if (r10_bio->master_bio == NULL) {
1230 /* the primary of several recovery bios */ 1447 /* the primary of several recovery bios */
1231 sector_t s = r10_bio->sectors; 1448 sector_t s = r10_bio->sectors;
1232 put_buf(r10_bio); 1449 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1450 test_bit(R10BIO_WriteError, &r10_bio->state))
1451 reschedule_retry(r10_bio);
1452 else
1453 put_buf(r10_bio);
1233 md_done_sync(mddev, s, 1); 1454 md_done_sync(mddev, s, 1);
1234 break; 1455 break;
1235 } else { 1456 } else {
1236 r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio; 1457 r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
1237 put_buf(r10_bio); 1458 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1459 test_bit(R10BIO_WriteError, &r10_bio->state))
1460 reschedule_retry(r10_bio);
1461 else
1462 put_buf(r10_bio);
1238 r10_bio = r10_bio2; 1463 r10_bio = r10_bio2;
1239 } 1464 }
1240 } 1465 }
1241} 1466}
1242 1467
1468static void end_sync_write(struct bio *bio, int error)
1469{
1470 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1471 r10bio_t *r10_bio = bio->bi_private;
1472 mddev_t *mddev = r10_bio->mddev;
1473 conf_t *conf = mddev->private;
1474 int d;
1475 sector_t first_bad;
1476 int bad_sectors;
1477 int slot;
1478
1479 d = find_bio_disk(conf, r10_bio, bio, &slot);
1480
1481 if (!uptodate) {
1482 set_bit(WriteErrorSeen, &conf->mirrors[d].rdev->flags);
1483 set_bit(R10BIO_WriteError, &r10_bio->state);
1484 } else if (is_badblock(conf->mirrors[d].rdev,
1485 r10_bio->devs[slot].addr,
1486 r10_bio->sectors,
1487 &first_bad, &bad_sectors))
1488 set_bit(R10BIO_MadeGood, &r10_bio->state);
1489
1490 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1491
1492 end_sync_request(r10_bio);
1493}
1494
1243/* 1495/*
1244 * Note: sync and recover and handled very differently for raid10 1496 * Note: sync and recover and handled very differently for raid10
1245 * This code is for resync. 1497 * This code is for resync.
@@ -1299,11 +1551,12 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1299 if (j == vcnt) 1551 if (j == vcnt)
1300 continue; 1552 continue;
1301 mddev->resync_mismatches += r10_bio->sectors; 1553 mddev->resync_mismatches += r10_bio->sectors;
1554 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1555 /* Don't fix anything. */
1556 continue;
1302 } 1557 }
1303 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 1558 /* Ok, we need to write this bio, either to correct an
1304 /* Don't fix anything. */ 1559 * inconsistency or to correct an unreadable block.
1305 continue;
1306 /* Ok, we need to write this bio
1307 * First we need to fixup bv_offset, bv_len and 1560 * First we need to fixup bv_offset, bv_len and
1308 * bi_vecs, as the read request might have corrupted these 1561 * bi_vecs, as the read request might have corrupted these
1309 */ 1562 */
@@ -1355,32 +1608,107 @@ done:
1355 * The second for writing. 1608 * The second for writing.
1356 * 1609 *
1357 */ 1610 */
1611static void fix_recovery_read_error(r10bio_t *r10_bio)
1612{
1613 /* We got a read error during recovery.
1614 * We repeat the read in smaller page-sized sections.
1615 * If a read succeeds, write it to the new device or record
1616 * a bad block if we cannot.
1617 * If a read fails, record a bad block on both old and
1618 * new devices.
1619 */
1620 mddev_t *mddev = r10_bio->mddev;
1621 conf_t *conf = mddev->private;
1622 struct bio *bio = r10_bio->devs[0].bio;
1623 sector_t sect = 0;
1624 int sectors = r10_bio->sectors;
1625 int idx = 0;
1626 int dr = r10_bio->devs[0].devnum;
1627 int dw = r10_bio->devs[1].devnum;
1628
1629 while (sectors) {
1630 int s = sectors;
1631 mdk_rdev_t *rdev;
1632 sector_t addr;
1633 int ok;
1634
1635 if (s > (PAGE_SIZE>>9))
1636 s = PAGE_SIZE >> 9;
1637
1638 rdev = conf->mirrors[dr].rdev;
1639 addr = r10_bio->devs[0].addr + sect,
1640 ok = sync_page_io(rdev,
1641 addr,
1642 s << 9,
1643 bio->bi_io_vec[idx].bv_page,
1644 READ, false);
1645 if (ok) {
1646 rdev = conf->mirrors[dw].rdev;
1647 addr = r10_bio->devs[1].addr + sect;
1648 ok = sync_page_io(rdev,
1649 addr,
1650 s << 9,
1651 bio->bi_io_vec[idx].bv_page,
1652 WRITE, false);
1653 if (!ok)
1654 set_bit(WriteErrorSeen, &rdev->flags);
1655 }
1656 if (!ok) {
1657 /* We don't worry if we cannot set a bad block -
1658 * it really is bad so there is no loss in not
1659 * recording it yet
1660 */
1661 rdev_set_badblocks(rdev, addr, s, 0);
1662
1663 if (rdev != conf->mirrors[dw].rdev) {
1664 /* need bad block on destination too */
1665 mdk_rdev_t *rdev2 = conf->mirrors[dw].rdev;
1666 addr = r10_bio->devs[1].addr + sect;
1667 ok = rdev_set_badblocks(rdev2, addr, s, 0);
1668 if (!ok) {
1669 /* just abort the recovery */
1670 printk(KERN_NOTICE
1671 "md/raid10:%s: recovery aborted"
1672 " due to read error\n",
1673 mdname(mddev));
1674
1675 conf->mirrors[dw].recovery_disabled
1676 = mddev->recovery_disabled;
1677 set_bit(MD_RECOVERY_INTR,
1678 &mddev->recovery);
1679 break;
1680 }
1681 }
1682 }
1683
1684 sectors -= s;
1685 sect += s;
1686 idx++;
1687 }
1688}
1358 1689
1359static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) 1690static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1360{ 1691{
1361 conf_t *conf = mddev->private; 1692 conf_t *conf = mddev->private;
1362 int i, d; 1693 int d;
1363 struct bio *bio, *wbio; 1694 struct bio *wbio;
1364 1695
1696 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
1697 fix_recovery_read_error(r10_bio);
1698 end_sync_request(r10_bio);
1699 return;
1700 }
1365 1701
1366 /* move the pages across to the second bio 1702 /*
1703 * share the pages with the first bio
1367 * and submit the write request 1704 * and submit the write request
1368 */ 1705 */
1369 bio = r10_bio->devs[0].bio;
1370 wbio = r10_bio->devs[1].bio; 1706 wbio = r10_bio->devs[1].bio;
1371 for (i=0; i < wbio->bi_vcnt; i++) {
1372 struct page *p = bio->bi_io_vec[i].bv_page;
1373 bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page;
1374 wbio->bi_io_vec[i].bv_page = p;
1375 }
1376 d = r10_bio->devs[1].devnum; 1707 d = r10_bio->devs[1].devnum;
1377 1708
1378 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 1709 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1379 md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9); 1710 md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
1380 if (test_bit(R10BIO_Uptodate, &r10_bio->state)) 1711 generic_make_request(wbio);
1381 generic_make_request(wbio);
1382 else
1383 bio_endio(wbio, -EIO);
1384} 1712}
1385 1713
1386 1714
@@ -1421,6 +1749,26 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
1421 atomic_set(&rdev->read_errors, read_errors >> hours_since_last); 1749 atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
1422} 1750}
1423 1751
1752static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
1753 int sectors, struct page *page, int rw)
1754{
1755 sector_t first_bad;
1756 int bad_sectors;
1757
1758 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
1759 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
1760 return -1;
1761 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
1762 /* success */
1763 return 1;
1764 if (rw == WRITE)
1765 set_bit(WriteErrorSeen, &rdev->flags);
1766 /* need to record an error - either for the block or the device */
1767 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1768 md_error(rdev->mddev, rdev);
1769 return 0;
1770}
1771
1424/* 1772/*
1425 * This is a kernel thread which: 1773 * This is a kernel thread which:
1426 * 1774 *
@@ -1476,10 +1824,15 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
1476 1824
1477 rcu_read_lock(); 1825 rcu_read_lock();
1478 do { 1826 do {
1827 sector_t first_bad;
1828 int bad_sectors;
1829
1479 d = r10_bio->devs[sl].devnum; 1830 d = r10_bio->devs[sl].devnum;
1480 rdev = rcu_dereference(conf->mirrors[d].rdev); 1831 rdev = rcu_dereference(conf->mirrors[d].rdev);
1481 if (rdev && 1832 if (rdev &&
1482 test_bit(In_sync, &rdev->flags)) { 1833 test_bit(In_sync, &rdev->flags) &&
1834 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
1835 &first_bad, &bad_sectors) == 0) {
1483 atomic_inc(&rdev->nr_pending); 1836 atomic_inc(&rdev->nr_pending);
1484 rcu_read_unlock(); 1837 rcu_read_unlock();
1485 success = sync_page_io(rdev, 1838 success = sync_page_io(rdev,
@@ -1499,9 +1852,19 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
1499 rcu_read_unlock(); 1852 rcu_read_unlock();
1500 1853
1501 if (!success) { 1854 if (!success) {
1502 /* Cannot read from anywhere -- bye bye array */ 1855 /* Cannot read from anywhere, just mark the block
1856 * as bad on the first device to discourage future
1857 * reads.
1858 */
1503 int dn = r10_bio->devs[r10_bio->read_slot].devnum; 1859 int dn = r10_bio->devs[r10_bio->read_slot].devnum;
1504 md_error(mddev, conf->mirrors[dn].rdev); 1860 rdev = conf->mirrors[dn].rdev;
1861
1862 if (!rdev_set_badblocks(
1863 rdev,
1864 r10_bio->devs[r10_bio->read_slot].addr
1865 + sect,
1866 s, 0))
1867 md_error(mddev, rdev);
1505 break; 1868 break;
1506 } 1869 }
1507 1870
@@ -1516,80 +1879,82 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
1516 sl--; 1879 sl--;
1517 d = r10_bio->devs[sl].devnum; 1880 d = r10_bio->devs[sl].devnum;
1518 rdev = rcu_dereference(conf->mirrors[d].rdev); 1881 rdev = rcu_dereference(conf->mirrors[d].rdev);
1519 if (rdev && 1882 if (!rdev ||
1520 test_bit(In_sync, &rdev->flags)) { 1883 !test_bit(In_sync, &rdev->flags))
1521 atomic_inc(&rdev->nr_pending); 1884 continue;
1522 rcu_read_unlock(); 1885
1523 atomic_add(s, &rdev->corrected_errors); 1886 atomic_inc(&rdev->nr_pending);
1524 if (sync_page_io(rdev, 1887 rcu_read_unlock();
1525 r10_bio->devs[sl].addr + 1888 if (r10_sync_page_io(rdev,
1526 sect, 1889 r10_bio->devs[sl].addr +
1527 s<<9, conf->tmppage, WRITE, false) 1890 sect,
1528 == 0) { 1891 s<<9, conf->tmppage, WRITE)
1529 /* Well, this device is dead */ 1892 == 0) {
1530 printk(KERN_NOTICE 1893 /* Well, this device is dead */
1531 "md/raid10:%s: read correction " 1894 printk(KERN_NOTICE
1532 "write failed" 1895 "md/raid10:%s: read correction "
1533 " (%d sectors at %llu on %s)\n", 1896 "write failed"
1534 mdname(mddev), s, 1897 " (%d sectors at %llu on %s)\n",
1535 (unsigned long long)( 1898 mdname(mddev), s,
1536 sect + rdev->data_offset), 1899 (unsigned long long)(
1537 bdevname(rdev->bdev, b)); 1900 sect + rdev->data_offset),
1538 printk(KERN_NOTICE "md/raid10:%s: %s: failing " 1901 bdevname(rdev->bdev, b));
1539 "drive\n", 1902 printk(KERN_NOTICE "md/raid10:%s: %s: failing "
1540 mdname(mddev), 1903 "drive\n",
1541 bdevname(rdev->bdev, b)); 1904 mdname(mddev),
1542 md_error(mddev, rdev); 1905 bdevname(rdev->bdev, b));
1543 }
1544 rdev_dec_pending(rdev, mddev);
1545 rcu_read_lock();
1546 } 1906 }
1907 rdev_dec_pending(rdev, mddev);
1908 rcu_read_lock();
1547 } 1909 }
1548 sl = start; 1910 sl = start;
1549 while (sl != r10_bio->read_slot) { 1911 while (sl != r10_bio->read_slot) {
1912 char b[BDEVNAME_SIZE];
1550 1913
1551 if (sl==0) 1914 if (sl==0)
1552 sl = conf->copies; 1915 sl = conf->copies;
1553 sl--; 1916 sl--;
1554 d = r10_bio->devs[sl].devnum; 1917 d = r10_bio->devs[sl].devnum;
1555 rdev = rcu_dereference(conf->mirrors[d].rdev); 1918 rdev = rcu_dereference(conf->mirrors[d].rdev);
1556 if (rdev && 1919 if (!rdev ||
1557 test_bit(In_sync, &rdev->flags)) { 1920 !test_bit(In_sync, &rdev->flags))
1558 char b[BDEVNAME_SIZE]; 1921 continue;
1559 atomic_inc(&rdev->nr_pending);
1560 rcu_read_unlock();
1561 if (sync_page_io(rdev,
1562 r10_bio->devs[sl].addr +
1563 sect,
1564 s<<9, conf->tmppage,
1565 READ, false) == 0) {
1566 /* Well, this device is dead */
1567 printk(KERN_NOTICE
1568 "md/raid10:%s: unable to read back "
1569 "corrected sectors"
1570 " (%d sectors at %llu on %s)\n",
1571 mdname(mddev), s,
1572 (unsigned long long)(
1573 sect + rdev->data_offset),
1574 bdevname(rdev->bdev, b));
1575 printk(KERN_NOTICE "md/raid10:%s: %s: failing drive\n",
1576 mdname(mddev),
1577 bdevname(rdev->bdev, b));
1578
1579 md_error(mddev, rdev);
1580 } else {
1581 printk(KERN_INFO
1582 "md/raid10:%s: read error corrected"
1583 " (%d sectors at %llu on %s)\n",
1584 mdname(mddev), s,
1585 (unsigned long long)(
1586 sect + rdev->data_offset),
1587 bdevname(rdev->bdev, b));
1588 }
1589 1922
1590 rdev_dec_pending(rdev, mddev); 1923 atomic_inc(&rdev->nr_pending);
1591 rcu_read_lock(); 1924 rcu_read_unlock();
1925 switch (r10_sync_page_io(rdev,
1926 r10_bio->devs[sl].addr +
1927 sect,
1928 s<<9, conf->tmppage,
1929 READ)) {
1930 case 0:
1931 /* Well, this device is dead */
1932 printk(KERN_NOTICE
1933 "md/raid10:%s: unable to read back "
1934 "corrected sectors"
1935 " (%d sectors at %llu on %s)\n",
1936 mdname(mddev), s,
1937 (unsigned long long)(
1938 sect + rdev->data_offset),
1939 bdevname(rdev->bdev, b));
1940 printk(KERN_NOTICE "md/raid10:%s: %s: failing "
1941 "drive\n",
1942 mdname(mddev),
1943 bdevname(rdev->bdev, b));
1944 break;
1945 case 1:
1946 printk(KERN_INFO
1947 "md/raid10:%s: read error corrected"
1948 " (%d sectors at %llu on %s)\n",
1949 mdname(mddev), s,
1950 (unsigned long long)(
1951 sect + rdev->data_offset),
1952 bdevname(rdev->bdev, b));
1953 atomic_add(s, &rdev->corrected_errors);
1592 } 1954 }
1955
1956 rdev_dec_pending(rdev, mddev);
1957 rcu_read_lock();
1593 } 1958 }
1594 rcu_read_unlock(); 1959 rcu_read_unlock();
1595 1960
@@ -1598,21 +1963,254 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
1598 } 1963 }
1599} 1964}
1600 1965
1966static void bi_complete(struct bio *bio, int error)
1967{
1968 complete((struct completion *)bio->bi_private);
1969}
1970
1971static int submit_bio_wait(int rw, struct bio *bio)
1972{
1973 struct completion event;
1974 rw |= REQ_SYNC;
1975
1976 init_completion(&event);
1977 bio->bi_private = &event;
1978 bio->bi_end_io = bi_complete;
1979 submit_bio(rw, bio);
1980 wait_for_completion(&event);
1981
1982 return test_bit(BIO_UPTODATE, &bio->bi_flags);
1983}
1984
1985static int narrow_write_error(r10bio_t *r10_bio, int i)
1986{
1987 struct bio *bio = r10_bio->master_bio;
1988 mddev_t *mddev = r10_bio->mddev;
1989 conf_t *conf = mddev->private;
1990 mdk_rdev_t *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
1991 /* bio has the data to be written to slot 'i' where
1992 * we just recently had a write error.
1993 * We repeatedly clone the bio and trim down to one block,
1994 * then try the write. Where the write fails we record
1995 * a bad block.
1996 * It is conceivable that the bio doesn't exactly align with
1997 * blocks. We must handle this.
1998 *
1999 * We currently own a reference to the rdev.
2000 */
2001
2002 int block_sectors;
2003 sector_t sector;
2004 int sectors;
2005 int sect_to_write = r10_bio->sectors;
2006 int ok = 1;
2007
2008 if (rdev->badblocks.shift < 0)
2009 return 0;
2010
2011 block_sectors = 1 << rdev->badblocks.shift;
2012 sector = r10_bio->sector;
2013 sectors = ((r10_bio->sector + block_sectors)
2014 & ~(sector_t)(block_sectors - 1))
2015 - sector;
2016
2017 while (sect_to_write) {
2018 struct bio *wbio;
2019 if (sectors > sect_to_write)
2020 sectors = sect_to_write;
2021 /* Write at 'sector' for 'sectors' */
2022 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
2023 md_trim_bio(wbio, sector - bio->bi_sector, sectors);
2024 wbio->bi_sector = (r10_bio->devs[i].addr+
2025 rdev->data_offset+
2026 (sector - r10_bio->sector));
2027 wbio->bi_bdev = rdev->bdev;
2028 if (submit_bio_wait(WRITE, wbio) == 0)
2029 /* Failure! */
2030 ok = rdev_set_badblocks(rdev, sector,
2031 sectors, 0)
2032 && ok;
2033
2034 bio_put(wbio);
2035 sect_to_write -= sectors;
2036 sector += sectors;
2037 sectors = block_sectors;
2038 }
2039 return ok;
2040}
2041
2042static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
2043{
2044 int slot = r10_bio->read_slot;
2045 int mirror = r10_bio->devs[slot].devnum;
2046 struct bio *bio;
2047 conf_t *conf = mddev->private;
2048 mdk_rdev_t *rdev;
2049 char b[BDEVNAME_SIZE];
2050 unsigned long do_sync;
2051 int max_sectors;
2052
2053 /* we got a read error. Maybe the drive is bad. Maybe just
2054 * the block and we can fix it.
2055 * We freeze all other IO, and try reading the block from
2056 * other devices. When we find one, we re-write
2057 * and check it that fixes the read error.
2058 * This is all done synchronously while the array is
2059 * frozen.
2060 */
2061 if (mddev->ro == 0) {
2062 freeze_array(conf);
2063 fix_read_error(conf, mddev, r10_bio);
2064 unfreeze_array(conf);
2065 }
2066 rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
2067
2068 bio = r10_bio->devs[slot].bio;
2069 bdevname(bio->bi_bdev, b);
2070 r10_bio->devs[slot].bio =
2071 mddev->ro ? IO_BLOCKED : NULL;
2072read_more:
2073 mirror = read_balance(conf, r10_bio, &max_sectors);
2074 if (mirror == -1) {
2075 printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
2076 " read error for block %llu\n",
2077 mdname(mddev), b,
2078 (unsigned long long)r10_bio->sector);
2079 raid_end_bio_io(r10_bio);
2080 bio_put(bio);
2081 return;
2082 }
2083
2084 do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
2085 if (bio)
2086 bio_put(bio);
2087 slot = r10_bio->read_slot;
2088 rdev = conf->mirrors[mirror].rdev;
2089 printk_ratelimited(
2090 KERN_ERR
2091 "md/raid10:%s: %s: redirecting"
2092 "sector %llu to another mirror\n",
2093 mdname(mddev),
2094 bdevname(rdev->bdev, b),
2095 (unsigned long long)r10_bio->sector);
2096 bio = bio_clone_mddev(r10_bio->master_bio,
2097 GFP_NOIO, mddev);
2098 md_trim_bio(bio,
2099 r10_bio->sector - bio->bi_sector,
2100 max_sectors);
2101 r10_bio->devs[slot].bio = bio;
2102 bio->bi_sector = r10_bio->devs[slot].addr
2103 + rdev->data_offset;
2104 bio->bi_bdev = rdev->bdev;
2105 bio->bi_rw = READ | do_sync;
2106 bio->bi_private = r10_bio;
2107 bio->bi_end_io = raid10_end_read_request;
2108 if (max_sectors < r10_bio->sectors) {
2109 /* Drat - have to split this up more */
2110 struct bio *mbio = r10_bio->master_bio;
2111 int sectors_handled =
2112 r10_bio->sector + max_sectors
2113 - mbio->bi_sector;
2114 r10_bio->sectors = max_sectors;
2115 spin_lock_irq(&conf->device_lock);
2116 if (mbio->bi_phys_segments == 0)
2117 mbio->bi_phys_segments = 2;
2118 else
2119 mbio->bi_phys_segments++;
2120 spin_unlock_irq(&conf->device_lock);
2121 generic_make_request(bio);
2122 bio = NULL;
2123
2124 r10_bio = mempool_alloc(conf->r10bio_pool,
2125 GFP_NOIO);
2126 r10_bio->master_bio = mbio;
2127 r10_bio->sectors = (mbio->bi_size >> 9)
2128 - sectors_handled;
2129 r10_bio->state = 0;
2130 set_bit(R10BIO_ReadError,
2131 &r10_bio->state);
2132 r10_bio->mddev = mddev;
2133 r10_bio->sector = mbio->bi_sector
2134 + sectors_handled;
2135
2136 goto read_more;
2137 } else
2138 generic_make_request(bio);
2139}
2140
2141static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio)
2142{
2143 /* Some sort of write request has finished and it
2144 * succeeded in writing where we thought there was a
2145 * bad block. So forget the bad block.
2146 * Or possibly if failed and we need to record
2147 * a bad block.
2148 */
2149 int m;
2150 mdk_rdev_t *rdev;
2151
2152 if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2153 test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2154 for (m = 0; m < conf->copies; m++) {
2155 int dev = r10_bio->devs[m].devnum;
2156 rdev = conf->mirrors[dev].rdev;
2157 if (r10_bio->devs[m].bio == NULL)
2158 continue;
2159 if (test_bit(BIO_UPTODATE,
2160 &r10_bio->devs[m].bio->bi_flags)) {
2161 rdev_clear_badblocks(
2162 rdev,
2163 r10_bio->devs[m].addr,
2164 r10_bio->sectors);
2165 } else {
2166 if (!rdev_set_badblocks(
2167 rdev,
2168 r10_bio->devs[m].addr,
2169 r10_bio->sectors, 0))
2170 md_error(conf->mddev, rdev);
2171 }
2172 }
2173 put_buf(r10_bio);
2174 } else {
2175 for (m = 0; m < conf->copies; m++) {
2176 int dev = r10_bio->devs[m].devnum;
2177 struct bio *bio = r10_bio->devs[m].bio;
2178 rdev = conf->mirrors[dev].rdev;
2179 if (bio == IO_MADE_GOOD) {
2180 rdev_clear_badblocks(
2181 rdev,
2182 r10_bio->devs[m].addr,
2183 r10_bio->sectors);
2184 rdev_dec_pending(rdev, conf->mddev);
2185 } else if (bio != NULL &&
2186 !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
2187 if (!narrow_write_error(r10_bio, m)) {
2188 md_error(conf->mddev, rdev);
2189 set_bit(R10BIO_Degraded,
2190 &r10_bio->state);
2191 }
2192 rdev_dec_pending(rdev, conf->mddev);
2193 }
2194 }
2195 if (test_bit(R10BIO_WriteError,
2196 &r10_bio->state))
2197 close_write(r10_bio);
2198 raid_end_bio_io(r10_bio);
2199 }
2200}
2201
1601static void raid10d(mddev_t *mddev) 2202static void raid10d(mddev_t *mddev)
1602{ 2203{
1603 r10bio_t *r10_bio; 2204 r10bio_t *r10_bio;
1604 struct bio *bio;
1605 unsigned long flags; 2205 unsigned long flags;
1606 conf_t *conf = mddev->private; 2206 conf_t *conf = mddev->private;
1607 struct list_head *head = &conf->retry_list; 2207 struct list_head *head = &conf->retry_list;
1608 mdk_rdev_t *rdev;
1609 struct blk_plug plug; 2208 struct blk_plug plug;
1610 2209
1611 md_check_recovery(mddev); 2210 md_check_recovery(mddev);
1612 2211
1613 blk_start_plug(&plug); 2212 blk_start_plug(&plug);
1614 for (;;) { 2213 for (;;) {
1615 char b[BDEVNAME_SIZE];
1616 2214
1617 flush_pending_writes(conf); 2215 flush_pending_writes(conf);
1618 2216
@@ -1628,64 +2226,26 @@ static void raid10d(mddev_t *mddev)
1628 2226
1629 mddev = r10_bio->mddev; 2227 mddev = r10_bio->mddev;
1630 conf = mddev->private; 2228 conf = mddev->private;
1631 if (test_bit(R10BIO_IsSync, &r10_bio->state)) 2229 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2230 test_bit(R10BIO_WriteError, &r10_bio->state))
2231 handle_write_completed(conf, r10_bio);
2232 else if (test_bit(R10BIO_IsSync, &r10_bio->state))
1632 sync_request_write(mddev, r10_bio); 2233 sync_request_write(mddev, r10_bio);
1633 else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) 2234 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
1634 recovery_request_write(mddev, r10_bio); 2235 recovery_request_write(mddev, r10_bio);
2236 else if (test_bit(R10BIO_ReadError, &r10_bio->state))
2237 handle_read_error(mddev, r10_bio);
1635 else { 2238 else {
1636 int slot = r10_bio->read_slot; 2239 /* just a partial read to be scheduled from a
1637 int mirror = r10_bio->devs[slot].devnum; 2240 * separate context
1638 /* we got a read error. Maybe the drive is bad. Maybe just
1639 * the block and we can fix it.
1640 * We freeze all other IO, and try reading the block from
1641 * other devices. When we find one, we re-write
1642 * and check it that fixes the read error.
1643 * This is all done synchronously while the array is
1644 * frozen.
1645 */ 2241 */
1646 if (mddev->ro == 0) { 2242 int slot = r10_bio->read_slot;
1647 freeze_array(conf); 2243 generic_make_request(r10_bio->devs[slot].bio);
1648 fix_read_error(conf, mddev, r10_bio);
1649 unfreeze_array(conf);
1650 }
1651 rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
1652
1653 bio = r10_bio->devs[slot].bio;
1654 r10_bio->devs[slot].bio =
1655 mddev->ro ? IO_BLOCKED : NULL;
1656 mirror = read_balance(conf, r10_bio);
1657 if (mirror == -1) {
1658 printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
1659 " read error for block %llu\n",
1660 mdname(mddev),
1661 bdevname(bio->bi_bdev,b),
1662 (unsigned long long)r10_bio->sector);
1663 raid_end_bio_io(r10_bio);
1664 bio_put(bio);
1665 } else {
1666 const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
1667 bio_put(bio);
1668 slot = r10_bio->read_slot;
1669 rdev = conf->mirrors[mirror].rdev;
1670 if (printk_ratelimit())
1671 printk(KERN_ERR "md/raid10:%s: %s: redirecting sector %llu to"
1672 " another mirror\n",
1673 mdname(mddev),
1674 bdevname(rdev->bdev,b),
1675 (unsigned long long)r10_bio->sector);
1676 bio = bio_clone_mddev(r10_bio->master_bio,
1677 GFP_NOIO, mddev);
1678 r10_bio->devs[slot].bio = bio;
1679 bio->bi_sector = r10_bio->devs[slot].addr
1680 + rdev->data_offset;
1681 bio->bi_bdev = rdev->bdev;
1682 bio->bi_rw = READ | do_sync;
1683 bio->bi_private = r10_bio;
1684 bio->bi_end_io = raid10_end_read_request;
1685 generic_make_request(bio);
1686 }
1687 } 2244 }
2245
1688 cond_resched(); 2246 cond_resched();
2247 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2248 md_check_recovery(mddev);
1689 } 2249 }
1690 blk_finish_plug(&plug); 2250 blk_finish_plug(&plug);
1691} 2251}
@@ -1746,7 +2306,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
1746 int i; 2306 int i;
1747 int max_sync; 2307 int max_sync;
1748 sector_t sync_blocks; 2308 sector_t sync_blocks;
1749
1750 sector_t sectors_skipped = 0; 2309 sector_t sectors_skipped = 0;
1751 int chunks_skipped = 0; 2310 int chunks_skipped = 0;
1752 2311
@@ -1828,7 +2387,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
1828 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9); 2387 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
1829 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2388 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1830 /* recovery... the complicated one */ 2389 /* recovery... the complicated one */
1831 int j, k; 2390 int j;
1832 r10_bio = NULL; 2391 r10_bio = NULL;
1833 2392
1834 for (i=0 ; i<conf->raid_disks; i++) { 2393 for (i=0 ; i<conf->raid_disks; i++) {
@@ -1836,6 +2395,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
1836 r10bio_t *rb2; 2395 r10bio_t *rb2;
1837 sector_t sect; 2396 sector_t sect;
1838 int must_sync; 2397 int must_sync;
2398 int any_working;
1839 2399
1840 if (conf->mirrors[i].rdev == NULL || 2400 if (conf->mirrors[i].rdev == NULL ||
1841 test_bit(In_sync, &conf->mirrors[i].rdev->flags)) 2401 test_bit(In_sync, &conf->mirrors[i].rdev->flags))
@@ -1887,19 +2447,42 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
1887 must_sync = bitmap_start_sync(mddev->bitmap, sect, 2447 must_sync = bitmap_start_sync(mddev->bitmap, sect,
1888 &sync_blocks, still_degraded); 2448 &sync_blocks, still_degraded);
1889 2449
2450 any_working = 0;
1890 for (j=0; j<conf->copies;j++) { 2451 for (j=0; j<conf->copies;j++) {
2452 int k;
1891 int d = r10_bio->devs[j].devnum; 2453 int d = r10_bio->devs[j].devnum;
2454 sector_t from_addr, to_addr;
2455 mdk_rdev_t *rdev;
2456 sector_t sector, first_bad;
2457 int bad_sectors;
1892 if (!conf->mirrors[d].rdev || 2458 if (!conf->mirrors[d].rdev ||
1893 !test_bit(In_sync, &conf->mirrors[d].rdev->flags)) 2459 !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
1894 continue; 2460 continue;
1895 /* This is where we read from */ 2461 /* This is where we read from */
2462 any_working = 1;
2463 rdev = conf->mirrors[d].rdev;
2464 sector = r10_bio->devs[j].addr;
2465
2466 if (is_badblock(rdev, sector, max_sync,
2467 &first_bad, &bad_sectors)) {
2468 if (first_bad > sector)
2469 max_sync = first_bad - sector;
2470 else {
2471 bad_sectors -= (sector
2472 - first_bad);
2473 if (max_sync > bad_sectors)
2474 max_sync = bad_sectors;
2475 continue;
2476 }
2477 }
1896 bio = r10_bio->devs[0].bio; 2478 bio = r10_bio->devs[0].bio;
1897 bio->bi_next = biolist; 2479 bio->bi_next = biolist;
1898 biolist = bio; 2480 biolist = bio;
1899 bio->bi_private = r10_bio; 2481 bio->bi_private = r10_bio;
1900 bio->bi_end_io = end_sync_read; 2482 bio->bi_end_io = end_sync_read;
1901 bio->bi_rw = READ; 2483 bio->bi_rw = READ;
1902 bio->bi_sector = r10_bio->devs[j].addr + 2484 from_addr = r10_bio->devs[j].addr;
2485 bio->bi_sector = from_addr +
1903 conf->mirrors[d].rdev->data_offset; 2486 conf->mirrors[d].rdev->data_offset;
1904 bio->bi_bdev = conf->mirrors[d].rdev->bdev; 2487 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
1905 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2488 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
@@ -1916,26 +2499,48 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
1916 bio->bi_private = r10_bio; 2499 bio->bi_private = r10_bio;
1917 bio->bi_end_io = end_sync_write; 2500 bio->bi_end_io = end_sync_write;
1918 bio->bi_rw = WRITE; 2501 bio->bi_rw = WRITE;
1919 bio->bi_sector = r10_bio->devs[k].addr + 2502 to_addr = r10_bio->devs[k].addr;
2503 bio->bi_sector = to_addr +
1920 conf->mirrors[i].rdev->data_offset; 2504 conf->mirrors[i].rdev->data_offset;
1921 bio->bi_bdev = conf->mirrors[i].rdev->bdev; 2505 bio->bi_bdev = conf->mirrors[i].rdev->bdev;
1922 2506
1923 r10_bio->devs[0].devnum = d; 2507 r10_bio->devs[0].devnum = d;
2508 r10_bio->devs[0].addr = from_addr;
1924 r10_bio->devs[1].devnum = i; 2509 r10_bio->devs[1].devnum = i;
2510 r10_bio->devs[1].addr = to_addr;
1925 2511
1926 break; 2512 break;
1927 } 2513 }
1928 if (j == conf->copies) { 2514 if (j == conf->copies) {
1929 /* Cannot recover, so abort the recovery */ 2515 /* Cannot recover, so abort the recovery or
2516 * record a bad block */
1930 put_buf(r10_bio); 2517 put_buf(r10_bio);
1931 if (rb2) 2518 if (rb2)
1932 atomic_dec(&rb2->remaining); 2519 atomic_dec(&rb2->remaining);
1933 r10_bio = rb2; 2520 r10_bio = rb2;
1934 if (!test_and_set_bit(MD_RECOVERY_INTR, 2521 if (any_working) {
1935 &mddev->recovery)) 2522 /* problem is that there are bad blocks
1936 printk(KERN_INFO "md/raid10:%s: insufficient " 2523 * on other device(s)
1937 "working devices for recovery.\n", 2524 */
1938 mdname(mddev)); 2525 int k;
2526 for (k = 0; k < conf->copies; k++)
2527 if (r10_bio->devs[k].devnum == i)
2528 break;
2529 if (!rdev_set_badblocks(
2530 conf->mirrors[i].rdev,
2531 r10_bio->devs[k].addr,
2532 max_sync, 0))
2533 any_working = 0;
2534 }
2535 if (!any_working) {
2536 if (!test_and_set_bit(MD_RECOVERY_INTR,
2537 &mddev->recovery))
2538 printk(KERN_INFO "md/raid10:%s: insufficient "
2539 "working devices for recovery.\n",
2540 mdname(mddev));
2541 conf->mirrors[i].recovery_disabled
2542 = mddev->recovery_disabled;
2543 }
1939 break; 2544 break;
1940 } 2545 }
1941 } 2546 }
@@ -1979,12 +2584,28 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
1979 2584
1980 for (i=0; i<conf->copies; i++) { 2585 for (i=0; i<conf->copies; i++) {
1981 int d = r10_bio->devs[i].devnum; 2586 int d = r10_bio->devs[i].devnum;
2587 sector_t first_bad, sector;
2588 int bad_sectors;
2589
1982 bio = r10_bio->devs[i].bio; 2590 bio = r10_bio->devs[i].bio;
1983 bio->bi_end_io = NULL; 2591 bio->bi_end_io = NULL;
1984 clear_bit(BIO_UPTODATE, &bio->bi_flags); 2592 clear_bit(BIO_UPTODATE, &bio->bi_flags);
1985 if (conf->mirrors[d].rdev == NULL || 2593 if (conf->mirrors[d].rdev == NULL ||
1986 test_bit(Faulty, &conf->mirrors[d].rdev->flags)) 2594 test_bit(Faulty, &conf->mirrors[d].rdev->flags))
1987 continue; 2595 continue;
2596 sector = r10_bio->devs[i].addr;
2597 if (is_badblock(conf->mirrors[d].rdev,
2598 sector, max_sync,
2599 &first_bad, &bad_sectors)) {
2600 if (first_bad > sector)
2601 max_sync = first_bad - sector;
2602 else {
2603 bad_sectors -= (sector - first_bad);
2604 if (max_sync > bad_sectors)
2605 max_sync = max_sync;
2606 continue;
2607 }
2608 }
1988 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2609 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1989 atomic_inc(&r10_bio->remaining); 2610 atomic_inc(&r10_bio->remaining);
1990 bio->bi_next = biolist; 2611 bio->bi_next = biolist;
@@ -1992,7 +2613,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
1992 bio->bi_private = r10_bio; 2613 bio->bi_private = r10_bio;
1993 bio->bi_end_io = end_sync_read; 2614 bio->bi_end_io = end_sync_read;
1994 bio->bi_rw = READ; 2615 bio->bi_rw = READ;
1995 bio->bi_sector = r10_bio->devs[i].addr + 2616 bio->bi_sector = sector +
1996 conf->mirrors[d].rdev->data_offset; 2617 conf->mirrors[d].rdev->data_offset;
1997 bio->bi_bdev = conf->mirrors[d].rdev->bdev; 2618 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
1998 count++; 2619 count++;
@@ -2079,7 +2700,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
2079 return sectors_skipped + nr_sectors; 2700 return sectors_skipped + nr_sectors;
2080 giveup: 2701 giveup:
2081 /* There is nowhere to write, so all non-sync 2702 /* There is nowhere to write, so all non-sync
2082 * drives must be failed, so try the next chunk... 2703 * drives must be failed or in resync, all drives
2704 * have a bad block, so try the next chunk...
2083 */ 2705 */
2084 if (sector_nr + max_sync < max_sector) 2706 if (sector_nr + max_sync < max_sector)
2085 max_sector = sector_nr + max_sync; 2707 max_sector = sector_nr + max_sync;
@@ -2249,6 +2871,7 @@ static int run(mddev_t *mddev)
2249 (conf->raid_disks / conf->near_copies)); 2871 (conf->raid_disks / conf->near_copies));
2250 2872
2251 list_for_each_entry(rdev, &mddev->disks, same_set) { 2873 list_for_each_entry(rdev, &mddev->disks, same_set) {
2874
2252 disk_idx = rdev->raid_disk; 2875 disk_idx = rdev->raid_disk;
2253 if (disk_idx >= conf->raid_disks 2876 if (disk_idx >= conf->raid_disks
2254 || disk_idx < 0) 2877 || disk_idx < 0)
@@ -2271,7 +2894,7 @@ static int run(mddev_t *mddev)
2271 disk->head_position = 0; 2894 disk->head_position = 0;
2272 } 2895 }
2273 /* need to check that every block has at least one working mirror */ 2896 /* need to check that every block has at least one working mirror */
2274 if (!enough(conf)) { 2897 if (!enough(conf, -1)) {
2275 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n", 2898 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
2276 mdname(mddev)); 2899 mdname(mddev));
2277 goto out_free_conf; 2900 goto out_free_conf;
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 944b1104d3b4..79cb52a0d4a2 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -6,6 +6,11 @@ typedef struct mirror_info mirror_info_t;
6struct mirror_info { 6struct mirror_info {
7 mdk_rdev_t *rdev; 7 mdk_rdev_t *rdev;
8 sector_t head_position; 8 sector_t head_position;
9 int recovery_disabled; /* matches
10 * mddev->recovery_disabled
11 * when we shouldn't try
12 * recovering this device.
13 */
9}; 14};
10 15
11typedef struct r10bio_s r10bio_t; 16typedef struct r10bio_s r10bio_t;
@@ -113,10 +118,26 @@ struct r10bio_s {
113 * level, we store IO_BLOCKED in the appropriate 'bios' pointer 118 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
114 */ 119 */
115#define IO_BLOCKED ((struct bio*)1) 120#define IO_BLOCKED ((struct bio*)1)
121/* When we successfully write to a known bad-block, we need to remove the
122 * bad-block marking which must be done from process context. So we record
123 * the success by setting devs[n].bio to IO_MADE_GOOD
124 */
125#define IO_MADE_GOOD ((struct bio *)2)
126
127#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
116 128
117/* bits for r10bio.state */ 129/* bits for r10bio.state */
118#define R10BIO_Uptodate 0 130#define R10BIO_Uptodate 0
119#define R10BIO_IsSync 1 131#define R10BIO_IsSync 1
120#define R10BIO_IsRecover 2 132#define R10BIO_IsRecover 2
121#define R10BIO_Degraded 3 133#define R10BIO_Degraded 3
134/* Set ReadError on bios that experience a read error
135 * so that raid10d knows what to do with them.
136 */
137#define R10BIO_ReadError 4
138/* If a write for this request means we can clear some
139 * known-bad-block records, we set this flag.
140 */
141#define R10BIO_MadeGood 5
142#define R10BIO_WriteError 6
122#endif 143#endif
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b72edf35ec54..dbae459fb02d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -51,6 +51,7 @@
51#include <linux/seq_file.h> 51#include <linux/seq_file.h>
52#include <linux/cpu.h> 52#include <linux/cpu.h>
53#include <linux/slab.h> 53#include <linux/slab.h>
54#include <linux/ratelimit.h>
54#include "md.h" 55#include "md.h"
55#include "raid5.h" 56#include "raid5.h"
56#include "raid0.h" 57#include "raid0.h"
@@ -96,8 +97,6 @@
96#define __inline__ 97#define __inline__
97#endif 98#endif
98 99
99#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
100
101/* 100/*
102 * We maintain a biased count of active stripes in the bottom 16 bits of 101 * We maintain a biased count of active stripes in the bottom 16 bits of
103 * bi_phys_segments, and a count of processed stripes in the upper 16 bits 102 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
@@ -341,7 +340,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
341 (unsigned long long)sh->sector, i, dev->toread, 340 (unsigned long long)sh->sector, i, dev->toread,
342 dev->read, dev->towrite, dev->written, 341 dev->read, dev->towrite, dev->written,
343 test_bit(R5_LOCKED, &dev->flags)); 342 test_bit(R5_LOCKED, &dev->flags));
344 BUG(); 343 WARN_ON(1);
345 } 344 }
346 dev->flags = 0; 345 dev->flags = 0;
347 raid5_build_block(sh, i, previous); 346 raid5_build_block(sh, i, previous);
@@ -527,6 +526,36 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
527 atomic_inc(&rdev->nr_pending); 526 atomic_inc(&rdev->nr_pending);
528 rcu_read_unlock(); 527 rcu_read_unlock();
529 528
529 /* We have already checked bad blocks for reads. Now
530 * need to check for writes.
531 */
532 while ((rw & WRITE) && rdev &&
533 test_bit(WriteErrorSeen, &rdev->flags)) {
534 sector_t first_bad;
535 int bad_sectors;
536 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
537 &first_bad, &bad_sectors);
538 if (!bad)
539 break;
540
541 if (bad < 0) {
542 set_bit(BlockedBadBlocks, &rdev->flags);
543 if (!conf->mddev->external &&
544 conf->mddev->flags) {
545 /* It is very unlikely, but we might
546 * still need to write out the
547 * bad block log - better give it
548 * a chance*/
549 md_check_recovery(conf->mddev);
550 }
551 md_wait_for_blocked_rdev(rdev, conf->mddev);
552 } else {
553 /* Acknowledged bad block - skip the write */
554 rdev_dec_pending(rdev, conf->mddev);
555 rdev = NULL;
556 }
557 }
558
530 if (rdev) { 559 if (rdev) {
531 if (s->syncing || s->expanding || s->expanded) 560 if (s->syncing || s->expanding || s->expanded)
532 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 561 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
@@ -548,10 +577,6 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
548 bi->bi_io_vec[0].bv_offset = 0; 577 bi->bi_io_vec[0].bv_offset = 0;
549 bi->bi_size = STRIPE_SIZE; 578 bi->bi_size = STRIPE_SIZE;
550 bi->bi_next = NULL; 579 bi->bi_next = NULL;
551 if ((rw & WRITE) &&
552 test_bit(R5_ReWrite, &sh->dev[i].flags))
553 atomic_add(STRIPE_SECTORS,
554 &rdev->corrected_errors);
555 generic_make_request(bi); 580 generic_make_request(bi);
556 } else { 581 } else {
557 if (rw & WRITE) 582 if (rw & WRITE)
@@ -1020,12 +1045,12 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1020 if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { 1045 if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
1021 struct bio *wbi; 1046 struct bio *wbi;
1022 1047
1023 spin_lock(&sh->lock); 1048 spin_lock_irq(&sh->raid_conf->device_lock);
1024 chosen = dev->towrite; 1049 chosen = dev->towrite;
1025 dev->towrite = NULL; 1050 dev->towrite = NULL;
1026 BUG_ON(dev->written); 1051 BUG_ON(dev->written);
1027 wbi = dev->written = chosen; 1052 wbi = dev->written = chosen;
1028 spin_unlock(&sh->lock); 1053 spin_unlock_irq(&sh->raid_conf->device_lock);
1029 1054
1030 while (wbi && wbi->bi_sector < 1055 while (wbi && wbi->bi_sector <
1031 dev->sector + STRIPE_SECTORS) { 1056 dev->sector + STRIPE_SECTORS) {
@@ -1315,12 +1340,11 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1315static int grow_one_stripe(raid5_conf_t *conf) 1340static int grow_one_stripe(raid5_conf_t *conf)
1316{ 1341{
1317 struct stripe_head *sh; 1342 struct stripe_head *sh;
1318 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); 1343 sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
1319 if (!sh) 1344 if (!sh)
1320 return 0; 1345 return 0;
1321 memset(sh, 0, sizeof(*sh) + (conf->pool_size-1)*sizeof(struct r5dev)); 1346
1322 sh->raid_conf = conf; 1347 sh->raid_conf = conf;
1323 spin_lock_init(&sh->lock);
1324 #ifdef CONFIG_MULTICORE_RAID456 1348 #ifdef CONFIG_MULTICORE_RAID456
1325 init_waitqueue_head(&sh->ops.wait_for_ops); 1349 init_waitqueue_head(&sh->ops.wait_for_ops);
1326 #endif 1350 #endif
@@ -1435,14 +1459,11 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
1435 return -ENOMEM; 1459 return -ENOMEM;
1436 1460
1437 for (i = conf->max_nr_stripes; i; i--) { 1461 for (i = conf->max_nr_stripes; i; i--) {
1438 nsh = kmem_cache_alloc(sc, GFP_KERNEL); 1462 nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
1439 if (!nsh) 1463 if (!nsh)
1440 break; 1464 break;
1441 1465
1442 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
1443
1444 nsh->raid_conf = conf; 1466 nsh->raid_conf = conf;
1445 spin_lock_init(&nsh->lock);
1446 #ifdef CONFIG_MULTICORE_RAID456 1467 #ifdef CONFIG_MULTICORE_RAID456
1447 init_waitqueue_head(&nsh->ops.wait_for_ops); 1468 init_waitqueue_head(&nsh->ops.wait_for_ops);
1448 #endif 1469 #endif
@@ -1587,12 +1608,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
1587 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1608 set_bit(R5_UPTODATE, &sh->dev[i].flags);
1588 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1609 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1589 rdev = conf->disks[i].rdev; 1610 rdev = conf->disks[i].rdev;
1590 printk_rl(KERN_INFO "md/raid:%s: read error corrected" 1611 printk_ratelimited(
1591 " (%lu sectors at %llu on %s)\n", 1612 KERN_INFO
1592 mdname(conf->mddev), STRIPE_SECTORS, 1613 "md/raid:%s: read error corrected"
1593 (unsigned long long)(sh->sector 1614 " (%lu sectors at %llu on %s)\n",
1594 + rdev->data_offset), 1615 mdname(conf->mddev), STRIPE_SECTORS,
1595 bdevname(rdev->bdev, b)); 1616 (unsigned long long)(sh->sector
1617 + rdev->data_offset),
1618 bdevname(rdev->bdev, b));
1619 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1596 clear_bit(R5_ReadError, &sh->dev[i].flags); 1620 clear_bit(R5_ReadError, &sh->dev[i].flags);
1597 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1621 clear_bit(R5_ReWrite, &sh->dev[i].flags);
1598 } 1622 }
@@ -1606,22 +1630,24 @@ static void raid5_end_read_request(struct bio * bi, int error)
1606 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 1630 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1607 atomic_inc(&rdev->read_errors); 1631 atomic_inc(&rdev->read_errors);
1608 if (conf->mddev->degraded >= conf->max_degraded) 1632 if (conf->mddev->degraded >= conf->max_degraded)
1609 printk_rl(KERN_WARNING 1633 printk_ratelimited(
1610 "md/raid:%s: read error not correctable " 1634 KERN_WARNING
1611 "(sector %llu on %s).\n", 1635 "md/raid:%s: read error not correctable "
1612 mdname(conf->mddev), 1636 "(sector %llu on %s).\n",
1613 (unsigned long long)(sh->sector 1637 mdname(conf->mddev),
1614 + rdev->data_offset), 1638 (unsigned long long)(sh->sector
1615 bdn); 1639 + rdev->data_offset),
1640 bdn);
1616 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 1641 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
1617 /* Oh, no!!! */ 1642 /* Oh, no!!! */
1618 printk_rl(KERN_WARNING 1643 printk_ratelimited(
1619 "md/raid:%s: read error NOT corrected!! " 1644 KERN_WARNING
1620 "(sector %llu on %s).\n", 1645 "md/raid:%s: read error NOT corrected!! "
1621 mdname(conf->mddev), 1646 "(sector %llu on %s).\n",
1622 (unsigned long long)(sh->sector 1647 mdname(conf->mddev),
1623 + rdev->data_offset), 1648 (unsigned long long)(sh->sector
1624 bdn); 1649 + rdev->data_offset),
1650 bdn);
1625 else if (atomic_read(&rdev->read_errors) 1651 else if (atomic_read(&rdev->read_errors)
1626 > conf->max_nr_stripes) 1652 > conf->max_nr_stripes)
1627 printk(KERN_WARNING 1653 printk(KERN_WARNING
@@ -1649,6 +1675,8 @@ static void raid5_end_write_request(struct bio *bi, int error)
1649 raid5_conf_t *conf = sh->raid_conf; 1675 raid5_conf_t *conf = sh->raid_conf;
1650 int disks = sh->disks, i; 1676 int disks = sh->disks, i;
1651 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1677 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1678 sector_t first_bad;
1679 int bad_sectors;
1652 1680
1653 for (i=0 ; i<disks; i++) 1681 for (i=0 ; i<disks; i++)
1654 if (bi == &sh->dev[i].req) 1682 if (bi == &sh->dev[i].req)
@@ -1662,8 +1690,12 @@ static void raid5_end_write_request(struct bio *bi, int error)
1662 return; 1690 return;
1663 } 1691 }
1664 1692
1665 if (!uptodate) 1693 if (!uptodate) {
1666 md_error(conf->mddev, conf->disks[i].rdev); 1694 set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags);
1695 set_bit(R5_WriteError, &sh->dev[i].flags);
1696 } else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS,
1697 &first_bad, &bad_sectors))
1698 set_bit(R5_MadeGood, &sh->dev[i].flags);
1667 1699
1668 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1700 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1669 1701
@@ -1710,6 +1742,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1710 */ 1742 */
1711 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1743 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1712 } 1744 }
1745 set_bit(Blocked, &rdev->flags);
1713 set_bit(Faulty, &rdev->flags); 1746 set_bit(Faulty, &rdev->flags);
1714 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1747 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1715 printk(KERN_ALERT 1748 printk(KERN_ALERT
@@ -1760,7 +1793,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1760 /* 1793 /*
1761 * Select the parity disk based on the user selected algorithm. 1794 * Select the parity disk based on the user selected algorithm.
1762 */ 1795 */
1763 pd_idx = qd_idx = ~0; 1796 pd_idx = qd_idx = -1;
1764 switch(conf->level) { 1797 switch(conf->level) {
1765 case 4: 1798 case 4:
1766 pd_idx = data_disks; 1799 pd_idx = data_disks;
@@ -2143,12 +2176,11 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2143 raid5_conf_t *conf = sh->raid_conf; 2176 raid5_conf_t *conf = sh->raid_conf;
2144 int firstwrite=0; 2177 int firstwrite=0;
2145 2178
2146 pr_debug("adding bh b#%llu to stripe s#%llu\n", 2179 pr_debug("adding bi b#%llu to stripe s#%llu\n",
2147 (unsigned long long)bi->bi_sector, 2180 (unsigned long long)bi->bi_sector,
2148 (unsigned long long)sh->sector); 2181 (unsigned long long)sh->sector);
2149 2182
2150 2183
2151 spin_lock(&sh->lock);
2152 spin_lock_irq(&conf->device_lock); 2184 spin_lock_irq(&conf->device_lock);
2153 if (forwrite) { 2185 if (forwrite) {
2154 bip = &sh->dev[dd_idx].towrite; 2186 bip = &sh->dev[dd_idx].towrite;
@@ -2169,19 +2201,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2169 bi->bi_next = *bip; 2201 bi->bi_next = *bip;
2170 *bip = bi; 2202 *bip = bi;
2171 bi->bi_phys_segments++; 2203 bi->bi_phys_segments++;
2172 spin_unlock_irq(&conf->device_lock);
2173 spin_unlock(&sh->lock);
2174
2175 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2176 (unsigned long long)bi->bi_sector,
2177 (unsigned long long)sh->sector, dd_idx);
2178
2179 if (conf->mddev->bitmap && firstwrite) {
2180 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2181 STRIPE_SECTORS, 0);
2182 sh->bm_seq = conf->seq_flush+1;
2183 set_bit(STRIPE_BIT_DELAY, &sh->state);
2184 }
2185 2204
2186 if (forwrite) { 2205 if (forwrite) {
2187 /* check if page is covered */ 2206 /* check if page is covered */
@@ -2196,12 +2215,23 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2196 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 2215 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
2197 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 2216 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
2198 } 2217 }
2218 spin_unlock_irq(&conf->device_lock);
2219
2220 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2221 (unsigned long long)(*bip)->bi_sector,
2222 (unsigned long long)sh->sector, dd_idx);
2223
2224 if (conf->mddev->bitmap && firstwrite) {
2225 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2226 STRIPE_SECTORS, 0);
2227 sh->bm_seq = conf->seq_flush+1;
2228 set_bit(STRIPE_BIT_DELAY, &sh->state);
2229 }
2199 return 1; 2230 return 1;
2200 2231
2201 overlap: 2232 overlap:
2202 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 2233 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
2203 spin_unlock_irq(&conf->device_lock); 2234 spin_unlock_irq(&conf->device_lock);
2204 spin_unlock(&sh->lock);
2205 return 0; 2235 return 0;
2206} 2236}
2207 2237
@@ -2238,9 +2268,18 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
2238 rcu_read_lock(); 2268 rcu_read_lock();
2239 rdev = rcu_dereference(conf->disks[i].rdev); 2269 rdev = rcu_dereference(conf->disks[i].rdev);
2240 if (rdev && test_bit(In_sync, &rdev->flags)) 2270 if (rdev && test_bit(In_sync, &rdev->flags))
2241 /* multiple read failures in one stripe */ 2271 atomic_inc(&rdev->nr_pending);
2242 md_error(conf->mddev, rdev); 2272 else
2273 rdev = NULL;
2243 rcu_read_unlock(); 2274 rcu_read_unlock();
2275 if (rdev) {
2276 if (!rdev_set_badblocks(
2277 rdev,
2278 sh->sector,
2279 STRIPE_SECTORS, 0))
2280 md_error(conf->mddev, rdev);
2281 rdev_dec_pending(rdev, conf->mddev);
2282 }
2244 } 2283 }
2245 spin_lock_irq(&conf->device_lock); 2284 spin_lock_irq(&conf->device_lock);
2246 /* fail all writes first */ 2285 /* fail all writes first */
@@ -2308,6 +2347,10 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
2308 if (bitmap_end) 2347 if (bitmap_end)
2309 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2348 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2310 STRIPE_SECTORS, 0, 0); 2349 STRIPE_SECTORS, 0, 0);
2350 /* If we were in the middle of a write the parity block might
2351 * still be locked - so just clear all R5_LOCKED flags
2352 */
2353 clear_bit(R5_LOCKED, &sh->dev[i].flags);
2311 } 2354 }
2312 2355
2313 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2356 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
@@ -2315,109 +2358,73 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
2315 md_wakeup_thread(conf->mddev->thread); 2358 md_wakeup_thread(conf->mddev->thread);
2316} 2359}
2317 2360
2318/* fetch_block5 - checks the given member device to see if its data needs 2361static void
2319 * to be read or computed to satisfy a request. 2362handle_failed_sync(raid5_conf_t *conf, struct stripe_head *sh,
2320 * 2363 struct stripe_head_state *s)
2321 * Returns 1 when no more member devices need to be checked, otherwise returns
2322 * 0 to tell the loop in handle_stripe_fill5 to continue
2323 */
2324static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
2325 int disk_idx, int disks)
2326{
2327 struct r5dev *dev = &sh->dev[disk_idx];
2328 struct r5dev *failed_dev = &sh->dev[s->failed_num];
2329
2330 /* is the data in this block needed, and can we get it? */
2331 if (!test_bit(R5_LOCKED, &dev->flags) &&
2332 !test_bit(R5_UPTODATE, &dev->flags) &&
2333 (dev->toread ||
2334 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2335 s->syncing || s->expanding ||
2336 (s->failed &&
2337 (failed_dev->toread ||
2338 (failed_dev->towrite &&
2339 !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
2340 /* We would like to get this block, possibly by computing it,
2341 * otherwise read it if the backing disk is insync
2342 */
2343 if ((s->uptodate == disks - 1) &&
2344 (s->failed && disk_idx == s->failed_num)) {
2345 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2346 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2347 set_bit(R5_Wantcompute, &dev->flags);
2348 sh->ops.target = disk_idx;
2349 sh->ops.target2 = -1;
2350 s->req_compute = 1;
2351 /* Careful: from this point on 'uptodate' is in the eye
2352 * of raid_run_ops which services 'compute' operations
2353 * before writes. R5_Wantcompute flags a block that will
2354 * be R5_UPTODATE by the time it is needed for a
2355 * subsequent operation.
2356 */
2357 s->uptodate++;
2358 return 1; /* uptodate + compute == disks */
2359 } else if (test_bit(R5_Insync, &dev->flags)) {
2360 set_bit(R5_LOCKED, &dev->flags);
2361 set_bit(R5_Wantread, &dev->flags);
2362 s->locked++;
2363 pr_debug("Reading block %d (sync=%d)\n", disk_idx,
2364 s->syncing);
2365 }
2366 }
2367
2368 return 0;
2369}
2370
2371/**
2372 * handle_stripe_fill5 - read or compute data to satisfy pending requests.
2373 */
2374static void handle_stripe_fill5(struct stripe_head *sh,
2375 struct stripe_head_state *s, int disks)
2376{ 2364{
2365 int abort = 0;
2377 int i; 2366 int i;
2378 2367
2379 /* look for blocks to read/compute, skip this if a compute 2368 md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
2380 * is already in flight, or if the stripe contents are in the 2369 clear_bit(STRIPE_SYNCING, &sh->state);
2381 * midst of changing due to a write 2370 s->syncing = 0;
2371 /* There is nothing more to do for sync/check/repair.
2372 * For recover we need to record a bad block on all
2373 * non-sync devices, or abort the recovery
2382 */ 2374 */
2383 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 2375 if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery))
2384 !sh->reconstruct_state) 2376 return;
2385 for (i = disks; i--; ) 2377 /* During recovery devices cannot be removed, so locking and
2386 if (fetch_block5(sh, s, i, disks)) 2378 * refcounting of rdevs is not needed
2387 break; 2379 */
2388 set_bit(STRIPE_HANDLE, &sh->state); 2380 for (i = 0; i < conf->raid_disks; i++) {
2381 mdk_rdev_t *rdev = conf->disks[i].rdev;
2382 if (!rdev
2383 || test_bit(Faulty, &rdev->flags)
2384 || test_bit(In_sync, &rdev->flags))
2385 continue;
2386 if (!rdev_set_badblocks(rdev, sh->sector,
2387 STRIPE_SECTORS, 0))
2388 abort = 1;
2389 }
2390 if (abort) {
2391 conf->recovery_disabled = conf->mddev->recovery_disabled;
2392 set_bit(MD_RECOVERY_INTR, &conf->mddev->recovery);
2393 }
2389} 2394}
2390 2395
2391/* fetch_block6 - checks the given member device to see if its data needs 2396/* fetch_block - checks the given member device to see if its data needs
2392 * to be read or computed to satisfy a request. 2397 * to be read or computed to satisfy a request.
2393 * 2398 *
2394 * Returns 1 when no more member devices need to be checked, otherwise returns 2399 * Returns 1 when no more member devices need to be checked, otherwise returns
2395 * 0 to tell the loop in handle_stripe_fill6 to continue 2400 * 0 to tell the loop in handle_stripe_fill to continue
2396 */ 2401 */
2397static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s, 2402static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
2398 struct r6_state *r6s, int disk_idx, int disks) 2403 int disk_idx, int disks)
2399{ 2404{
2400 struct r5dev *dev = &sh->dev[disk_idx]; 2405 struct r5dev *dev = &sh->dev[disk_idx];
2401 struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]], 2406 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
2402 &sh->dev[r6s->failed_num[1]] }; 2407 &sh->dev[s->failed_num[1]] };
2403 2408
2409 /* is the data in this block needed, and can we get it? */
2404 if (!test_bit(R5_LOCKED, &dev->flags) && 2410 if (!test_bit(R5_LOCKED, &dev->flags) &&
2405 !test_bit(R5_UPTODATE, &dev->flags) && 2411 !test_bit(R5_UPTODATE, &dev->flags) &&
2406 (dev->toread || 2412 (dev->toread ||
2407 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 2413 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2408 s->syncing || s->expanding || 2414 s->syncing || s->expanding ||
2409 (s->failed >= 1 && 2415 (s->failed >= 1 && fdev[0]->toread) ||
2410 (fdev[0]->toread || s->to_write)) || 2416 (s->failed >= 2 && fdev[1]->toread) ||
2411 (s->failed >= 2 && 2417 (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
2412 (fdev[1]->toread || s->to_write)))) { 2418 !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
2419 (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
2413 /* we would like to get this block, possibly by computing it, 2420 /* we would like to get this block, possibly by computing it,
2414 * otherwise read it if the backing disk is insync 2421 * otherwise read it if the backing disk is insync
2415 */ 2422 */
2416 BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); 2423 BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
2417 BUG_ON(test_bit(R5_Wantread, &dev->flags)); 2424 BUG_ON(test_bit(R5_Wantread, &dev->flags));
2418 if ((s->uptodate == disks - 1) && 2425 if ((s->uptodate == disks - 1) &&
2419 (s->failed && (disk_idx == r6s->failed_num[0] || 2426 (s->failed && (disk_idx == s->failed_num[0] ||
2420 disk_idx == r6s->failed_num[1]))) { 2427 disk_idx == s->failed_num[1]))) {
2421 /* have disk failed, and we're requested to fetch it; 2428 /* have disk failed, and we're requested to fetch it;
2422 * do compute it 2429 * do compute it
2423 */ 2430 */
@@ -2429,6 +2436,12 @@ static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
2429 sh->ops.target = disk_idx; 2436 sh->ops.target = disk_idx;
2430 sh->ops.target2 = -1; /* no 2nd target */ 2437 sh->ops.target2 = -1; /* no 2nd target */
2431 s->req_compute = 1; 2438 s->req_compute = 1;
2439 /* Careful: from this point on 'uptodate' is in the eye
2440 * of raid_run_ops which services 'compute' operations
2441 * before writes. R5_Wantcompute flags a block that will
2442 * be R5_UPTODATE by the time it is needed for a
2443 * subsequent operation.
2444 */
2432 s->uptodate++; 2445 s->uptodate++;
2433 return 1; 2446 return 1;
2434 } else if (s->uptodate == disks-2 && s->failed >= 2) { 2447 } else if (s->uptodate == disks-2 && s->failed >= 2) {
@@ -2469,11 +2482,11 @@ static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
2469} 2482}
2470 2483
2471/** 2484/**
2472 * handle_stripe_fill6 - read or compute data to satisfy pending requests. 2485 * handle_stripe_fill - read or compute data to satisfy pending requests.
2473 */ 2486 */
2474static void handle_stripe_fill6(struct stripe_head *sh, 2487static void handle_stripe_fill(struct stripe_head *sh,
2475 struct stripe_head_state *s, struct r6_state *r6s, 2488 struct stripe_head_state *s,
2476 int disks) 2489 int disks)
2477{ 2490{
2478 int i; 2491 int i;
2479 2492
@@ -2484,7 +2497,7 @@ static void handle_stripe_fill6(struct stripe_head *sh,
2484 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 2497 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
2485 !sh->reconstruct_state) 2498 !sh->reconstruct_state)
2486 for (i = disks; i--; ) 2499 for (i = disks; i--; )
2487 if (fetch_block6(sh, s, r6s, i, disks)) 2500 if (fetch_block(sh, s, i, disks))
2488 break; 2501 break;
2489 set_bit(STRIPE_HANDLE, &sh->state); 2502 set_bit(STRIPE_HANDLE, &sh->state);
2490} 2503}
@@ -2540,11 +2553,19 @@ static void handle_stripe_clean_event(raid5_conf_t *conf,
2540 md_wakeup_thread(conf->mddev->thread); 2553 md_wakeup_thread(conf->mddev->thread);
2541} 2554}
2542 2555
2543static void handle_stripe_dirtying5(raid5_conf_t *conf, 2556static void handle_stripe_dirtying(raid5_conf_t *conf,
2544 struct stripe_head *sh, struct stripe_head_state *s, int disks) 2557 struct stripe_head *sh,
2558 struct stripe_head_state *s,
2559 int disks)
2545{ 2560{
2546 int rmw = 0, rcw = 0, i; 2561 int rmw = 0, rcw = 0, i;
2547 for (i = disks; i--; ) { 2562 if (conf->max_degraded == 2) {
2563 /* RAID6 requires 'rcw' in current implementation
2564 * Calculate the real rcw later - for now fake it
2565 * look like rcw is cheaper
2566 */
2567 rcw = 1; rmw = 2;
2568 } else for (i = disks; i--; ) {
2548 /* would I have to read this buffer for read_modify_write */ 2569 /* would I have to read this buffer for read_modify_write */
2549 struct r5dev *dev = &sh->dev[i]; 2570 struct r5dev *dev = &sh->dev[i];
2550 if ((dev->towrite || i == sh->pd_idx) && 2571 if ((dev->towrite || i == sh->pd_idx) &&
@@ -2591,16 +2612,19 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf,
2591 } 2612 }
2592 } 2613 }
2593 } 2614 }
2594 if (rcw <= rmw && rcw > 0) 2615 if (rcw <= rmw && rcw > 0) {
2595 /* want reconstruct write, but need to get some data */ 2616 /* want reconstruct write, but need to get some data */
2617 rcw = 0;
2596 for (i = disks; i--; ) { 2618 for (i = disks; i--; ) {
2597 struct r5dev *dev = &sh->dev[i]; 2619 struct r5dev *dev = &sh->dev[i];
2598 if (!test_bit(R5_OVERWRITE, &dev->flags) && 2620 if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2599 i != sh->pd_idx && 2621 i != sh->pd_idx && i != sh->qd_idx &&
2600 !test_bit(R5_LOCKED, &dev->flags) && 2622 !test_bit(R5_LOCKED, &dev->flags) &&
2601 !(test_bit(R5_UPTODATE, &dev->flags) || 2623 !(test_bit(R5_UPTODATE, &dev->flags) ||
2602 test_bit(R5_Wantcompute, &dev->flags)) && 2624 test_bit(R5_Wantcompute, &dev->flags))) {
2603 test_bit(R5_Insync, &dev->flags)) { 2625 rcw++;
2626 if (!test_bit(R5_Insync, &dev->flags))
2627 continue; /* it's a failed drive */
2604 if ( 2628 if (
2605 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2629 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2606 pr_debug("Read_old block " 2630 pr_debug("Read_old block "
@@ -2614,6 +2638,7 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf,
2614 } 2638 }
2615 } 2639 }
2616 } 2640 }
2641 }
2617 /* now if nothing is locked, and if we have enough data, 2642 /* now if nothing is locked, and if we have enough data,
2618 * we can start a write request 2643 * we can start a write request
2619 */ 2644 */
@@ -2630,53 +2655,6 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf,
2630 schedule_reconstruction(sh, s, rcw == 0, 0); 2655 schedule_reconstruction(sh, s, rcw == 0, 0);
2631} 2656}
2632 2657
2633static void handle_stripe_dirtying6(raid5_conf_t *conf,
2634 struct stripe_head *sh, struct stripe_head_state *s,
2635 struct r6_state *r6s, int disks)
2636{
2637 int rcw = 0, pd_idx = sh->pd_idx, i;
2638 int qd_idx = sh->qd_idx;
2639
2640 set_bit(STRIPE_HANDLE, &sh->state);
2641 for (i = disks; i--; ) {
2642 struct r5dev *dev = &sh->dev[i];
2643 /* check if we haven't enough data */
2644 if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2645 i != pd_idx && i != qd_idx &&
2646 !test_bit(R5_LOCKED, &dev->flags) &&
2647 !(test_bit(R5_UPTODATE, &dev->flags) ||
2648 test_bit(R5_Wantcompute, &dev->flags))) {
2649 rcw++;
2650 if (!test_bit(R5_Insync, &dev->flags))
2651 continue; /* it's a failed drive */
2652
2653 if (
2654 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2655 pr_debug("Read_old stripe %llu "
2656 "block %d for Reconstruct\n",
2657 (unsigned long long)sh->sector, i);
2658 set_bit(R5_LOCKED, &dev->flags);
2659 set_bit(R5_Wantread, &dev->flags);
2660 s->locked++;
2661 } else {
2662 pr_debug("Request delayed stripe %llu "
2663 "block %d for Reconstruct\n",
2664 (unsigned long long)sh->sector, i);
2665 set_bit(STRIPE_DELAYED, &sh->state);
2666 set_bit(STRIPE_HANDLE, &sh->state);
2667 }
2668 }
2669 }
2670 /* now if nothing is locked, and if we have enough data, we can start a
2671 * write request
2672 */
2673 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2674 s->locked == 0 && rcw == 0 &&
2675 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
2676 schedule_reconstruction(sh, s, 1, 0);
2677 }
2678}
2679
2680static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, 2658static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2681 struct stripe_head_state *s, int disks) 2659 struct stripe_head_state *s, int disks)
2682{ 2660{
@@ -2695,7 +2673,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2695 s->uptodate--; 2673 s->uptodate--;
2696 break; 2674 break;
2697 } 2675 }
2698 dev = &sh->dev[s->failed_num]; 2676 dev = &sh->dev[s->failed_num[0]];
2699 /* fall through */ 2677 /* fall through */
2700 case check_state_compute_result: 2678 case check_state_compute_result:
2701 sh->check_state = check_state_idle; 2679 sh->check_state = check_state_idle;
@@ -2767,7 +2745,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2767 2745
2768static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, 2746static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
2769 struct stripe_head_state *s, 2747 struct stripe_head_state *s,
2770 struct r6_state *r6s, int disks) 2748 int disks)
2771{ 2749{
2772 int pd_idx = sh->pd_idx; 2750 int pd_idx = sh->pd_idx;
2773 int qd_idx = sh->qd_idx; 2751 int qd_idx = sh->qd_idx;
@@ -2786,14 +2764,14 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
2786 switch (sh->check_state) { 2764 switch (sh->check_state) {
2787 case check_state_idle: 2765 case check_state_idle:
2788 /* start a new check operation if there are < 2 failures */ 2766 /* start a new check operation if there are < 2 failures */
2789 if (s->failed == r6s->q_failed) { 2767 if (s->failed == s->q_failed) {
2790 /* The only possible failed device holds Q, so it 2768 /* The only possible failed device holds Q, so it
2791 * makes sense to check P (If anything else were failed, 2769 * makes sense to check P (If anything else were failed,
2792 * we would have used P to recreate it). 2770 * we would have used P to recreate it).
2793 */ 2771 */
2794 sh->check_state = check_state_run; 2772 sh->check_state = check_state_run;
2795 } 2773 }
2796 if (!r6s->q_failed && s->failed < 2) { 2774 if (!s->q_failed && s->failed < 2) {
2797 /* Q is not failed, and we didn't use it to generate 2775 /* Q is not failed, and we didn't use it to generate
2798 * anything, so it makes sense to check it 2776 * anything, so it makes sense to check it
2799 */ 2777 */
@@ -2835,13 +2813,13 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
2835 */ 2813 */
2836 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ 2814 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
2837 if (s->failed == 2) { 2815 if (s->failed == 2) {
2838 dev = &sh->dev[r6s->failed_num[1]]; 2816 dev = &sh->dev[s->failed_num[1]];
2839 s->locked++; 2817 s->locked++;
2840 set_bit(R5_LOCKED, &dev->flags); 2818 set_bit(R5_LOCKED, &dev->flags);
2841 set_bit(R5_Wantwrite, &dev->flags); 2819 set_bit(R5_Wantwrite, &dev->flags);
2842 } 2820 }
2843 if (s->failed >= 1) { 2821 if (s->failed >= 1) {
2844 dev = &sh->dev[r6s->failed_num[0]]; 2822 dev = &sh->dev[s->failed_num[0]];
2845 s->locked++; 2823 s->locked++;
2846 set_bit(R5_LOCKED, &dev->flags); 2824 set_bit(R5_LOCKED, &dev->flags);
2847 set_bit(R5_Wantwrite, &dev->flags); 2825 set_bit(R5_Wantwrite, &dev->flags);
@@ -2928,8 +2906,7 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
2928 } 2906 }
2929} 2907}
2930 2908
2931static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, 2909static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh)
2932 struct r6_state *r6s)
2933{ 2910{
2934 int i; 2911 int i;
2935 2912
@@ -2971,7 +2948,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
2971 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 2948 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
2972 for (j = 0; j < conf->raid_disks; j++) 2949 for (j = 0; j < conf->raid_disks; j++)
2973 if (j != sh2->pd_idx && 2950 if (j != sh2->pd_idx &&
2974 (!r6s || j != sh2->qd_idx) && 2951 j != sh2->qd_idx &&
2975 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 2952 !test_bit(R5_Expanded, &sh2->dev[j].flags))
2976 break; 2953 break;
2977 if (j == conf->raid_disks) { 2954 if (j == conf->raid_disks) {
@@ -3006,43 +2983,35 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
3006 * 2983 *
3007 */ 2984 */
3008 2985
3009static void handle_stripe5(struct stripe_head *sh) 2986static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3010{ 2987{
3011 raid5_conf_t *conf = sh->raid_conf; 2988 raid5_conf_t *conf = sh->raid_conf;
3012 int disks = sh->disks, i; 2989 int disks = sh->disks;
3013 struct bio *return_bi = NULL;
3014 struct stripe_head_state s;
3015 struct r5dev *dev; 2990 struct r5dev *dev;
3016 mdk_rdev_t *blocked_rdev = NULL; 2991 int i;
3017 int prexor;
3018 int dec_preread_active = 0;
3019 2992
3020 memset(&s, 0, sizeof(s)); 2993 memset(s, 0, sizeof(*s));
3021 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
3022 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
3023 atomic_read(&sh->count), sh->pd_idx, sh->check_state,
3024 sh->reconstruct_state);
3025 2994
3026 spin_lock(&sh->lock); 2995 s->syncing = test_bit(STRIPE_SYNCING, &sh->state);
3027 clear_bit(STRIPE_HANDLE, &sh->state); 2996 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3028 clear_bit(STRIPE_DELAYED, &sh->state); 2997 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
3029 2998 s->failed_num[0] = -1;
3030 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 2999 s->failed_num[1] = -1;
3031 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3032 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
3033 3000
3034 /* Now to look around and see what can be done */ 3001 /* Now to look around and see what can be done */
3035 rcu_read_lock(); 3002 rcu_read_lock();
3003 spin_lock_irq(&conf->device_lock);
3036 for (i=disks; i--; ) { 3004 for (i=disks; i--; ) {
3037 mdk_rdev_t *rdev; 3005 mdk_rdev_t *rdev;
3006 sector_t first_bad;
3007 int bad_sectors;
3008 int is_bad = 0;
3038 3009
3039 dev = &sh->dev[i]; 3010 dev = &sh->dev[i];
3040 3011
3041 pr_debug("check %d: state 0x%lx toread %p read %p write %p " 3012 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
3042 "written %p\n", i, dev->flags, dev->toread, dev->read, 3013 i, dev->flags, dev->toread, dev->towrite, dev->written);
3043 dev->towrite, dev->written); 3014 /* maybe we can reply to a read
3044
3045 /* maybe we can request a biofill operation
3046 * 3015 *
3047 * new wantfill requests are only permitted while 3016 * new wantfill requests are only permitted while
3048 * ops_complete_biofill is guaranteed to be inactive 3017 * ops_complete_biofill is guaranteed to be inactive
@@ -3052,37 +3021,74 @@ static void handle_stripe5(struct stripe_head *sh)
3052 set_bit(R5_Wantfill, &dev->flags); 3021 set_bit(R5_Wantfill, &dev->flags);
3053 3022
3054 /* now count some things */ 3023 /* now count some things */
3055 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 3024 if (test_bit(R5_LOCKED, &dev->flags))
3056 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 3025 s->locked++;
3057 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++; 3026 if (test_bit(R5_UPTODATE, &dev->flags))
3027 s->uptodate++;
3028 if (test_bit(R5_Wantcompute, &dev->flags)) {
3029 s->compute++;
3030 BUG_ON(s->compute > 2);
3031 }
3058 3032
3059 if (test_bit(R5_Wantfill, &dev->flags)) 3033 if (test_bit(R5_Wantfill, &dev->flags))
3060 s.to_fill++; 3034 s->to_fill++;
3061 else if (dev->toread) 3035 else if (dev->toread)
3062 s.to_read++; 3036 s->to_read++;
3063 if (dev->towrite) { 3037 if (dev->towrite) {
3064 s.to_write++; 3038 s->to_write++;
3065 if (!test_bit(R5_OVERWRITE, &dev->flags)) 3039 if (!test_bit(R5_OVERWRITE, &dev->flags))
3066 s.non_overwrite++; 3040 s->non_overwrite++;
3067 } 3041 }
3068 if (dev->written) 3042 if (dev->written)
3069 s.written++; 3043 s->written++;
3070 rdev = rcu_dereference(conf->disks[i].rdev); 3044 rdev = rcu_dereference(conf->disks[i].rdev);
3071 if (blocked_rdev == NULL && 3045 if (rdev) {
3072 rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 3046 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
3073 blocked_rdev = rdev; 3047 &first_bad, &bad_sectors);
3074 atomic_inc(&rdev->nr_pending); 3048 if (s->blocked_rdev == NULL
3049 && (test_bit(Blocked, &rdev->flags)
3050 || is_bad < 0)) {
3051 if (is_bad < 0)
3052 set_bit(BlockedBadBlocks,
3053 &rdev->flags);
3054 s->blocked_rdev = rdev;
3055 atomic_inc(&rdev->nr_pending);
3056 }
3075 } 3057 }
3076 clear_bit(R5_Insync, &dev->flags); 3058 clear_bit(R5_Insync, &dev->flags);
3077 if (!rdev) 3059 if (!rdev)
3078 /* Not in-sync */; 3060 /* Not in-sync */;
3079 else if (test_bit(In_sync, &rdev->flags)) 3061 else if (is_bad) {
3062 /* also not in-sync */
3063 if (!test_bit(WriteErrorSeen, &rdev->flags)) {
3064 /* treat as in-sync, but with a read error
3065 * which we can now try to correct
3066 */
3067 set_bit(R5_Insync, &dev->flags);
3068 set_bit(R5_ReadError, &dev->flags);
3069 }
3070 } else if (test_bit(In_sync, &rdev->flags))
3080 set_bit(R5_Insync, &dev->flags); 3071 set_bit(R5_Insync, &dev->flags);
3081 else { 3072 else {
3082 /* could be in-sync depending on recovery/reshape status */ 3073 /* in sync if before recovery_offset */
3083 if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) 3074 if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
3084 set_bit(R5_Insync, &dev->flags); 3075 set_bit(R5_Insync, &dev->flags);
3085 } 3076 }
3077 if (test_bit(R5_WriteError, &dev->flags)) {
3078 clear_bit(R5_Insync, &dev->flags);
3079 if (!test_bit(Faulty, &rdev->flags)) {
3080 s->handle_bad_blocks = 1;
3081 atomic_inc(&rdev->nr_pending);
3082 } else
3083 clear_bit(R5_WriteError, &dev->flags);
3084 }
3085 if (test_bit(R5_MadeGood, &dev->flags)) {
3086 if (!test_bit(Faulty, &rdev->flags)) {
3087 s->handle_bad_blocks = 1;
3088 atomic_inc(&rdev->nr_pending);
3089 } else
3090 clear_bit(R5_MadeGood, &dev->flags);
3091 }
3086 if (!test_bit(R5_Insync, &dev->flags)) { 3092 if (!test_bit(R5_Insync, &dev->flags)) {
3087 /* The ReadError flag will just be confusing now */ 3093 /* The ReadError flag will just be confusing now */
3088 clear_bit(R5_ReadError, &dev->flags); 3094 clear_bit(R5_ReadError, &dev->flags);
@@ -3091,313 +3097,60 @@ static void handle_stripe5(struct stripe_head *sh)
3091 if (test_bit(R5_ReadError, &dev->flags)) 3097 if (test_bit(R5_ReadError, &dev->flags))
3092 clear_bit(R5_Insync, &dev->flags); 3098 clear_bit(R5_Insync, &dev->flags);
3093 if (!test_bit(R5_Insync, &dev->flags)) { 3099 if (!test_bit(R5_Insync, &dev->flags)) {
3094 s.failed++; 3100 if (s->failed < 2)
3095 s.failed_num = i; 3101 s->failed_num[s->failed] = i;
3102 s->failed++;
3096 } 3103 }
3097 } 3104 }
3105 spin_unlock_irq(&conf->device_lock);
3098 rcu_read_unlock(); 3106 rcu_read_unlock();
3099
3100 if (unlikely(blocked_rdev)) {
3101 if (s.syncing || s.expanding || s.expanded ||
3102 s.to_write || s.written) {
3103 set_bit(STRIPE_HANDLE, &sh->state);
3104 goto unlock;
3105 }
3106 /* There is nothing for the blocked_rdev to block */
3107 rdev_dec_pending(blocked_rdev, conf->mddev);
3108 blocked_rdev = NULL;
3109 }
3110
3111 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
3112 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
3113 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
3114 }
3115
3116 pr_debug("locked=%d uptodate=%d to_read=%d"
3117 " to_write=%d failed=%d failed_num=%d\n",
3118 s.locked, s.uptodate, s.to_read, s.to_write,
3119 s.failed, s.failed_num);
3120 /* check if the array has lost two devices and, if so, some requests might
3121 * need to be failed
3122 */
3123 if (s.failed > 1 && s.to_read+s.to_write+s.written)
3124 handle_failed_stripe(conf, sh, &s, disks, &return_bi);
3125 if (s.failed > 1 && s.syncing) {
3126 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
3127 clear_bit(STRIPE_SYNCING, &sh->state);
3128 s.syncing = 0;
3129 }
3130
3131 /* might be able to return some write requests if the parity block
3132 * is safe, or on a failed drive
3133 */
3134 dev = &sh->dev[sh->pd_idx];
3135 if ( s.written &&
3136 ((test_bit(R5_Insync, &dev->flags) &&
3137 !test_bit(R5_LOCKED, &dev->flags) &&
3138 test_bit(R5_UPTODATE, &dev->flags)) ||
3139 (s.failed == 1 && s.failed_num == sh->pd_idx)))
3140 handle_stripe_clean_event(conf, sh, disks, &return_bi);
3141
3142 /* Now we might consider reading some blocks, either to check/generate
3143 * parity, or to satisfy requests
3144 * or to load a block that is being partially written.
3145 */
3146 if (s.to_read || s.non_overwrite ||
3147 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
3148 handle_stripe_fill5(sh, &s, disks);
3149
3150 /* Now we check to see if any write operations have recently
3151 * completed
3152 */
3153 prexor = 0;
3154 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
3155 prexor = 1;
3156 if (sh->reconstruct_state == reconstruct_state_drain_result ||
3157 sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
3158 sh->reconstruct_state = reconstruct_state_idle;
3159
3160 /* All the 'written' buffers and the parity block are ready to
3161 * be written back to disk
3162 */
3163 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
3164 for (i = disks; i--; ) {
3165 dev = &sh->dev[i];
3166 if (test_bit(R5_LOCKED, &dev->flags) &&
3167 (i == sh->pd_idx || dev->written)) {
3168 pr_debug("Writing block %d\n", i);
3169 set_bit(R5_Wantwrite, &dev->flags);
3170 if (prexor)
3171 continue;
3172 if (!test_bit(R5_Insync, &dev->flags) ||
3173 (i == sh->pd_idx && s.failed == 0))
3174 set_bit(STRIPE_INSYNC, &sh->state);
3175 }
3176 }
3177 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3178 dec_preread_active = 1;
3179 }
3180
3181 /* Now to consider new write requests and what else, if anything
3182 * should be read. We do not handle new writes when:
3183 * 1/ A 'write' operation (copy+xor) is already in flight.
3184 * 2/ A 'check' operation is in flight, as it may clobber the parity
3185 * block.
3186 */
3187 if (s.to_write && !sh->reconstruct_state && !sh->check_state)
3188 handle_stripe_dirtying5(conf, sh, &s, disks);
3189
3190 /* maybe we need to check and possibly fix the parity for this stripe
3191 * Any reads will already have been scheduled, so we just see if enough
3192 * data is available. The parity check is held off while parity
3193 * dependent operations are in flight.
3194 */
3195 if (sh->check_state ||
3196 (s.syncing && s.locked == 0 &&
3197 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3198 !test_bit(STRIPE_INSYNC, &sh->state)))
3199 handle_parity_checks5(conf, sh, &s, disks);
3200
3201 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
3202 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
3203 clear_bit(STRIPE_SYNCING, &sh->state);
3204 }
3205
3206 /* If the failed drive is just a ReadError, then we might need to progress
3207 * the repair/check process
3208 */
3209 if (s.failed == 1 && !conf->mddev->ro &&
3210 test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
3211 && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
3212 && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
3213 ) {
3214 dev = &sh->dev[s.failed_num];
3215 if (!test_bit(R5_ReWrite, &dev->flags)) {
3216 set_bit(R5_Wantwrite, &dev->flags);
3217 set_bit(R5_ReWrite, &dev->flags);
3218 set_bit(R5_LOCKED, &dev->flags);
3219 s.locked++;
3220 } else {
3221 /* let's read it back */
3222 set_bit(R5_Wantread, &dev->flags);
3223 set_bit(R5_LOCKED, &dev->flags);
3224 s.locked++;
3225 }
3226 }
3227
3228 /* Finish reconstruct operations initiated by the expansion process */
3229 if (sh->reconstruct_state == reconstruct_state_result) {
3230 struct stripe_head *sh2
3231 = get_active_stripe(conf, sh->sector, 1, 1, 1);
3232 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
3233 /* sh cannot be written until sh2 has been read.
3234 * so arrange for sh to be delayed a little
3235 */
3236 set_bit(STRIPE_DELAYED, &sh->state);
3237 set_bit(STRIPE_HANDLE, &sh->state);
3238 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3239 &sh2->state))
3240 atomic_inc(&conf->preread_active_stripes);
3241 release_stripe(sh2);
3242 goto unlock;
3243 }
3244 if (sh2)
3245 release_stripe(sh2);
3246
3247 sh->reconstruct_state = reconstruct_state_idle;
3248 clear_bit(STRIPE_EXPANDING, &sh->state);
3249 for (i = conf->raid_disks; i--; ) {
3250 set_bit(R5_Wantwrite, &sh->dev[i].flags);
3251 set_bit(R5_LOCKED, &sh->dev[i].flags);
3252 s.locked++;
3253 }
3254 }
3255
3256 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3257 !sh->reconstruct_state) {
3258 /* Need to write out all blocks after computing parity */
3259 sh->disks = conf->raid_disks;
3260 stripe_set_idx(sh->sector, conf, 0, sh);
3261 schedule_reconstruction(sh, &s, 1, 1);
3262 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
3263 clear_bit(STRIPE_EXPAND_READY, &sh->state);
3264 atomic_dec(&conf->reshape_stripes);
3265 wake_up(&conf->wait_for_overlap);
3266 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3267 }
3268
3269 if (s.expanding && s.locked == 0 &&
3270 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3271 handle_stripe_expansion(conf, sh, NULL);
3272
3273 unlock:
3274 spin_unlock(&sh->lock);
3275
3276 /* wait for this device to become unblocked */
3277 if (unlikely(blocked_rdev))
3278 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
3279
3280 if (s.ops_request)
3281 raid_run_ops(sh, s.ops_request);
3282
3283 ops_run_io(sh, &s);
3284
3285 if (dec_preread_active) {
3286 /* We delay this until after ops_run_io so that if make_request
3287 * is waiting on a flush, it won't continue until the writes
3288 * have actually been submitted.
3289 */
3290 atomic_dec(&conf->preread_active_stripes);
3291 if (atomic_read(&conf->preread_active_stripes) <
3292 IO_THRESHOLD)
3293 md_wakeup_thread(conf->mddev->thread);
3294 }
3295 return_io(return_bi);
3296} 3107}
3297 3108
3298static void handle_stripe6(struct stripe_head *sh) 3109static void handle_stripe(struct stripe_head *sh)
3299{ 3110{
3111 struct stripe_head_state s;
3300 raid5_conf_t *conf = sh->raid_conf; 3112 raid5_conf_t *conf = sh->raid_conf;
3113 int i;
3114 int prexor;
3301 int disks = sh->disks; 3115 int disks = sh->disks;
3302 struct bio *return_bi = NULL; 3116 struct r5dev *pdev, *qdev;
3303 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx; 3117
3304 struct stripe_head_state s; 3118 clear_bit(STRIPE_HANDLE, &sh->state);
3305 struct r6_state r6s; 3119 if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) {
3306 struct r5dev *dev, *pdev, *qdev; 3120 /* already being handled, ensure it gets handled
3307 mdk_rdev_t *blocked_rdev = NULL; 3121 * again when current action finishes */
3308 int dec_preread_active = 0; 3122 set_bit(STRIPE_HANDLE, &sh->state);
3123 return;
3124 }
3125
3126 if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
3127 set_bit(STRIPE_SYNCING, &sh->state);
3128 clear_bit(STRIPE_INSYNC, &sh->state);
3129 }
3130 clear_bit(STRIPE_DELAYED, &sh->state);
3309 3131
3310 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 3132 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3311 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n", 3133 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
3312 (unsigned long long)sh->sector, sh->state, 3134 (unsigned long long)sh->sector, sh->state,
3313 atomic_read(&sh->count), pd_idx, qd_idx, 3135 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
3314 sh->check_state, sh->reconstruct_state); 3136 sh->check_state, sh->reconstruct_state);
3315 memset(&s, 0, sizeof(s));
3316
3317 spin_lock(&sh->lock);
3318 clear_bit(STRIPE_HANDLE, &sh->state);
3319 clear_bit(STRIPE_DELAYED, &sh->state);
3320
3321 s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
3322 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3323 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
3324 /* Now to look around and see what can be done */
3325
3326 rcu_read_lock();
3327 for (i=disks; i--; ) {
3328 mdk_rdev_t *rdev;
3329 dev = &sh->dev[i];
3330 3137
3331 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 3138 analyse_stripe(sh, &s);
3332 i, dev->flags, dev->toread, dev->towrite, dev->written);
3333 /* maybe we can reply to a read
3334 *
3335 * new wantfill requests are only permitted while
3336 * ops_complete_biofill is guaranteed to be inactive
3337 */
3338 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
3339 !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
3340 set_bit(R5_Wantfill, &dev->flags);
3341 3139
3342 /* now count some things */ 3140 if (s.handle_bad_blocks) {
3343 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 3141 set_bit(STRIPE_HANDLE, &sh->state);
3344 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 3142 goto finish;
3345 if (test_bit(R5_Wantcompute, &dev->flags)) {
3346 s.compute++;
3347 BUG_ON(s.compute > 2);
3348 }
3349
3350 if (test_bit(R5_Wantfill, &dev->flags)) {
3351 s.to_fill++;
3352 } else if (dev->toread)
3353 s.to_read++;
3354 if (dev->towrite) {
3355 s.to_write++;
3356 if (!test_bit(R5_OVERWRITE, &dev->flags))
3357 s.non_overwrite++;
3358 }
3359 if (dev->written)
3360 s.written++;
3361 rdev = rcu_dereference(conf->disks[i].rdev);
3362 if (blocked_rdev == NULL &&
3363 rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
3364 blocked_rdev = rdev;
3365 atomic_inc(&rdev->nr_pending);
3366 }
3367 clear_bit(R5_Insync, &dev->flags);
3368 if (!rdev)
3369 /* Not in-sync */;
3370 else if (test_bit(In_sync, &rdev->flags))
3371 set_bit(R5_Insync, &dev->flags);
3372 else {
3373 /* in sync if before recovery_offset */
3374 if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
3375 set_bit(R5_Insync, &dev->flags);
3376 }
3377 if (!test_bit(R5_Insync, &dev->flags)) {
3378 /* The ReadError flag will just be confusing now */
3379 clear_bit(R5_ReadError, &dev->flags);
3380 clear_bit(R5_ReWrite, &dev->flags);
3381 }
3382 if (test_bit(R5_ReadError, &dev->flags))
3383 clear_bit(R5_Insync, &dev->flags);
3384 if (!test_bit(R5_Insync, &dev->flags)) {
3385 if (s.failed < 2)
3386 r6s.failed_num[s.failed] = i;
3387 s.failed++;
3388 }
3389 } 3143 }
3390 rcu_read_unlock();
3391 3144
3392 if (unlikely(blocked_rdev)) { 3145 if (unlikely(s.blocked_rdev)) {
3393 if (s.syncing || s.expanding || s.expanded || 3146 if (s.syncing || s.expanding || s.expanded ||
3394 s.to_write || s.written) { 3147 s.to_write || s.written) {
3395 set_bit(STRIPE_HANDLE, &sh->state); 3148 set_bit(STRIPE_HANDLE, &sh->state);
3396 goto unlock; 3149 goto finish;
3397 } 3150 }
3398 /* There is nothing for the blocked_rdev to block */ 3151 /* There is nothing for the blocked_rdev to block */
3399 rdev_dec_pending(blocked_rdev, conf->mddev); 3152 rdev_dec_pending(s.blocked_rdev, conf->mddev);
3400 blocked_rdev = NULL; 3153 s.blocked_rdev = NULL;
3401 } 3154 }
3402 3155
3403 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 3156 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
@@ -3408,83 +3161,88 @@ static void handle_stripe6(struct stripe_head *sh)
3408 pr_debug("locked=%d uptodate=%d to_read=%d" 3161 pr_debug("locked=%d uptodate=%d to_read=%d"
3409 " to_write=%d failed=%d failed_num=%d,%d\n", 3162 " to_write=%d failed=%d failed_num=%d,%d\n",
3410 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 3163 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
3411 r6s.failed_num[0], r6s.failed_num[1]); 3164 s.failed_num[0], s.failed_num[1]);
3412 /* check if the array has lost >2 devices and, if so, some requests 3165 /* check if the array has lost more than max_degraded devices and,
3413 * might need to be failed 3166 * if so, some requests might need to be failed.
3414 */ 3167 */
3415 if (s.failed > 2 && s.to_read+s.to_write+s.written) 3168 if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written)
3416 handle_failed_stripe(conf, sh, &s, disks, &return_bi); 3169 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
3417 if (s.failed > 2 && s.syncing) { 3170 if (s.failed > conf->max_degraded && s.syncing)
3418 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 3171 handle_failed_sync(conf, sh, &s);
3419 clear_bit(STRIPE_SYNCING, &sh->state);
3420 s.syncing = 0;
3421 }
3422 3172
3423 /* 3173 /*
3424 * might be able to return some write requests if the parity blocks 3174 * might be able to return some write requests if the parity blocks
3425 * are safe, or on a failed drive 3175 * are safe, or on a failed drive
3426 */ 3176 */
3427 pdev = &sh->dev[pd_idx]; 3177 pdev = &sh->dev[sh->pd_idx];
3428 r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx) 3178 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
3429 || (s.failed >= 2 && r6s.failed_num[1] == pd_idx); 3179 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
3430 qdev = &sh->dev[qd_idx]; 3180 qdev = &sh->dev[sh->qd_idx];
3431 r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx) 3181 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
3432 || (s.failed >= 2 && r6s.failed_num[1] == qd_idx); 3182 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
3433 3183 || conf->level < 6;
3434 if ( s.written && 3184
3435 ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 3185 if (s.written &&
3186 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
3436 && !test_bit(R5_LOCKED, &pdev->flags) 3187 && !test_bit(R5_LOCKED, &pdev->flags)
3437 && test_bit(R5_UPTODATE, &pdev->flags)))) && 3188 && test_bit(R5_UPTODATE, &pdev->flags)))) &&
3438 ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 3189 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
3439 && !test_bit(R5_LOCKED, &qdev->flags) 3190 && !test_bit(R5_LOCKED, &qdev->flags)
3440 && test_bit(R5_UPTODATE, &qdev->flags))))) 3191 && test_bit(R5_UPTODATE, &qdev->flags)))))
3441 handle_stripe_clean_event(conf, sh, disks, &return_bi); 3192 handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
3442 3193
3443 /* Now we might consider reading some blocks, either to check/generate 3194 /* Now we might consider reading some blocks, either to check/generate
3444 * parity, or to satisfy requests 3195 * parity, or to satisfy requests
3445 * or to load a block that is being partially written. 3196 * or to load a block that is being partially written.
3446 */ 3197 */
3447 if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || 3198 if (s.to_read || s.non_overwrite
3448 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) 3199 || (conf->level == 6 && s.to_write && s.failed)
3449 handle_stripe_fill6(sh, &s, &r6s, disks); 3200 || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
3201 handle_stripe_fill(sh, &s, disks);
3450 3202
3451 /* Now we check to see if any write operations have recently 3203 /* Now we check to see if any write operations have recently
3452 * completed 3204 * completed
3453 */ 3205 */
3454 if (sh->reconstruct_state == reconstruct_state_drain_result) { 3206 prexor = 0;
3455 3207 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
3208 prexor = 1;
3209 if (sh->reconstruct_state == reconstruct_state_drain_result ||
3210 sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
3456 sh->reconstruct_state = reconstruct_state_idle; 3211 sh->reconstruct_state = reconstruct_state_idle;
3457 /* All the 'written' buffers and the parity blocks are ready to 3212
3213 /* All the 'written' buffers and the parity block are ready to
3458 * be written back to disk 3214 * be written back to disk
3459 */ 3215 */
3460 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); 3216 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
3461 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags)); 3217 BUG_ON(sh->qd_idx >= 0 &&
3218 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags));
3462 for (i = disks; i--; ) { 3219 for (i = disks; i--; ) {
3463 dev = &sh->dev[i]; 3220 struct r5dev *dev = &sh->dev[i];
3464 if (test_bit(R5_LOCKED, &dev->flags) && 3221 if (test_bit(R5_LOCKED, &dev->flags) &&
3465 (i == sh->pd_idx || i == qd_idx || 3222 (i == sh->pd_idx || i == sh->qd_idx ||
3466 dev->written)) { 3223 dev->written)) {
3467 pr_debug("Writing block %d\n", i); 3224 pr_debug("Writing block %d\n", i);
3468 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
3469 set_bit(R5_Wantwrite, &dev->flags); 3225 set_bit(R5_Wantwrite, &dev->flags);
3226 if (prexor)
3227 continue;
3470 if (!test_bit(R5_Insync, &dev->flags) || 3228 if (!test_bit(R5_Insync, &dev->flags) ||
3471 ((i == sh->pd_idx || i == qd_idx) && 3229 ((i == sh->pd_idx || i == sh->qd_idx) &&
3472 s.failed == 0)) 3230 s.failed == 0))
3473 set_bit(STRIPE_INSYNC, &sh->state); 3231 set_bit(STRIPE_INSYNC, &sh->state);
3474 } 3232 }
3475 } 3233 }
3476 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3234 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3477 dec_preread_active = 1; 3235 s.dec_preread_active = 1;
3478 } 3236 }
3479 3237
3480 /* Now to consider new write requests and what else, if anything 3238 /* Now to consider new write requests and what else, if anything
3481 * should be read. We do not handle new writes when: 3239 * should be read. We do not handle new writes when:
3482 * 1/ A 'write' operation (copy+gen_syndrome) is already in flight. 3240 * 1/ A 'write' operation (copy+xor) is already in flight.
3483 * 2/ A 'check' operation is in flight, as it may clobber the parity 3241 * 2/ A 'check' operation is in flight, as it may clobber the parity
3484 * block. 3242 * block.
3485 */ 3243 */
3486 if (s.to_write && !sh->reconstruct_state && !sh->check_state) 3244 if (s.to_write && !sh->reconstruct_state && !sh->check_state)
3487 handle_stripe_dirtying6(conf, sh, &s, &r6s, disks); 3245 handle_stripe_dirtying(conf, sh, &s, disks);
3488 3246
3489 /* maybe we need to check and possibly fix the parity for this stripe 3247 /* maybe we need to check and possibly fix the parity for this stripe
3490 * Any reads will already have been scheduled, so we just see if enough 3248 * Any reads will already have been scheduled, so we just see if enough
@@ -3494,20 +3252,24 @@ static void handle_stripe6(struct stripe_head *sh)
3494 if (sh->check_state || 3252 if (sh->check_state ||
3495 (s.syncing && s.locked == 0 && 3253 (s.syncing && s.locked == 0 &&
3496 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 3254 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3497 !test_bit(STRIPE_INSYNC, &sh->state))) 3255 !test_bit(STRIPE_INSYNC, &sh->state))) {
3498 handle_parity_checks6(conf, sh, &s, &r6s, disks); 3256 if (conf->level == 6)
3257 handle_parity_checks6(conf, sh, &s, disks);
3258 else
3259 handle_parity_checks5(conf, sh, &s, disks);
3260 }
3499 3261
3500 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 3262 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
3501 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 3263 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3502 clear_bit(STRIPE_SYNCING, &sh->state); 3264 clear_bit(STRIPE_SYNCING, &sh->state);
3503 } 3265 }
3504 3266
3505 /* If the failed drives are just a ReadError, then we might need 3267 /* If the failed drives are just a ReadError, then we might need
3506 * to progress the repair/check process 3268 * to progress the repair/check process
3507 */ 3269 */
3508 if (s.failed <= 2 && !conf->mddev->ro) 3270 if (s.failed <= conf->max_degraded && !conf->mddev->ro)
3509 for (i = 0; i < s.failed; i++) { 3271 for (i = 0; i < s.failed; i++) {
3510 dev = &sh->dev[r6s.failed_num[i]]; 3272 struct r5dev *dev = &sh->dev[s.failed_num[i]];
3511 if (test_bit(R5_ReadError, &dev->flags) 3273 if (test_bit(R5_ReadError, &dev->flags)
3512 && !test_bit(R5_LOCKED, &dev->flags) 3274 && !test_bit(R5_LOCKED, &dev->flags)
3513 && test_bit(R5_UPTODATE, &dev->flags) 3275 && test_bit(R5_UPTODATE, &dev->flags)
@@ -3526,8 +3288,26 @@ static void handle_stripe6(struct stripe_head *sh)
3526 } 3288 }
3527 } 3289 }
3528 3290
3291
3529 /* Finish reconstruct operations initiated by the expansion process */ 3292 /* Finish reconstruct operations initiated by the expansion process */
3530 if (sh->reconstruct_state == reconstruct_state_result) { 3293 if (sh->reconstruct_state == reconstruct_state_result) {
3294 struct stripe_head *sh_src
3295 = get_active_stripe(conf, sh->sector, 1, 1, 1);
3296 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
3297 /* sh cannot be written until sh_src has been read.
3298 * so arrange for sh to be delayed a little
3299 */
3300 set_bit(STRIPE_DELAYED, &sh->state);
3301 set_bit(STRIPE_HANDLE, &sh->state);
3302 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3303 &sh_src->state))
3304 atomic_inc(&conf->preread_active_stripes);
3305 release_stripe(sh_src);
3306 goto finish;
3307 }
3308 if (sh_src)
3309 release_stripe(sh_src);
3310
3531 sh->reconstruct_state = reconstruct_state_idle; 3311 sh->reconstruct_state = reconstruct_state_idle;
3532 clear_bit(STRIPE_EXPANDING, &sh->state); 3312 clear_bit(STRIPE_EXPANDING, &sh->state);
3533 for (i = conf->raid_disks; i--; ) { 3313 for (i = conf->raid_disks; i--; ) {
@@ -3539,24 +3319,7 @@ static void handle_stripe6(struct stripe_head *sh)
3539 3319
3540 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 3320 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3541 !sh->reconstruct_state) { 3321 !sh->reconstruct_state) {
3542 struct stripe_head *sh2 3322 /* Need to write out all blocks after computing parity */
3543 = get_active_stripe(conf, sh->sector, 1, 1, 1);
3544 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
3545 /* sh cannot be written until sh2 has been read.
3546 * so arrange for sh to be delayed a little
3547 */
3548 set_bit(STRIPE_DELAYED, &sh->state);
3549 set_bit(STRIPE_HANDLE, &sh->state);
3550 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3551 &sh2->state))
3552 atomic_inc(&conf->preread_active_stripes);
3553 release_stripe(sh2);
3554 goto unlock;
3555 }
3556 if (sh2)
3557 release_stripe(sh2);
3558
3559 /* Need to write out all blocks after computing P&Q */
3560 sh->disks = conf->raid_disks; 3323 sh->disks = conf->raid_disks;
3561 stripe_set_idx(sh->sector, conf, 0, sh); 3324 stripe_set_idx(sh->sector, conf, 0, sh);
3562 schedule_reconstruction(sh, &s, 1, 1); 3325 schedule_reconstruction(sh, &s, 1, 1);
@@ -3569,22 +3332,39 @@ static void handle_stripe6(struct stripe_head *sh)
3569 3332
3570 if (s.expanding && s.locked == 0 && 3333 if (s.expanding && s.locked == 0 &&
3571 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 3334 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3572 handle_stripe_expansion(conf, sh, &r6s); 3335 handle_stripe_expansion(conf, sh);
3573
3574 unlock:
3575 spin_unlock(&sh->lock);
3576 3336
3337finish:
3577 /* wait for this device to become unblocked */ 3338 /* wait for this device to become unblocked */
3578 if (unlikely(blocked_rdev)) 3339 if (unlikely(s.blocked_rdev))
3579 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 3340 md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
3341
3342 if (s.handle_bad_blocks)
3343 for (i = disks; i--; ) {
3344 mdk_rdev_t *rdev;
3345 struct r5dev *dev = &sh->dev[i];
3346 if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
3347 /* We own a safe reference to the rdev */
3348 rdev = conf->disks[i].rdev;
3349 if (!rdev_set_badblocks(rdev, sh->sector,
3350 STRIPE_SECTORS, 0))
3351 md_error(conf->mddev, rdev);
3352 rdev_dec_pending(rdev, conf->mddev);
3353 }
3354 if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
3355 rdev = conf->disks[i].rdev;
3356 rdev_clear_badblocks(rdev, sh->sector,
3357 STRIPE_SECTORS);
3358 rdev_dec_pending(rdev, conf->mddev);
3359 }
3360 }
3580 3361
3581 if (s.ops_request) 3362 if (s.ops_request)
3582 raid_run_ops(sh, s.ops_request); 3363 raid_run_ops(sh, s.ops_request);
3583 3364
3584 ops_run_io(sh, &s); 3365 ops_run_io(sh, &s);
3585 3366
3586 3367 if (s.dec_preread_active) {
3587 if (dec_preread_active) {
3588 /* We delay this until after ops_run_io so that if make_request 3368 /* We delay this until after ops_run_io so that if make_request
3589 * is waiting on a flush, it won't continue until the writes 3369 * is waiting on a flush, it won't continue until the writes
3590 * have actually been submitted. 3370 * have actually been submitted.
@@ -3595,15 +3375,9 @@ static void handle_stripe6(struct stripe_head *sh)
3595 md_wakeup_thread(conf->mddev->thread); 3375 md_wakeup_thread(conf->mddev->thread);
3596 } 3376 }
3597 3377
3598 return_io(return_bi); 3378 return_io(s.return_bi);
3599}
3600 3379
3601static void handle_stripe(struct stripe_head *sh) 3380 clear_bit(STRIPE_ACTIVE, &sh->state);
3602{
3603 if (sh->raid_conf->level == 6)
3604 handle_stripe6(sh);
3605 else
3606 handle_stripe5(sh);
3607} 3381}
3608 3382
3609static void raid5_activate_delayed(raid5_conf_t *conf) 3383static void raid5_activate_delayed(raid5_conf_t *conf)
@@ -3833,6 +3607,9 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
3833 rcu_read_lock(); 3607 rcu_read_lock();
3834 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 3608 rdev = rcu_dereference(conf->disks[dd_idx].rdev);
3835 if (rdev && test_bit(In_sync, &rdev->flags)) { 3609 if (rdev && test_bit(In_sync, &rdev->flags)) {
3610 sector_t first_bad;
3611 int bad_sectors;
3612
3836 atomic_inc(&rdev->nr_pending); 3613 atomic_inc(&rdev->nr_pending);
3837 rcu_read_unlock(); 3614 rcu_read_unlock();
3838 raid_bio->bi_next = (void*)rdev; 3615 raid_bio->bi_next = (void*)rdev;
@@ -3840,8 +3617,10 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
3840 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 3617 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
3841 align_bi->bi_sector += rdev->data_offset; 3618 align_bi->bi_sector += rdev->data_offset;
3842 3619
3843 if (!bio_fits_rdev(align_bi)) { 3620 if (!bio_fits_rdev(align_bi) ||
3844 /* too big in some way */ 3621 is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
3622 &first_bad, &bad_sectors)) {
3623 /* too big in some way, or has a known bad block */
3845 bio_put(align_bi); 3624 bio_put(align_bi);
3846 rdev_dec_pending(rdev, mddev); 3625 rdev_dec_pending(rdev, mddev);
3847 return 0; 3626 return 0;
@@ -4016,7 +3795,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
4016 } 3795 }
4017 } 3796 }
4018 3797
4019 if (bio_data_dir(bi) == WRITE && 3798 if (rw == WRITE &&
4020 logical_sector >= mddev->suspend_lo && 3799 logical_sector >= mddev->suspend_lo &&
4021 logical_sector < mddev->suspend_hi) { 3800 logical_sector < mddev->suspend_hi) {
4022 release_stripe(sh); 3801 release_stripe(sh);
@@ -4034,7 +3813,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
4034 } 3813 }
4035 3814
4036 if (test_bit(STRIPE_EXPANDING, &sh->state) || 3815 if (test_bit(STRIPE_EXPANDING, &sh->state) ||
4037 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { 3816 !add_stripe_bio(sh, bi, dd_idx, rw)) {
4038 /* Stripe is busy expanding or 3817 /* Stripe is busy expanding or
4039 * add failed due to overlap. Flush everything 3818 * add failed due to overlap. Flush everything
4040 * and wait a while 3819 * and wait a while
@@ -4375,10 +4154,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
4375 4154
4376 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 4155 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
4377 4156
4378 spin_lock(&sh->lock); 4157 set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
4379 set_bit(STRIPE_SYNCING, &sh->state);
4380 clear_bit(STRIPE_INSYNC, &sh->state);
4381 spin_unlock(&sh->lock);
4382 4158
4383 handle_stripe(sh); 4159 handle_stripe(sh);
4384 release_stripe(sh); 4160 release_stripe(sh);
@@ -4509,6 +4285,9 @@ static void raid5d(mddev_t *mddev)
4509 release_stripe(sh); 4285 release_stripe(sh);
4510 cond_resched(); 4286 cond_resched();
4511 4287
4288 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
4289 md_check_recovery(mddev);
4290
4512 spin_lock_irq(&conf->device_lock); 4291 spin_lock_irq(&conf->device_lock);
4513 } 4292 }
4514 pr_debug("%d stripes handled\n", handled); 4293 pr_debug("%d stripes handled\n", handled);
@@ -5313,6 +5092,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number)
5313 * isn't possible. 5092 * isn't possible.
5314 */ 5093 */
5315 if (!test_bit(Faulty, &rdev->flags) && 5094 if (!test_bit(Faulty, &rdev->flags) &&
5095 mddev->recovery_disabled != conf->recovery_disabled &&
5316 !has_failed(conf) && 5096 !has_failed(conf) &&
5317 number < conf->raid_disks) { 5097 number < conf->raid_disks) {
5318 err = -EBUSY; 5098 err = -EBUSY;
@@ -5341,6 +5121,9 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
5341 int first = 0; 5121 int first = 0;
5342 int last = conf->raid_disks - 1; 5122 int last = conf->raid_disks - 1;
5343 5123
5124 if (mddev->recovery_disabled == conf->recovery_disabled)
5125 return -EBUSY;
5126
5344 if (has_failed(conf)) 5127 if (has_failed(conf))
5345 /* no point adding a device */ 5128 /* no point adding a device */
5346 return -EINVAL; 5129 return -EINVAL;
@@ -5519,16 +5302,14 @@ static int raid5_start_reshape(mddev_t *mddev)
5519 if (rdev->raid_disk < 0 && 5302 if (rdev->raid_disk < 0 &&
5520 !test_bit(Faulty, &rdev->flags)) { 5303 !test_bit(Faulty, &rdev->flags)) {
5521 if (raid5_add_disk(mddev, rdev) == 0) { 5304 if (raid5_add_disk(mddev, rdev) == 0) {
5522 char nm[20];
5523 if (rdev->raid_disk 5305 if (rdev->raid_disk
5524 >= conf->previous_raid_disks) { 5306 >= conf->previous_raid_disks) {
5525 set_bit(In_sync, &rdev->flags); 5307 set_bit(In_sync, &rdev->flags);
5526 added_devices++; 5308 added_devices++;
5527 } else 5309 } else
5528 rdev->recovery_offset = 0; 5310 rdev->recovery_offset = 0;
5529 sprintf(nm, "rd%d", rdev->raid_disk); 5311
5530 if (sysfs_create_link(&mddev->kobj, 5312 if (sysfs_link_rdev(mddev, rdev))
5531 &rdev->kobj, nm))
5532 /* Failure here is OK */; 5313 /* Failure here is OK */;
5533 } 5314 }
5534 } else if (rdev->raid_disk >= conf->previous_raid_disks 5315 } else if (rdev->raid_disk >= conf->previous_raid_disks
@@ -5624,9 +5405,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
5624 d++) { 5405 d++) {
5625 mdk_rdev_t *rdev = conf->disks[d].rdev; 5406 mdk_rdev_t *rdev = conf->disks[d].rdev;
5626 if (rdev && raid5_remove_disk(mddev, d) == 0) { 5407 if (rdev && raid5_remove_disk(mddev, d) == 0) {
5627 char nm[20]; 5408 sysfs_unlink_rdev(mddev, rdev);
5628 sprintf(nm, "rd%d", rdev->raid_disk);
5629 sysfs_remove_link(&mddev->kobj, nm);
5630 rdev->raid_disk = -1; 5409 rdev->raid_disk = -1;
5631 } 5410 }
5632 } 5411 }
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 3ca77a2613ba..11b9566184b2 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -6,11 +6,11 @@
6 6
7/* 7/*
8 * 8 *
9 * Each stripe contains one buffer per disc. Each buffer can be in 9 * Each stripe contains one buffer per device. Each buffer can be in
10 * one of a number of states stored in "flags". Changes between 10 * one of a number of states stored in "flags". Changes between
11 * these states happen *almost* exclusively under a per-stripe 11 * these states happen *almost* exclusively under the protection of the
12 * spinlock. Some very specific changes can happen in bi_end_io, and 12 * STRIPE_ACTIVE flag. Some very specific changes can happen in bi_end_io, and
13 * these are not protected by the spin lock. 13 * these are not protected by STRIPE_ACTIVE.
14 * 14 *
15 * The flag bits that are used to represent these states are: 15 * The flag bits that are used to represent these states are:
16 * R5_UPTODATE and R5_LOCKED 16 * R5_UPTODATE and R5_LOCKED
@@ -76,12 +76,10 @@
76 * block and the cached buffer are successfully written, any buffer on 76 * block and the cached buffer are successfully written, any buffer on
77 * a written list can be returned with b_end_io. 77 * a written list can be returned with b_end_io.
78 * 78 *
79 * The write list and read list both act as fifos. The read list is 79 * The write list and read list both act as fifos. The read list,
80 * protected by the device_lock. The write and written lists are 80 * write list and written list are protected by the device_lock.
81 * protected by the stripe lock. The device_lock, which can be 81 * The device_lock is only for list manipulations and will only be
82 * claimed while the stipe lock is held, is only for list 82 * held for a very short time. It can be claimed from interrupts.
83 * manipulations and will only be held for a very short time. It can
84 * be claimed from interrupts.
85 * 83 *
86 * 84 *
87 * Stripes in the stripe cache can be on one of two lists (or on 85 * Stripes in the stripe cache can be on one of two lists (or on
@@ -96,7 +94,6 @@
96 * 94 *
97 * The inactive_list, handle_list and hash bucket lists are all protected by the 95 * The inactive_list, handle_list and hash bucket lists are all protected by the
98 * device_lock. 96 * device_lock.
99 * - stripes on the inactive_list never have their stripe_lock held.
100 * - stripes have a reference counter. If count==0, they are on a list. 97 * - stripes have a reference counter. If count==0, they are on a list.
101 * - If a stripe might need handling, STRIPE_HANDLE is set. 98 * - If a stripe might need handling, STRIPE_HANDLE is set.
102 * - When refcount reaches zero, then if STRIPE_HANDLE it is put on 99 * - When refcount reaches zero, then if STRIPE_HANDLE it is put on
@@ -116,10 +113,10 @@
116 * attach a request to an active stripe (add_stripe_bh()) 113 * attach a request to an active stripe (add_stripe_bh())
117 * lockdev attach-buffer unlockdev 114 * lockdev attach-buffer unlockdev
118 * handle a stripe (handle_stripe()) 115 * handle a stripe (handle_stripe())
119 * lockstripe clrSTRIPE_HANDLE ... 116 * setSTRIPE_ACTIVE, clrSTRIPE_HANDLE ...
120 * (lockdev check-buffers unlockdev) .. 117 * (lockdev check-buffers unlockdev) ..
121 * change-state .. 118 * change-state ..
122 * record io/ops needed unlockstripe schedule io/ops 119 * record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
123 * release an active stripe (release_stripe()) 120 * release an active stripe (release_stripe())
124 * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev 121 * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
125 * 122 *
@@ -128,8 +125,7 @@
128 * on a cached buffer, and plus one if the stripe is undergoing stripe 125 * on a cached buffer, and plus one if the stripe is undergoing stripe
129 * operations. 126 * operations.
130 * 127 *
131 * Stripe operations are performed outside the stripe lock, 128 * The stripe operations are:
132 * the stripe operations are:
133 * -copying data between the stripe cache and user application buffers 129 * -copying data between the stripe cache and user application buffers
134 * -computing blocks to save a disk access, or to recover a missing block 130 * -computing blocks to save a disk access, or to recover a missing block
135 * -updating the parity on a write operation (reconstruct write and 131 * -updating the parity on a write operation (reconstruct write and
@@ -159,7 +155,8 @@
159 */ 155 */
160 156
161/* 157/*
162 * Operations state - intermediate states that are visible outside of sh->lock 158 * Operations state - intermediate states that are visible outside of
159 * STRIPE_ACTIVE.
163 * In general _idle indicates nothing is running, _run indicates a data 160 * In general _idle indicates nothing is running, _run indicates a data
164 * processing operation is active, and _result means the data processing result 161 * processing operation is active, and _result means the data processing result
165 * is stable and can be acted upon. For simple operations like biofill and 162 * is stable and can be acted upon. For simple operations like biofill and
@@ -209,7 +206,6 @@ struct stripe_head {
209 short ddf_layout;/* use DDF ordering to calculate Q */ 206 short ddf_layout;/* use DDF ordering to calculate Q */
210 unsigned long state; /* state flags */ 207 unsigned long state; /* state flags */
211 atomic_t count; /* nr of active thread/requests */ 208 atomic_t count; /* nr of active thread/requests */
212 spinlock_t lock;
213 int bm_seq; /* sequence number for bitmap flushes */ 209 int bm_seq; /* sequence number for bitmap flushes */
214 int disks; /* disks in stripe */ 210 int disks; /* disks in stripe */
215 enum check_states check_state; 211 enum check_states check_state;
@@ -240,19 +236,20 @@ struct stripe_head {
240}; 236};
241 237
242/* stripe_head_state - collects and tracks the dynamic state of a stripe_head 238/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
243 * for handle_stripe. It is only valid under spin_lock(sh->lock); 239 * for handle_stripe.
244 */ 240 */
245struct stripe_head_state { 241struct stripe_head_state {
246 int syncing, expanding, expanded; 242 int syncing, expanding, expanded;
247 int locked, uptodate, to_read, to_write, failed, written; 243 int locked, uptodate, to_read, to_write, failed, written;
248 int to_fill, compute, req_compute, non_overwrite; 244 int to_fill, compute, req_compute, non_overwrite;
249 int failed_num; 245 int failed_num[2];
246 int p_failed, q_failed;
247 int dec_preread_active;
250 unsigned long ops_request; 248 unsigned long ops_request;
251};
252 249
253/* r6_state - extra state data only relevant to r6 */ 250 struct bio *return_bi;
254struct r6_state { 251 mdk_rdev_t *blocked_rdev;
255 int p_failed, q_failed, failed_num[2]; 252 int handle_bad_blocks;
256}; 253};
257 254
258/* Flags */ 255/* Flags */
@@ -268,14 +265,16 @@ struct r6_state {
268#define R5_ReWrite 9 /* have tried to over-write the readerror */ 265#define R5_ReWrite 9 /* have tried to over-write the readerror */
269 266
270#define R5_Expanded 10 /* This block now has post-expand data */ 267#define R5_Expanded 10 /* This block now has post-expand data */
271#define R5_Wantcompute 11 /* compute_block in progress treat as 268#define R5_Wantcompute 11 /* compute_block in progress treat as
272 * uptodate 269 * uptodate
273 */ 270 */
274#define R5_Wantfill 12 /* dev->toread contains a bio that needs 271#define R5_Wantfill 12 /* dev->toread contains a bio that needs
275 * filling 272 * filling
276 */ 273 */
277#define R5_Wantdrain 13 /* dev->towrite needs to be drained */ 274#define R5_Wantdrain 13 /* dev->towrite needs to be drained */
278#define R5_WantFUA 14 /* Write should be FUA */ 275#define R5_WantFUA 14 /* Write should be FUA */
276#define R5_WriteError 15 /* got a write error - need to record it */
277#define R5_MadeGood 16 /* A bad block has been fixed by writing to it*/
279/* 278/*
280 * Write method 279 * Write method
281 */ 280 */
@@ -289,21 +288,25 @@ struct r6_state {
289/* 288/*
290 * Stripe state 289 * Stripe state
291 */ 290 */
292#define STRIPE_HANDLE 2 291enum {
293#define STRIPE_SYNCING 3 292 STRIPE_ACTIVE,
294#define STRIPE_INSYNC 4 293 STRIPE_HANDLE,
295#define STRIPE_PREREAD_ACTIVE 5 294 STRIPE_SYNC_REQUESTED,
296#define STRIPE_DELAYED 6 295 STRIPE_SYNCING,
297#define STRIPE_DEGRADED 7 296 STRIPE_INSYNC,
298#define STRIPE_BIT_DELAY 8 297 STRIPE_PREREAD_ACTIVE,
299#define STRIPE_EXPANDING 9 298 STRIPE_DELAYED,
300#define STRIPE_EXPAND_SOURCE 10 299 STRIPE_DEGRADED,
301#define STRIPE_EXPAND_READY 11 300 STRIPE_BIT_DELAY,
302#define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */ 301 STRIPE_EXPANDING,
303#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */ 302 STRIPE_EXPAND_SOURCE,
304#define STRIPE_BIOFILL_RUN 14 303 STRIPE_EXPAND_READY,
305#define STRIPE_COMPUTE_RUN 15 304 STRIPE_IO_STARTED, /* do not count towards 'bypass_count' */
306#define STRIPE_OPS_REQ_PENDING 16 305 STRIPE_FULL_WRITE, /* all blocks are set to be overwritten */
306 STRIPE_BIOFILL_RUN,
307 STRIPE_COMPUTE_RUN,
308 STRIPE_OPS_REQ_PENDING,
309};
307 310
308/* 311/*
309 * Operation request flags 312 * Operation request flags
@@ -336,7 +339,7 @@ struct r6_state {
336 * PREREAD_ACTIVE. 339 * PREREAD_ACTIVE.
337 * In stripe_handle, if we find pre-reading is necessary, we do it if 340 * In stripe_handle, if we find pre-reading is necessary, we do it if
338 * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue. 341 * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue.
339 * HANDLE gets cleared if stripe_handle leave nothing locked. 342 * HANDLE gets cleared if stripe_handle leaves nothing locked.
340 */ 343 */
341 344
342 345
@@ -399,7 +402,7 @@ struct raid5_private_data {
399 * (fresh device added). 402 * (fresh device added).
400 * Cleared when a sync completes. 403 * Cleared when a sync completes.
401 */ 404 */
402 405 int recovery_disabled;
403 /* per cpu variables */ 406 /* per cpu variables */
404 struct raid5_percpu { 407 struct raid5_percpu {
405 struct page *spare_page; /* Used when checking P/Q in raid6 */ 408 struct page *spare_page; /* Used when checking P/Q in raid6 */
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 6995940b633a..9575db429df4 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -68,7 +68,6 @@ config VIDEO_V4L2_SUBDEV_API
68 68
69config DVB_CORE 69config DVB_CORE
70 tristate "DVB for Linux" 70 tristate "DVB for Linux"
71 depends on NET && INET
72 select CRC32 71 select CRC32
73 help 72 help
74 DVB core utility functions for device handling, software fallbacks etc. 73 DVB core utility functions for device handling, software fallbacks etc.
@@ -85,6 +84,19 @@ config DVB_CORE
85 84
86 If unsure say N. 85 If unsure say N.
87 86
87config DVB_NET
88 bool "DVB Network Support"
89 default (NET && INET)
90 depends on NET && INET && DVB_CORE
91 help
92 This option enables DVB Network Support which is a part of the DVB
93 standard. It is used, for example, by automatic firmware updates used
94 on Set-Top-Boxes. It can also be used to access the Internet via the
95 DVB card, if the network provider supports it.
96
97 You may want to disable the network support on embedded devices. If
98 unsure say Y.
99
88config VIDEO_MEDIA 100config VIDEO_MEDIA
89 tristate 101 tristate
90 default (DVB_CORE && (VIDEO_DEV = n)) || (VIDEO_DEV && (DVB_CORE = n)) || (DVB_CORE && VIDEO_DEV) 102 default (DVB_CORE && (VIDEO_DEV = n)) || (VIDEO_DEV && (DVB_CORE = n)) || (DVB_CORE && VIDEO_DEV)
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index 22d3ca36370e..996302ae210e 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -23,6 +23,7 @@ config MEDIA_TUNER
23 depends on VIDEO_MEDIA && I2C 23 depends on VIDEO_MEDIA && I2C
24 select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE 24 select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
25 select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMISE 25 select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMISE
26 select MEDIA_TUNER_XC4000 if !MEDIA_TUNER_CUSTOMISE
26 select MEDIA_TUNER_MT20XX if !MEDIA_TUNER_CUSTOMISE 27 select MEDIA_TUNER_MT20XX if !MEDIA_TUNER_CUSTOMISE
27 select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMISE 28 select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMISE
28 select MEDIA_TUNER_TEA5761 if !MEDIA_TUNER_CUSTOMISE 29 select MEDIA_TUNER_TEA5761 if !MEDIA_TUNER_CUSTOMISE
@@ -152,6 +153,15 @@ config MEDIA_TUNER_XC5000
152 This device is only used inside a SiP called together with a 153 This device is only used inside a SiP called together with a
153 demodulator for now. 154 demodulator for now.
154 155
156config MEDIA_TUNER_XC4000
157 tristate "Xceive XC4000 silicon tuner"
158 depends on VIDEO_MEDIA && I2C
159 default m if MEDIA_TUNER_CUSTOMISE
160 help
161 A driver for the silicon tuner XC4000 from Xceive.
162 This device is only used inside a SiP called together with a
163 demodulator for now.
164
155config MEDIA_TUNER_MXL5005S 165config MEDIA_TUNER_MXL5005S
156 tristate "MaxLinear MSL5005S silicon tuner" 166 tristate "MaxLinear MSL5005S silicon tuner"
157 depends on VIDEO_MEDIA && I2C 167 depends on VIDEO_MEDIA && I2C
diff --git a/drivers/media/common/tuners/Makefile b/drivers/media/common/tuners/Makefile
index 2cb4f5327843..20d24fca2cfb 100644
--- a/drivers/media/common/tuners/Makefile
+++ b/drivers/media/common/tuners/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MEDIA_TUNER_TDA9887) += tda9887.o
16obj-$(CONFIG_MEDIA_TUNER_TDA827X) += tda827x.o 16obj-$(CONFIG_MEDIA_TUNER_TDA827X) += tda827x.o
17obj-$(CONFIG_MEDIA_TUNER_TDA18271) += tda18271.o 17obj-$(CONFIG_MEDIA_TUNER_TDA18271) += tda18271.o
18obj-$(CONFIG_MEDIA_TUNER_XC5000) += xc5000.o 18obj-$(CONFIG_MEDIA_TUNER_XC5000) += xc5000.o
19obj-$(CONFIG_MEDIA_TUNER_XC4000) += xc4000.o
19obj-$(CONFIG_MEDIA_TUNER_MT2060) += mt2060.o 20obj-$(CONFIG_MEDIA_TUNER_MT2060) += mt2060.o
20obj-$(CONFIG_MEDIA_TUNER_MT2266) += mt2266.o 21obj-$(CONFIG_MEDIA_TUNER_MT2266) += mt2266.o
21obj-$(CONFIG_MEDIA_TUNER_QT1010) += qt1010.o 22obj-$(CONFIG_MEDIA_TUNER_QT1010) += qt1010.o
diff --git a/drivers/media/common/tuners/tuner-types.c b/drivers/media/common/tuners/tuner-types.c
index afba6dc5e080..94a603a60842 100644
--- a/drivers/media/common/tuners/tuner-types.c
+++ b/drivers/media/common/tuners/tuner-types.c
@@ -1805,6 +1805,10 @@ struct tunertype tuners[] = {
1805 .name = "Xceive 5000 tuner", 1805 .name = "Xceive 5000 tuner",
1806 /* see xc5000.c for details */ 1806 /* see xc5000.c for details */
1807 }, 1807 },
1808 [TUNER_XC4000] = { /* Xceive 4000 */
1809 .name = "Xceive 4000 tuner",
1810 /* see xc4000.c for details */
1811 },
1808 [TUNER_TCL_MF02GIP_5N] = { /* TCL tuner MF02GIP-5N-E */ 1812 [TUNER_TCL_MF02GIP_5N] = { /* TCL tuner MF02GIP-5N-E */
1809 .name = "TCL tuner MF02GIP-5N-E", 1813 .name = "TCL tuner MF02GIP-5N-E",
1810 .params = tuner_tcl_mf02gip_5n_params, 1814 .params = tuner_tcl_mf02gip_5n_params,
diff --git a/drivers/media/common/tuners/xc4000.c b/drivers/media/common/tuners/xc4000.c
new file mode 100644
index 000000000000..634f4d9b6c63
--- /dev/null
+++ b/drivers/media/common/tuners/xc4000.c
@@ -0,0 +1,1691 @@
1/*
2 * Driver for Xceive XC4000 "QAM/8VSB single chip tuner"
3 *
4 * Copyright (c) 2007 Xceive Corporation
5 * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
6 * Copyright (c) 2009 Devin Heitmueller <dheitmueller@kernellabs.com>
7 * Copyright (c) 2009 Davide Ferri <d.ferri@zero11.it>
8 * Copyright (c) 2010 Istvan Varga <istvan_v@mailbox.hu>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/videodev2.h>
28#include <linux/delay.h>
29#include <linux/dvb/frontend.h>
30#include <linux/i2c.h>
31#include <linux/mutex.h>
32#include <asm/unaligned.h>
33
34#include "dvb_frontend.h"
35
36#include "xc4000.h"
37#include "tuner-i2c.h"
38#include "tuner-xc2028-types.h"
39
40static int debug;
41module_param(debug, int, 0644);
42MODULE_PARM_DESC(debug, "Debugging level (0 to 2, default: 0 (off)).");
43
44static int no_poweroff;
45module_param(no_poweroff, int, 0644);
46MODULE_PARM_DESC(no_poweroff, "Power management (1: disabled, 2: enabled, "
47 "0 (default): use device-specific default mode).");
48
49static int audio_std;
50module_param(audio_std, int, 0644);
51MODULE_PARM_DESC(audio_std, "Audio standard. XC4000 audio decoder explicitly "
52 "needs to know what audio standard is needed for some video standards "
53 "with audio A2 or NICAM. The valid settings are a sum of:\n"
54 " 1: use NICAM/B or A2/B instead of NICAM/A or A2/A\n"
55 " 2: use A2 instead of NICAM or BTSC\n"
56 " 4: use SECAM/K3 instead of K1\n"
57 " 8: use PAL-D/K audio for SECAM-D/K\n"
58 "16: use FM radio input 1 instead of input 2\n"
59 "32: use mono audio (the lower three bits are ignored)");
60
61static char firmware_name[30];
62module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0);
63MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the "
64 "default firmware name.");
65
66static DEFINE_MUTEX(xc4000_list_mutex);
67static LIST_HEAD(hybrid_tuner_instance_list);
68
69#define dprintk(level, fmt, arg...) if (debug >= level) \
70 printk(KERN_INFO "%s: " fmt, "xc4000", ## arg)
71
72/* struct for storing firmware table */
73struct firmware_description {
74 unsigned int type;
75 v4l2_std_id id;
76 __u16 int_freq;
77 unsigned char *ptr;
78 unsigned int size;
79};
80
81struct firmware_properties {
82 unsigned int type;
83 v4l2_std_id id;
84 v4l2_std_id std_req;
85 __u16 int_freq;
86 unsigned int scode_table;
87 int scode_nr;
88};
89
90struct xc4000_priv {
91 struct tuner_i2c_props i2c_props;
92 struct list_head hybrid_tuner_instance_list;
93 struct firmware_description *firm;
94 int firm_size;
95 u32 if_khz;
96 u32 freq_hz;
97 u32 bandwidth;
98 u8 video_standard;
99 u8 rf_mode;
100 u8 default_pm;
101 u8 dvb_amplitude;
102 u8 set_smoothedcvbs;
103 u8 ignore_i2c_write_errors;
104 __u16 firm_version;
105 struct firmware_properties cur_fw;
106 __u16 hwmodel;
107 __u16 hwvers;
108 struct mutex lock;
109};
110
111#define XC4000_AUDIO_STD_B 1
112#define XC4000_AUDIO_STD_A2 2
113#define XC4000_AUDIO_STD_K3 4
114#define XC4000_AUDIO_STD_L 8
115#define XC4000_AUDIO_STD_INPUT1 16
116#define XC4000_AUDIO_STD_MONO 32
117
118#define XC4000_DEFAULT_FIRMWARE "dvb-fe-xc4000-1.4.fw"
119
120/* Misc Defines */
121#define MAX_TV_STANDARD 24
122#define XC_MAX_I2C_WRITE_LENGTH 64
123#define XC_POWERED_DOWN 0x80000000U
124
125/* Signal Types */
126#define XC_RF_MODE_AIR 0
127#define XC_RF_MODE_CABLE 1
128
129/* Product id */
130#define XC_PRODUCT_ID_FW_NOT_LOADED 0x2000
131#define XC_PRODUCT_ID_XC4000 0x0FA0
132#define XC_PRODUCT_ID_XC4100 0x1004
133
134/* Registers (Write-only) */
135#define XREG_INIT 0x00
136#define XREG_VIDEO_MODE 0x01
137#define XREG_AUDIO_MODE 0x02
138#define XREG_RF_FREQ 0x03
139#define XREG_D_CODE 0x04
140#define XREG_DIRECTSITTING_MODE 0x05
141#define XREG_SEEK_MODE 0x06
142#define XREG_POWER_DOWN 0x08
143#define XREG_SIGNALSOURCE 0x0A
144#define XREG_SMOOTHEDCVBS 0x0E
145#define XREG_AMPLITUDE 0x10
146
147/* Registers (Read-only) */
148#define XREG_ADC_ENV 0x00
149#define XREG_QUALITY 0x01
150#define XREG_FRAME_LINES 0x02
151#define XREG_HSYNC_FREQ 0x03
152#define XREG_LOCK 0x04
153#define XREG_FREQ_ERROR 0x05
154#define XREG_SNR 0x06
155#define XREG_VERSION 0x07
156#define XREG_PRODUCT_ID 0x08
157
158/*
159 Basic firmware description. This will remain with
160 the driver for documentation purposes.
161
162 This represents an I2C firmware file encoded as a
163 string of unsigned char. Format is as follows:
164
165 char[0 ]=len0_MSB -> len = len_MSB * 256 + len_LSB
166 char[1 ]=len0_LSB -> length of first write transaction
167 char[2 ]=data0 -> first byte to be sent
168 char[3 ]=data1
169 char[4 ]=data2
170 char[ ]=...
171 char[M ]=dataN -> last byte to be sent
172 char[M+1]=len1_MSB -> len = len_MSB * 256 + len_LSB
173 char[M+2]=len1_LSB -> length of second write transaction
174 char[M+3]=data0
175 char[M+4]=data1
176 ...
177 etc.
178
179 The [len] value should be interpreted as follows:
180
181 len= len_MSB _ len_LSB
182 len=1111_1111_1111_1111 : End of I2C_SEQUENCE
183 len=0000_0000_0000_0000 : Reset command: Do hardware reset
184 len=0NNN_NNNN_NNNN_NNNN : Normal transaction: number of bytes = {1:32767)
185 len=1WWW_WWWW_WWWW_WWWW : Wait command: wait for {1:32767} ms
186
187 For the RESET and WAIT commands, the two following bytes will contain
188 immediately the length of the following transaction.
189*/
190
191struct XC_TV_STANDARD {
192 const char *Name;
193 u16 audio_mode;
194 u16 video_mode;
195 u16 int_freq;
196};
197
198/* Tuner standards */
199#define XC4000_MN_NTSC_PAL_BTSC 0
200#define XC4000_MN_NTSC_PAL_A2 1
201#define XC4000_MN_NTSC_PAL_EIAJ 2
202#define XC4000_MN_NTSC_PAL_Mono 3
203#define XC4000_BG_PAL_A2 4
204#define XC4000_BG_PAL_NICAM 5
205#define XC4000_BG_PAL_MONO 6
206#define XC4000_I_PAL_NICAM 7
207#define XC4000_I_PAL_NICAM_MONO 8
208#define XC4000_DK_PAL_A2 9
209#define XC4000_DK_PAL_NICAM 10
210#define XC4000_DK_PAL_MONO 11
211#define XC4000_DK_SECAM_A2DK1 12
212#define XC4000_DK_SECAM_A2LDK3 13
213#define XC4000_DK_SECAM_A2MONO 14
214#define XC4000_DK_SECAM_NICAM 15
215#define XC4000_L_SECAM_NICAM 16
216#define XC4000_LC_SECAM_NICAM 17
217#define XC4000_DTV6 18
218#define XC4000_DTV8 19
219#define XC4000_DTV7_8 20
220#define XC4000_DTV7 21
221#define XC4000_FM_Radio_INPUT2 22
222#define XC4000_FM_Radio_INPUT1 23
223
224static struct XC_TV_STANDARD xc4000_standard[MAX_TV_STANDARD] = {
225 {"M/N-NTSC/PAL-BTSC", 0x0000, 0x80A0, 4500},
226 {"M/N-NTSC/PAL-A2", 0x0000, 0x80A0, 4600},
227 {"M/N-NTSC/PAL-EIAJ", 0x0040, 0x80A0, 4500},
228 {"M/N-NTSC/PAL-Mono", 0x0078, 0x80A0, 4500},
229 {"B/G-PAL-A2", 0x0000, 0x8159, 5640},
230 {"B/G-PAL-NICAM", 0x0004, 0x8159, 5740},
231 {"B/G-PAL-MONO", 0x0078, 0x8159, 5500},
232 {"I-PAL-NICAM", 0x0080, 0x8049, 6240},
233 {"I-PAL-NICAM-MONO", 0x0078, 0x8049, 6000},
234 {"D/K-PAL-A2", 0x0000, 0x8049, 6380},
235 {"D/K-PAL-NICAM", 0x0080, 0x8049, 6200},
236 {"D/K-PAL-MONO", 0x0078, 0x8049, 6500},
237 {"D/K-SECAM-A2 DK1", 0x0000, 0x8049, 6340},
238 {"D/K-SECAM-A2 L/DK3", 0x0000, 0x8049, 6000},
239 {"D/K-SECAM-A2 MONO", 0x0078, 0x8049, 6500},
240 {"D/K-SECAM-NICAM", 0x0080, 0x8049, 6200},
241 {"L-SECAM-NICAM", 0x8080, 0x0009, 6200},
242 {"L'-SECAM-NICAM", 0x8080, 0x4009, 6200},
243 {"DTV6", 0x00C0, 0x8002, 0},
244 {"DTV8", 0x00C0, 0x800B, 0},
245 {"DTV7/8", 0x00C0, 0x801B, 0},
246 {"DTV7", 0x00C0, 0x8007, 0},
247 {"FM Radio-INPUT2", 0x0008, 0x9800, 10700},
248 {"FM Radio-INPUT1", 0x0008, 0x9000, 10700}
249};
250
251static int xc4000_readreg(struct xc4000_priv *priv, u16 reg, u16 *val);
252static int xc4000_tuner_reset(struct dvb_frontend *fe);
253static void xc_debug_dump(struct xc4000_priv *priv);
254
255static int xc_send_i2c_data(struct xc4000_priv *priv, u8 *buf, int len)
256{
257 struct i2c_msg msg = { .addr = priv->i2c_props.addr,
258 .flags = 0, .buf = buf, .len = len };
259 if (i2c_transfer(priv->i2c_props.adap, &msg, 1) != 1) {
260 if (priv->ignore_i2c_write_errors == 0) {
261 printk(KERN_ERR "xc4000: I2C write failed (len=%i)\n",
262 len);
263 if (len == 4) {
264 printk(KERN_ERR "bytes %02x %02x %02x %02x\n", buf[0],
265 buf[1], buf[2], buf[3]);
266 }
267 return -EREMOTEIO;
268 }
269 }
270 return 0;
271}
272
273static int xc4000_tuner_reset(struct dvb_frontend *fe)
274{
275 struct xc4000_priv *priv = fe->tuner_priv;
276 int ret;
277
278 dprintk(1, "%s()\n", __func__);
279
280 if (fe->callback) {
281 ret = fe->callback(((fe->dvb) && (fe->dvb->priv)) ?
282 fe->dvb->priv :
283 priv->i2c_props.adap->algo_data,
284 DVB_FRONTEND_COMPONENT_TUNER,
285 XC4000_TUNER_RESET, 0);
286 if (ret) {
287 printk(KERN_ERR "xc4000: reset failed\n");
288 return -EREMOTEIO;
289 }
290 } else {
291 printk(KERN_ERR "xc4000: no tuner reset callback function, "
292 "fatal\n");
293 return -EINVAL;
294 }
295 return 0;
296}
297
298static int xc_write_reg(struct xc4000_priv *priv, u16 regAddr, u16 i2cData)
299{
300 u8 buf[4];
301 int result;
302
303 buf[0] = (regAddr >> 8) & 0xFF;
304 buf[1] = regAddr & 0xFF;
305 buf[2] = (i2cData >> 8) & 0xFF;
306 buf[3] = i2cData & 0xFF;
307 result = xc_send_i2c_data(priv, buf, 4);
308
309 return result;
310}
311
/*
 * Play a firmware byte sequence out to the tuner.  The sequence is a
 * series of big-endian 16-bit length words, each followed by payload:
 *   0x0000          RESET marker (ignored: the reset callback was
 *                   already invoked by check_firmware())
 *   0x8000|ms       wait for 'ms' milliseconds
 *   0xFFFF 0xFFFF   end of sequence
 *   len (<0x8000)   'len' bytes of I2C data; the first two bytes are
 *                   the register address and are re-sent at the start
 *                   of every chunk
 * Returns 0 on success or the error from xc_send_i2c_data().
 */
static int xc_load_i2c_sequence(struct dvb_frontend *fe, const u8 *i2c_sequence)
{
	struct xc4000_priv *priv = fe->tuner_priv;

	int i, nbytes_to_send, result;
	unsigned int len, pos, index;
	u8 buf[XC_MAX_I2C_WRITE_LENGTH];

	index = 0;
	while ((i2c_sequence[index] != 0xFF) ||
		(i2c_sequence[index + 1] != 0xFF)) {
		len = i2c_sequence[index] * 256 + i2c_sequence[index+1];
		if (len == 0x0000) {
			/* RESET command */
			/* NOTE: this is ignored, as the reset callback was */
			/* already called by check_firmware() */
			index += 2;
		} else if (len & 0x8000) {
			/* WAIT command */
			msleep(len & 0x7FFF);
			index += 2;
		} else {
			/* Send i2c data whilst ensuring individual transactions
			 * do not exceed XC_MAX_I2C_WRITE_LENGTH bytes.
			 */
			index += 2;
			buf[0] = i2c_sequence[index];
			buf[1] = i2c_sequence[index + 1];
			pos = 2;
			/* 'pos' tracks payload bytes consumed; the 2-byte
			 * register address is resent with every chunk, so a
			 * full chunk carries XC_MAX_I2C_WRITE_LENGTH-2 bytes
			 * of fresh payload */
			while (pos < len) {
				if ((len - pos) > XC_MAX_I2C_WRITE_LENGTH - 2)
					nbytes_to_send =
						XC_MAX_I2C_WRITE_LENGTH;
				else
					nbytes_to_send = (len - pos + 2);
				for (i = 2; i < nbytes_to_send; i++) {
					buf[i] = i2c_sequence[index + pos +
						i - 2];
				}
				result = xc_send_i2c_data(priv, buf,
					nbytes_to_send);

				if (result != 0)
					return result;

				pos += nbytes_to_send - 2;
			}
			index += len;
		}
	}
	return 0;
}
364
365static int xc_set_tv_standard(struct xc4000_priv *priv,
366 u16 video_mode, u16 audio_mode)
367{
368 int ret;
369 dprintk(1, "%s(0x%04x,0x%04x)\n", __func__, video_mode, audio_mode);
370 dprintk(1, "%s() Standard = %s\n",
371 __func__,
372 xc4000_standard[priv->video_standard].Name);
373
374 /* Don't complain when the request fails because of i2c stretching */
375 priv->ignore_i2c_write_errors = 1;
376
377 ret = xc_write_reg(priv, XREG_VIDEO_MODE, video_mode);
378 if (ret == 0)
379 ret = xc_write_reg(priv, XREG_AUDIO_MODE, audio_mode);
380
381 priv->ignore_i2c_write_errors = 0;
382
383 return ret;
384}
385
386static int xc_set_signal_source(struct xc4000_priv *priv, u16 rf_mode)
387{
388 dprintk(1, "%s(%d) Source = %s\n", __func__, rf_mode,
389 rf_mode == XC_RF_MODE_AIR ? "ANTENNA" : "CABLE");
390
391 if ((rf_mode != XC_RF_MODE_AIR) && (rf_mode != XC_RF_MODE_CABLE)) {
392 rf_mode = XC_RF_MODE_CABLE;
393 printk(KERN_ERR
394 "%s(), Invalid mode, defaulting to CABLE",
395 __func__);
396 }
397 return xc_write_reg(priv, XREG_SIGNALSOURCE, rf_mode);
398}
399
400static const struct dvb_tuner_ops xc4000_tuner_ops;
401
402static int xc_set_rf_frequency(struct xc4000_priv *priv, u32 freq_hz)
403{
404 u16 freq_code;
405
406 dprintk(1, "%s(%u)\n", __func__, freq_hz);
407
408 if ((freq_hz > xc4000_tuner_ops.info.frequency_max) ||
409 (freq_hz < xc4000_tuner_ops.info.frequency_min))
410 return -EINVAL;
411
412 freq_code = (u16)(freq_hz / 15625);
413
414 /* WAS: Starting in firmware version 1.1.44, Xceive recommends using the
415 FINERFREQ for all normal tuning (the doc indicates reg 0x03 should
416 only be used for fast scanning for channel lock) */
417 /* WAS: XREG_FINERFREQ */
418 return xc_write_reg(priv, XREG_RF_FREQ, freq_code);
419}
420
/* Read the ADC envelope (signal amplitude, range 0-1023 per the debug
 * dump below). */
static int xc_get_adc_envelope(struct xc4000_priv *priv, u16 *adc_envelope)
{
	return xc4000_readreg(priv, XREG_ADC_ENV, adc_envelope);
}
425
426static int xc_get_frequency_error(struct xc4000_priv *priv, u32 *freq_error_hz)
427{
428 int result;
429 u16 regData;
430 u32 tmp;
431
432 result = xc4000_readreg(priv, XREG_FREQ_ERROR, &regData);
433 if (result != 0)
434 return result;
435
436 tmp = (u32)regData & 0xFFFFU;
437 tmp = (tmp < 0x8000U ? tmp : 0x10000U - tmp);
438 (*freq_error_hz) = tmp * 15625;
439 return result;
440}
441
/* Read the lock status register (0 = waiting, 1 = locked,
 * 2 = no signal, per the debug dump below). */
static int xc_get_lock_status(struct xc4000_priv *priv, u16 *lock_status)
{
	return xc4000_readreg(priv, XREG_LOCK, lock_status);
}
446
447static int xc_get_version(struct xc4000_priv *priv,
448 u8 *hw_majorversion, u8 *hw_minorversion,
449 u8 *fw_majorversion, u8 *fw_minorversion)
450{
451 u16 data;
452 int result;
453
454 result = xc4000_readreg(priv, XREG_VERSION, &data);
455 if (result != 0)
456 return result;
457
458 (*hw_majorversion) = (data >> 12) & 0x0F;
459 (*hw_minorversion) = (data >> 8) & 0x0F;
460 (*fw_majorversion) = (data >> 4) & 0x0F;
461 (*fw_minorversion) = data & 0x0F;
462
463 return 0;
464}
465
466static int xc_get_hsync_freq(struct xc4000_priv *priv, u32 *hsync_freq_hz)
467{
468 u16 regData;
469 int result;
470
471 result = xc4000_readreg(priv, XREG_HSYNC_FREQ, &regData);
472 if (result != 0)
473 return result;
474
475 (*hsync_freq_hz) = ((regData & 0x0fff) * 763)/100;
476 return result;
477}
478
/* Read the number of lines detected per video frame. */
static int xc_get_frame_lines(struct xc4000_priv *priv, u16 *frame_lines)
{
	return xc4000_readreg(priv, XREG_FRAME_LINES, frame_lines);
}
483
/* Read the signal quality register (0: <8dB ... 7: >56dB, per the
 * debug dump below). */
static int xc_get_quality(struct xc4000_priv *priv, u16 *quality)
{
	return xc4000_readreg(priv, XREG_QUALITY, quality);
}
488
489static u16 xc_wait_for_lock(struct xc4000_priv *priv)
490{
491 u16 lock_state = 0;
492 int watchdog_count = 40;
493
494 while ((lock_state == 0) && (watchdog_count > 0)) {
495 xc_get_lock_status(priv, &lock_state);
496 if (lock_state != 1) {
497 msleep(5);
498 watchdog_count--;
499 }
500 }
501 return lock_state;
502}
503
504static int xc_tune_channel(struct xc4000_priv *priv, u32 freq_hz)
505{
506 int found = 1;
507 int result;
508
509 dprintk(1, "%s(%u)\n", __func__, freq_hz);
510
511 /* Don't complain when the request fails because of i2c stretching */
512 priv->ignore_i2c_write_errors = 1;
513 result = xc_set_rf_frequency(priv, freq_hz);
514 priv->ignore_i2c_write_errors = 0;
515
516 if (result != 0)
517 return 0;
518
519 /* wait for lock only in analog TV mode */
520 if ((priv->cur_fw.type & (FM | DTV6 | DTV7 | DTV78 | DTV8)) == 0) {
521 if (xc_wait_for_lock(priv) != 1)
522 found = 0;
523 }
524
525 /* Wait for stats to stabilize.
526 * Frame Lines needs two frame times after initial lock
527 * before it is valid.
528 */
529 msleep(debug ? 100 : 10);
530
531 if (debug)
532 xc_debug_dump(priv);
533
534 return found;
535}
536
537static int xc4000_readreg(struct xc4000_priv *priv, u16 reg, u16 *val)
538{
539 u8 buf[2] = { reg >> 8, reg & 0xff };
540 u8 bval[2] = { 0, 0 };
541 struct i2c_msg msg[2] = {
542 { .addr = priv->i2c_props.addr,
543 .flags = 0, .buf = &buf[0], .len = 2 },
544 { .addr = priv->i2c_props.addr,
545 .flags = I2C_M_RD, .buf = &bval[0], .len = 2 },
546 };
547
548 if (i2c_transfer(priv->i2c_props.adap, msg, 2) != 2) {
549 printk(KERN_ERR "xc4000: I2C read failed\n");
550 return -EREMOTEIO;
551 }
552
553 *val = (bval[0] << 8) | bval[1];
554 return 0;
555}
556
557#define dump_firm_type(t) dump_firm_type_and_int_freq(t, 0)
558static void dump_firm_type_and_int_freq(unsigned int type, u16 int_freq)
559{
560 if (type & BASE)
561 printk(KERN_CONT "BASE ");
562 if (type & INIT1)
563 printk(KERN_CONT "INIT1 ");
564 if (type & F8MHZ)
565 printk(KERN_CONT "F8MHZ ");
566 if (type & MTS)
567 printk(KERN_CONT "MTS ");
568 if (type & D2620)
569 printk(KERN_CONT "D2620 ");
570 if (type & D2633)
571 printk(KERN_CONT "D2633 ");
572 if (type & DTV6)
573 printk(KERN_CONT "DTV6 ");
574 if (type & QAM)
575 printk(KERN_CONT "QAM ");
576 if (type & DTV7)
577 printk(KERN_CONT "DTV7 ");
578 if (type & DTV78)
579 printk(KERN_CONT "DTV78 ");
580 if (type & DTV8)
581 printk(KERN_CONT "DTV8 ");
582 if (type & FM)
583 printk(KERN_CONT "FM ");
584 if (type & INPUT1)
585 printk(KERN_CONT "INPUT1 ");
586 if (type & LCD)
587 printk(KERN_CONT "LCD ");
588 if (type & NOGD)
589 printk(KERN_CONT "NOGD ");
590 if (type & MONO)
591 printk(KERN_CONT "MONO ");
592 if (type & ATSC)
593 printk(KERN_CONT "ATSC ");
594 if (type & IF)
595 printk(KERN_CONT "IF ");
596 if (type & LG60)
597 printk(KERN_CONT "LG60 ");
598 if (type & ATI638)
599 printk(KERN_CONT "ATI638 ");
600 if (type & OREN538)
601 printk(KERN_CONT "OREN538 ");
602 if (type & OREN36)
603 printk(KERN_CONT "OREN36 ");
604 if (type & TOYOTA388)
605 printk(KERN_CONT "TOYOTA388 ");
606 if (type & TOYOTA794)
607 printk(KERN_CONT "TOYOTA794 ");
608 if (type & DIBCOM52)
609 printk(KERN_CONT "DIBCOM52 ");
610 if (type & ZARLINK456)
611 printk(KERN_CONT "ZARLINK456 ");
612 if (type & CHINA)
613 printk(KERN_CONT "CHINA ");
614 if (type & F6MHZ)
615 printk(KERN_CONT "F6MHZ ");
616 if (type & INPUT2)
617 printk(KERN_CONT "INPUT2 ");
618 if (type & SCODE)
619 printk(KERN_CONT "SCODE ");
620 if (type & HAS_IF)
621 printk(KERN_CONT "HAS_IF_%d ", int_freq);
622}
623
/*
 * Find the firmware image best matching the requested @type flags and
 * video standard mask @*id.  Exact matches on the critical type bits
 * are required; among the remaining candidates the image whose id/type
 * differs in the fewest bits wins.  On success returns the index into
 * priv->firm[] and rewrites *id to the chosen image's id; returns
 * -ENOENT when nothing matches, -EINVAL when no firmware is loaded.
 */
static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
			 v4l2_std_id *id)
{
	struct xc4000_priv *priv = fe->tuner_priv;
	int i, best_i = -1;
	unsigned int best_nr_diffs = 255U;

	if (!priv->firm) {
		printk(KERN_ERR "Error! firmware not loaded\n");
		return -EINVAL;
	}

	/* With no type flags (beyond SCODE) and no standard, default to PAL */
	if (((type & ~SCODE) == 0) && (*id == 0))
		*id = V4L2_STD_PAL;

	/* Seek for generic video standard match */
	for (i = 0; i < priv->firm_size; i++) {
		/* Standards requested but not supported by this image */
		v4l2_std_id id_diff_mask =
			(priv->firm[i].id ^ (*id)) & (*id);
		/* Type bits that differ, restricted to the ones we care about */
		unsigned int type_diff_mask =
			(priv->firm[i].type ^ type)
			& (BASE_TYPES | DTV_TYPES | LCD | NOGD | MONO | SCODE);
		unsigned int nr_diffs;

		/* These type bits must match exactly */
		if (type_diff_mask
		    & (BASE | INIT1 | FM | DTV6 | DTV7 | DTV78 | DTV8 | SCODE))
			continue;

		nr_diffs = hweight64(id_diff_mask) + hweight32(type_diff_mask);
		if (!nr_diffs)	/* Supports all the requested standards */
			goto found;

		if (nr_diffs < best_nr_diffs) {
			best_nr_diffs = nr_diffs;
			best_i = i;
		}
	}

	/* FIXME: Would make sense to seek for type "hint" match ? */
	if (best_i < 0) {
		i = -ENOENT;
		goto ret;
	}

	if (best_nr_diffs > 0U) {
		printk(KERN_WARNING
		       "Selecting best matching firmware (%u bits differ) for "
		       "type=(%x), id %016llx:\n",
		       best_nr_diffs, type, (unsigned long long)*id);
		i = best_i;
	}

found:
	*id = priv->firm[i].id;

ret:
	if (debug) {
		printk(KERN_DEBUG "%s firmware for type=",
		       (i < 0) ? "Can't find" : "Found");
		dump_firm_type(type);
		printk(KERN_DEBUG "(%x), id %016llx.\n", type, (unsigned long long)*id);
	}
	return i;
}
688
689static int load_firmware(struct dvb_frontend *fe, unsigned int type,
690 v4l2_std_id *id)
691{
692 struct xc4000_priv *priv = fe->tuner_priv;
693 int pos, rc;
694 unsigned char *p;
695
696 pos = seek_firmware(fe, type, id);
697 if (pos < 0)
698 return pos;
699
700 p = priv->firm[pos].ptr;
701
702 /* Don't complain when the request fails because of i2c stretching */
703 priv->ignore_i2c_write_errors = 1;
704
705 rc = xc_load_i2c_sequence(fe, p);
706
707 priv->ignore_i2c_write_errors = 0;
708
709 return rc;
710}
711
/*
 * Request the firmware container file and parse it into priv->firm[].
 * File layout: a 32-byte NUL-padded name, a le16 version, a le16 image
 * count, then per image: le32 type flags, le64 v4l2_std_id, an optional
 * le16 IF frequency (present when type has HAS_IF), a le32 payload size
 * and the payload bytes.  Returns 0 on success or a negative errno;
 * partially-allocated state is left for the caller/release path.
 */
static int xc4000_fwupload(struct dvb_frontend *fe)
{
	struct xc4000_priv *priv = fe->tuner_priv;
	const struct firmware *fw = NULL;
	const unsigned char *p, *endp;
	int rc = 0;
	int n, n_array;
	char name[33];
	const char *fname;

	/* Module parameter may override the default firmware file */
	if (firmware_name[0] != '\0')
		fname = firmware_name;
	else
		fname = XC4000_DEFAULT_FIRMWARE;

	dprintk(1, "Reading firmware %s\n", fname);
	rc = request_firmware(&fw, fname, priv->i2c_props.adap->dev.parent);
	if (rc < 0) {
		if (rc == -ENOENT)
			printk(KERN_ERR "Error: firmware %s not found.\n", fname);
		else
			printk(KERN_ERR "Error %d while requesting firmware %s\n",
			       rc, fname);

		return rc;
	}
	p = fw->data;
	endp = p + fw->size;

	/* Must at least hold the header: name + version + image count */
	if (fw->size < sizeof(name) - 1 + 2 + 2) {
		printk(KERN_ERR "Error: firmware file %s has invalid size!\n",
		       fname);
		goto corrupt;
	}

	memcpy(name, p, sizeof(name) - 1);
	name[sizeof(name) - 1] = '\0';
	p += sizeof(name) - 1;

	priv->firm_version = get_unaligned_le16(p);
	p += 2;

	n_array = get_unaligned_le16(p);
	p += 2;

	dprintk(1, "Loading %d firmware images from %s, type: %s, ver %d.%d\n",
		n_array, fname, name,
		priv->firm_version >> 8, priv->firm_version & 0xff);

	priv->firm = kzalloc(sizeof(*priv->firm) * n_array, GFP_KERNEL);
	if (priv->firm == NULL) {
		printk(KERN_ERR "Not enough memory to load firmware file.\n");
		rc = -ENOMEM;
		goto done;
	}
	priv->firm_size = n_array;

	n = -1;
	while (p < endp) {
		__u32 type, size;
		v4l2_std_id id;
		__u16 int_freq = 0;

		n++;
		if (n >= n_array) {
			printk(KERN_ERR "More firmware images in file than "
			       "were expected!\n");
			goto corrupt;
		}

		/* Checks if there's enough bytes to read */
		if (endp - p < sizeof(type) + sizeof(id) + sizeof(size))
			goto header;

		type = get_unaligned_le32(p);
		p += sizeof(type);

		id = get_unaligned_le64(p);
		p += sizeof(id);

		/* IF frequency field is only present for SCODE-style images */
		if (type & HAS_IF) {
			int_freq = get_unaligned_le16(p);
			p += sizeof(int_freq);
			if (endp - p < sizeof(size))
				goto header;
		}

		size = get_unaligned_le32(p);
		p += sizeof(size);

		/* Payload must be non-empty and fit in the remaining bytes */
		if (!size || size > endp - p) {
			printk(KERN_ERR "Firmware type (%x), id %llx is corrupted (size=%d, expected %d)\n",
			       type, (unsigned long long)id,
			       (unsigned)(endp - p), size);
			goto corrupt;
		}

		priv->firm[n].ptr = kzalloc(size, GFP_KERNEL);
		if (priv->firm[n].ptr == NULL) {
			printk(KERN_ERR "Not enough memory to load firmware file.\n");
			rc = -ENOMEM;
			goto done;
		}

		if (debug) {
			printk(KERN_DEBUG "Reading firmware type ");
			dump_firm_type_and_int_freq(type, int_freq);
			printk(KERN_DEBUG "(%x), id %llx, size=%d.\n",
			       type, (unsigned long long)id, size);
		}

		memcpy(priv->firm[n].ptr, p, size);
		priv->firm[n].type = type;
		priv->firm[n].id = id;
		priv->firm[n].size = size;
		priv->firm[n].int_freq = int_freq;

		p += size;
	}

	/* All advertised images must actually be present */
	if (n + 1 != priv->firm_size) {
		printk(KERN_ERR "Firmware file is incomplete!\n");
		goto corrupt;
	}

	goto done;

header:
	printk(KERN_ERR "Firmware header is incomplete!\n");
corrupt:
	rc = -EINVAL;
	printk(KERN_ERR "Error: firmware file is corrupted!\n");

done:
	release_firmware(fw);
	if (rc == 0)
		dprintk(1, "Firmware files loaded.\n");

	return rc;
}
852
/*
 * Load one 12-byte SCODE entry into the tuner.  The image is found by
 * IF frequency when @int_freq is non-zero, otherwise via seek_firmware()
 * on @type/@id; an SCODE image holds 16 entries of 12 bytes each and
 * @scode selects which one.  The entry is written while the tuner is in
 * direct-sitting mode, after which indirect mode is restored even if
 * the write failed.
 */
static int load_scode(struct dvb_frontend *fe, unsigned int type,
		      v4l2_std_id *id, __u16 int_freq, int scode)
{
	struct xc4000_priv *priv = fe->tuner_priv;
	int pos, rc;
	unsigned char *p;
	u8 scode_buf[13];
	u8 indirect_mode[5];

	dprintk(1, "%s called int_freq=%d\n", __func__, int_freq);

	if (!int_freq) {
		pos = seek_firmware(fe, type, id);
		if (pos < 0)
			return pos;
	} else {
		/* Match on IF frequency among HAS_IF images */
		for (pos = 0; pos < priv->firm_size; pos++) {
			if ((priv->firm[pos].int_freq == int_freq) &&
			    (priv->firm[pos].type & HAS_IF))
				break;
		}
		if (pos == priv->firm_size)
			return -ENOENT;
	}

	p = priv->firm[pos].ptr;

	/* Image must be exactly 16 entries of 12 bytes */
	if (priv->firm[pos].size != 12 * 16 || scode >= 16)
		return -EINVAL;
	p += 12 * scode;

	if (debug) {
		tuner_info("Loading SCODE for type=");
		dump_firm_type_and_int_freq(priv->firm[pos].type,
					    priv->firm[pos].int_freq);
		printk(KERN_CONT "(%x), id %016llx.\n", priv->firm[pos].type,
		       (unsigned long long)*id);
	}

	/* Entry is sent with a leading 0x00 register/command byte */
	scode_buf[0] = 0x00;
	memcpy(&scode_buf[1], p, 12);

	/* Enter direct-mode */
	rc = xc_write_reg(priv, XREG_DIRECTSITTING_MODE, 0);
	if (rc < 0) {
		printk(KERN_ERR "failed to put device into direct mode!\n");
		return -EIO;
	}

	rc = xc_send_i2c_data(priv, scode_buf, 13);
	if (rc != 0) {
		/* Even if the send failed, make sure we set back to indirect
		   mode */
		printk(KERN_ERR "Failed to set scode %d\n", rc);
	}

	/* Switch back to indirect-mode */
	memset(indirect_mode, 0, sizeof(indirect_mode));
	indirect_mode[4] = 0x88;
	xc_send_i2c_data(priv, indirect_mode, sizeof(indirect_mode));
	msleep(10);

	return 0;
}
917
/*
 * Ensure the tuner is running the correct firmware stack for the
 * requested @type/@std/@int_freq: BASE (plus optional INIT1), the
 * standard-specific image, and the matching SCODE.  Each stage is
 * skipped when priv->cur_fw shows it is already loaded.  Afterwards the
 * product-id and version registers are read back as a sanity check.
 * On any failure the cached firmware state is cleared and the whole
 * sequence retried once; returns 0 on success or a negative errno.
 */
static int check_firmware(struct dvb_frontend *fe, unsigned int type,
			  v4l2_std_id std, __u16 int_freq)
{
	struct xc4000_priv *priv = fe->tuner_priv;
	struct firmware_properties new_fw;
	int rc = 0, is_retry = 0;
	u16 hwmodel;
	v4l2_std_id std0;
	u8 hw_major, hw_minor, fw_major, fw_minor;

	dprintk(1, "%s called\n", __func__);

	/* Parse the firmware file on first use */
	if (!priv->firm) {
		rc = xc4000_fwupload(fe);
		if (rc < 0)
			return rc;
	}

retry:
	new_fw.type = type;
	new_fw.id = std;
	new_fw.std_req = std;
	new_fw.scode_table = SCODE;
	new_fw.scode_nr = 0;
	new_fw.int_freq = int_freq;

	dprintk(1, "checking firmware, user requested type=");
	if (debug) {
		dump_firm_type(new_fw.type);
		printk(KERN_CONT "(%x), id %016llx, ", new_fw.type,
		       (unsigned long long)new_fw.std_req);
		if (!int_freq)
			printk(KERN_CONT "scode_tbl ");
		else
			printk(KERN_CONT "int_freq %d, ", new_fw.int_freq);
		printk(KERN_CONT "scode_nr %d\n", new_fw.scode_nr);
	}

	/* No need to reload base firmware if it matches */
	if (priv->cur_fw.type & BASE) {
		dprintk(1, "BASE firmware not changed.\n");
		goto skip_base;
	}

	/* Updating BASE - forget about all currently loaded firmware */
	memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));

	/* Reset is needed before loading firmware */
	rc = xc4000_tuner_reset(fe);
	if (rc < 0)
		goto fail;

	/* BASE firmwares are all std0 */
	std0 = 0;
	rc = load_firmware(fe, BASE, &std0);
	if (rc < 0) {
		printk(KERN_ERR "Error %d while loading base firmware\n", rc);
		goto fail;
	}

	/* Load INIT1, if needed */
	dprintk(1, "Load init1 firmware, if exists\n");

	rc = load_firmware(fe, BASE | INIT1, &std0);
	if (rc == -ENOENT)
		/* NOTE(review): retry uses identical arguments; looks like a
		 * different INIT1 variant was intended here - confirm */
		rc = load_firmware(fe, BASE | INIT1, &std0);
	if (rc < 0 && rc != -ENOENT) {
		tuner_err("Error %d while loading init1 firmware\n",
			  rc);
		goto fail;
	}

skip_base:
	/*
	 * No need to reload standard specific firmware if base firmware
	 * was not reloaded and requested video standards have not changed.
	 */
	if (priv->cur_fw.type == (BASE | new_fw.type) &&
	    priv->cur_fw.std_req == std) {
		dprintk(1, "Std-specific firmware already loaded.\n");
		goto skip_std_specific;
	}

	/* Reloading std-specific firmware forces a SCODE update */
	priv->cur_fw.scode_table = 0;

	/* Load the standard firmware */
	rc = load_firmware(fe, new_fw.type, &new_fw.id);

	if (rc < 0)
		goto fail;

skip_std_specific:
	if (priv->cur_fw.scode_table == new_fw.scode_table &&
	    priv->cur_fw.scode_nr == new_fw.scode_nr) {
		dprintk(1, "SCODE firmware already loaded.\n");
		goto check_device;
	}

	/* Load SCODE firmware, if exists */
	rc = load_scode(fe, new_fw.type | new_fw.scode_table, &new_fw.id,
			new_fw.int_freq, new_fw.scode_nr);
	if (rc != 0)
		dprintk(1, "load scode failed %d\n", rc);

check_device:
	rc = xc4000_readreg(priv, XREG_PRODUCT_ID, &hwmodel);

	if (xc_get_version(priv, &hw_major, &hw_minor, &fw_major,
			   &fw_minor) != 0) {
		printk(KERN_ERR "Unable to read tuner registers.\n");
		goto fail;
	}

	dprintk(1, "Device is Xceive %d version %d.%d, "
		"firmware version %d.%d\n",
		hwmodel, hw_major, hw_minor, fw_major, fw_minor);

	/* Check firmware version against what we downloaded. */
	if (priv->firm_version != ((fw_major << 8) | fw_minor)) {
		printk(KERN_WARNING
		       "Incorrect readback of firmware version %d.%d.\n",
		       fw_major, fw_minor);
		goto fail;
	}

	/* Check that the tuner hardware model remains consistent over time. */
	if (priv->hwmodel == 0 &&
	    (hwmodel == XC_PRODUCT_ID_XC4000 ||
	     hwmodel == XC_PRODUCT_ID_XC4100)) {
		priv->hwmodel = hwmodel;
		priv->hwvers = (hw_major << 8) | hw_minor;
	} else if (priv->hwmodel == 0 || priv->hwmodel != hwmodel ||
		   priv->hwvers != ((hw_major << 8) | hw_minor)) {
		printk(KERN_WARNING
		       "Read invalid device hardware information - tuner "
		       "hung?\n");
		goto fail;
	}

	memcpy(&priv->cur_fw, &new_fw, sizeof(priv->cur_fw));

	/*
	 * By setting BASE in cur_fw.type only after successfully loading all
	 * firmwares, we can:
	 * 1. Identify that BASE firmware with type=0 has been loaded;
	 * 2. Tell whether BASE firmware was just changed the next time through.
	 */
	priv->cur_fw.type |= BASE;

	return 0;

fail:
	/* Forget everything and retry the full sequence once */
	memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
	if (!is_retry) {
		msleep(50);
		is_retry = 1;
		dprintk(1, "Retrying firmware load\n");
		goto retry;
	}

	if (rc == -ENOENT)
		rc = -EINVAL;
	return rc;
}
1083
1084static void xc_debug_dump(struct xc4000_priv *priv)
1085{
1086 u16 adc_envelope;
1087 u32 freq_error_hz = 0;
1088 u16 lock_status;
1089 u32 hsync_freq_hz = 0;
1090 u16 frame_lines;
1091 u16 quality;
1092 u8 hw_majorversion = 0, hw_minorversion = 0;
1093 u8 fw_majorversion = 0, fw_minorversion = 0;
1094
1095 xc_get_adc_envelope(priv, &adc_envelope);
1096 dprintk(1, "*** ADC envelope (0-1023) = %d\n", adc_envelope);
1097
1098 xc_get_frequency_error(priv, &freq_error_hz);
1099 dprintk(1, "*** Frequency error = %d Hz\n", freq_error_hz);
1100
1101 xc_get_lock_status(priv, &lock_status);
1102 dprintk(1, "*** Lock status (0-Wait, 1-Locked, 2-No-signal) = %d\n",
1103 lock_status);
1104
1105 xc_get_version(priv, &hw_majorversion, &hw_minorversion,
1106 &fw_majorversion, &fw_minorversion);
1107 dprintk(1, "*** HW: V%02x.%02x, FW: V%02x.%02x\n",
1108 hw_majorversion, hw_minorversion,
1109 fw_majorversion, fw_minorversion);
1110
1111 if (priv->video_standard < XC4000_DTV6) {
1112 xc_get_hsync_freq(priv, &hsync_freq_hz);
1113 dprintk(1, "*** Horizontal sync frequency = %d Hz\n",
1114 hsync_freq_hz);
1115
1116 xc_get_frame_lines(priv, &frame_lines);
1117 dprintk(1, "*** Frame lines = %d\n", frame_lines);
1118 }
1119
1120 xc_get_quality(priv, &quality);
1121 dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality);
1122}
1123
/*
 * Digital (DVB) tune entry point.  Maps the frontend type/modulation/
 * bandwidth to an xc4000 firmware type and IF-compensated frequency,
 * loads the matching firmware, programs signal source, TV standard and
 * auxiliary registers, then tunes.  Returns 0 on success or a negative
 * errno; runs with priv->lock held.
 */
static int xc4000_set_params(struct dvb_frontend *fe,
	struct dvb_frontend_parameters *params)
{
	struct xc4000_priv *priv = fe->tuner_priv;
	unsigned int type;
	int ret = -EREMOTEIO;

	dprintk(1, "%s() frequency=%d (Hz)\n", __func__, params->frequency);

	mutex_lock(&priv->lock);

	if (fe->ops.info.type == FE_ATSC) {
		dprintk(1, "%s() ATSC\n", __func__);
		switch (params->u.vsb.modulation) {
		case VSB_8:
		case VSB_16:
			dprintk(1, "%s() VSB modulation\n", __func__);
			priv->rf_mode = XC_RF_MODE_AIR;
			/* center frequency offset for 6 MHz channels */
			priv->freq_hz = params->frequency - 1750000;
			priv->bandwidth = BANDWIDTH_6_MHZ;
			priv->video_standard = XC4000_DTV6;
			type = DTV6;
			break;
		case QAM_64:
		case QAM_256:
		case QAM_AUTO:
			dprintk(1, "%s() QAM modulation\n", __func__);
			priv->rf_mode = XC_RF_MODE_CABLE;
			priv->freq_hz = params->frequency - 1750000;
			priv->bandwidth = BANDWIDTH_6_MHZ;
			priv->video_standard = XC4000_DTV6;
			type = DTV6;
			break;
		default:
			ret = -EINVAL;
			goto fail;
		}
	} else if (fe->ops.info.type == FE_OFDM) {
		dprintk(1, "%s() OFDM\n", __func__);
		switch (params->u.ofdm.bandwidth) {
		case BANDWIDTH_6_MHZ:
			priv->bandwidth = BANDWIDTH_6_MHZ;
			priv->video_standard = XC4000_DTV6;
			priv->freq_hz = params->frequency - 1750000;
			type = DTV6;
			break;
		case BANDWIDTH_7_MHZ:
			priv->bandwidth = BANDWIDTH_7_MHZ;
			priv->video_standard = XC4000_DTV7;
			priv->freq_hz = params->frequency - 2250000;
			type = DTV7;
			break;
		case BANDWIDTH_8_MHZ:
			priv->bandwidth = BANDWIDTH_8_MHZ;
			priv->video_standard = XC4000_DTV8;
			priv->freq_hz = params->frequency - 2750000;
			type = DTV8;
			break;
		case BANDWIDTH_AUTO:
			/* guess bandwidth from the frequency band */
			if (params->frequency < 400000000) {
				priv->bandwidth = BANDWIDTH_7_MHZ;
				priv->freq_hz = params->frequency - 2250000;
			} else {
				priv->bandwidth = BANDWIDTH_8_MHZ;
				priv->freq_hz = params->frequency - 2750000;
			}
			priv->video_standard = XC4000_DTV7_8;
			type = DTV78;
			break;
		default:
			printk(KERN_ERR "xc4000 bandwidth not set!\n");
			ret = -EINVAL;
			goto fail;
		}
		priv->rf_mode = XC_RF_MODE_AIR;
	} else {
		printk(KERN_ERR "xc4000 modulation type not supported!\n");
		ret = -EINVAL;
		goto fail;
	}

	dprintk(1, "%s() frequency=%d (compensated)\n",
		__func__, priv->freq_hz);

	/* Make sure the correct firmware type is loaded */
	if (check_firmware(fe, type, 0, priv->if_khz) != 0)
		goto fail;

	ret = xc_set_signal_source(priv, priv->rf_mode);
	if (ret != 0) {
		printk(KERN_ERR "xc4000: xc_set_signal_source(%d) failed\n",
		       priv->rf_mode);
		goto fail;
	} else {
		u16 video_mode, audio_mode;
		video_mode = xc4000_standard[priv->video_standard].video_mode;
		audio_mode = xc4000_standard[priv->video_standard].audio_mode;
		/* firmware 1.02 does not take the DTV6 mode-word tweak */
		if (type == DTV6 && priv->firm_version != 0x0102)
			video_mode |= 0x0001;
		ret = xc_set_tv_standard(priv, video_mode, audio_mode);
		if (ret != 0) {
			printk(KERN_ERR "xc4000: xc_set_tv_standard failed\n");
			/* DJH - do not return when it fails... */
			/* goto fail; */
		}
	}

	if (xc_write_reg(priv, XREG_D_CODE, 0) == 0)
		ret = 0;
	if (priv->dvb_amplitude != 0) {
		/* firmware 1.02 wants 132 in place of the usual 134 */
		if (xc_write_reg(priv, XREG_AMPLITUDE,
				 (priv->firm_version != 0x0102 ||
				  priv->dvb_amplitude != 134 ?
				  priv->dvb_amplitude : 132)) != 0)
			ret = -EREMOTEIO;
	}
	if (priv->set_smoothedcvbs != 0) {
		if (xc_write_reg(priv, XREG_SMOOTHEDCVBS, 1) != 0)
			ret = -EREMOTEIO;
	}
	if (ret != 0) {
		/* deliberately non-fatal: still attempt the tune */
		printk(KERN_ERR "xc4000: setting registers failed\n");
		/* goto fail; */
	}

	xc_tune_channel(priv, priv->freq_hz);

	ret = 0;

fail:
	mutex_unlock(&priv->lock);

	return ret;
}
1258
/*
 * Analog TV / FM radio tune entry point.  Translates the V4L2 standard
 * mask and the audio_std module parameter into an xc4000_standard[]
 * index and firmware type, loads firmware, programs signal source,
 * TV standard and auxiliary registers, then tunes.  Returns 0 on
 * success or a negative errno; runs with priv->lock held.
 */
static int xc4000_set_analog_params(struct dvb_frontend *fe,
	struct analog_parameters *params)
{
	struct xc4000_priv *priv = fe->tuner_priv;
	unsigned int type = 0;
	int ret = -EREMOTEIO;

	if (params->mode == V4L2_TUNER_RADIO) {
		dprintk(1, "%s() frequency=%d (in units of 62.5Hz)\n",
			__func__, params->frequency);

		mutex_lock(&priv->lock);

		params->std = 0;
		/* radio frequency unit is 62.5 Hz */
		priv->freq_hz = params->frequency * 125L / 2;

		if (audio_std & XC4000_AUDIO_STD_INPUT1) {
			priv->video_standard = XC4000_FM_Radio_INPUT1;
			type = FM | INPUT1;
		} else {
			priv->video_standard = XC4000_FM_Radio_INPUT2;
			type = FM | INPUT2;
		}

		goto tune_channel;
	}

	dprintk(1, "%s() frequency=%d (in units of 62.5khz)\n",
		__func__, params->frequency);

	mutex_lock(&priv->lock);

	/* params->frequency is in units of 62.5khz */
	priv->freq_hz = params->frequency * 62500;

	params->std &= V4L2_STD_ALL;
	/* if std is not defined, choose one */
	if (!params->std)
		params->std = V4L2_STD_PAL_BG;

	if (audio_std & XC4000_AUDIO_STD_MONO)
		type = MONO;

	/* Each branch below narrows params->std to the chosen standard
	 * and picks the matching xc4000_standard[] entry, refined by the
	 * audio_std module parameter. */
	if (params->std & V4L2_STD_MN) {
		params->std = V4L2_STD_MN;
		if (audio_std & XC4000_AUDIO_STD_MONO) {
			priv->video_standard = XC4000_MN_NTSC_PAL_Mono;
		} else if (audio_std & XC4000_AUDIO_STD_A2) {
			params->std |= V4L2_STD_A2;
			priv->video_standard = XC4000_MN_NTSC_PAL_A2;
		} else {
			params->std |= V4L2_STD_BTSC;
			priv->video_standard = XC4000_MN_NTSC_PAL_BTSC;
		}
		goto tune_channel;
	}

	if (params->std & V4L2_STD_PAL_BG) {
		params->std = V4L2_STD_PAL_BG;
		if (audio_std & XC4000_AUDIO_STD_MONO) {
			priv->video_standard = XC4000_BG_PAL_MONO;
		} else if (!(audio_std & XC4000_AUDIO_STD_A2)) {
			if (!(audio_std & XC4000_AUDIO_STD_B)) {
				params->std |= V4L2_STD_NICAM_A;
				priv->video_standard = XC4000_BG_PAL_NICAM;
			} else {
				params->std |= V4L2_STD_NICAM_B;
				priv->video_standard = XC4000_BG_PAL_NICAM;
			}
		} else {
			if (!(audio_std & XC4000_AUDIO_STD_B)) {
				params->std |= V4L2_STD_A2_A;
				priv->video_standard = XC4000_BG_PAL_A2;
			} else {
				params->std |= V4L2_STD_A2_B;
				priv->video_standard = XC4000_BG_PAL_A2;
			}
		}
		goto tune_channel;
	}

	if (params->std & V4L2_STD_PAL_I) {
		/* default to NICAM audio standard */
		params->std = V4L2_STD_PAL_I | V4L2_STD_NICAM;
		if (audio_std & XC4000_AUDIO_STD_MONO)
			priv->video_standard = XC4000_I_PAL_NICAM_MONO;
		else
			priv->video_standard = XC4000_I_PAL_NICAM;
		goto tune_channel;
	}

	if (params->std & V4L2_STD_PAL_DK) {
		params->std = V4L2_STD_PAL_DK;
		if (audio_std & XC4000_AUDIO_STD_MONO) {
			priv->video_standard = XC4000_DK_PAL_MONO;
		} else if (audio_std & XC4000_AUDIO_STD_A2) {
			params->std |= V4L2_STD_A2;
			priv->video_standard = XC4000_DK_PAL_A2;
		} else {
			params->std |= V4L2_STD_NICAM;
			priv->video_standard = XC4000_DK_PAL_NICAM;
		}
		goto tune_channel;
	}

	if (params->std & V4L2_STD_SECAM_DK) {
		/* default to A2 audio standard */
		params->std = V4L2_STD_SECAM_DK | V4L2_STD_A2;
		if (audio_std & XC4000_AUDIO_STD_L) {
			/* type=0 forces a plain std match in seek_firmware */
			type = 0;
			priv->video_standard = XC4000_DK_SECAM_NICAM;
		} else if (audio_std & XC4000_AUDIO_STD_MONO) {
			priv->video_standard = XC4000_DK_SECAM_A2MONO;
		} else if (audio_std & XC4000_AUDIO_STD_K3) {
			params->std |= V4L2_STD_SECAM_K3;
			priv->video_standard = XC4000_DK_SECAM_A2LDK3;
		} else {
			priv->video_standard = XC4000_DK_SECAM_A2DK1;
		}
		goto tune_channel;
	}

	if (params->std & V4L2_STD_SECAM_L) {
		/* default to NICAM audio standard */
		type = 0;
		params->std = V4L2_STD_SECAM_L | V4L2_STD_NICAM;
		priv->video_standard = XC4000_L_SECAM_NICAM;
		goto tune_channel;
	}

	if (params->std & V4L2_STD_SECAM_LC) {
		/* default to NICAM audio standard */
		type = 0;
		params->std = V4L2_STD_SECAM_LC | V4L2_STD_NICAM;
		priv->video_standard = XC4000_LC_SECAM_NICAM;
		goto tune_channel;
	}

tune_channel:
	/* FIXME: it could be air. */
	priv->rf_mode = XC_RF_MODE_CABLE;

	if (check_firmware(fe, type, params->std,
			   xc4000_standard[priv->video_standard].int_freq) != 0)
		goto fail;

	ret = xc_set_signal_source(priv, priv->rf_mode);
	if (ret != 0) {
		printk(KERN_ERR
		       "xc4000: xc_set_signal_source(%d) failed\n",
		       priv->rf_mode);
		goto fail;
	} else {
		u16 video_mode, audio_mode;
		video_mode = xc4000_standard[priv->video_standard].video_mode;
		audio_mode = xc4000_standard[priv->video_standard].audio_mode;
		/* mode-word tweaks for NOGD firmware / fw 1.02 / system B */
		if (priv->video_standard < XC4000_BG_PAL_A2) {
			if (type & NOGD)
				video_mode &= 0xFF7F;
		} else if (priv->video_standard < XC4000_I_PAL_NICAM) {
			if (priv->firm_version == 0x0102)
				video_mode &= 0xFEFF;
			if (audio_std & XC4000_AUDIO_STD_B)
				video_mode |= 0x0080;
		}
		ret = xc_set_tv_standard(priv, video_mode, audio_mode);
		if (ret != 0) {
			printk(KERN_ERR "xc4000: xc_set_tv_standard failed\n");
			goto fail;
		}
	}

	if (xc_write_reg(priv, XREG_D_CODE, 0) == 0)
		ret = 0;
	if (xc_write_reg(priv, XREG_AMPLITUDE, 1) != 0)
		ret = -EREMOTEIO;
	if (priv->set_smoothedcvbs != 0) {
		if (xc_write_reg(priv, XREG_SMOOTHEDCVBS, 1) != 0)
			ret = -EREMOTEIO;
	}
	if (ret != 0) {
		printk(KERN_ERR "xc4000: setting registers failed\n");
		goto fail;
	}

	xc_tune_channel(priv, priv->freq_hz);

	ret = 0;

fail:
	mutex_unlock(&priv->lock);

	return ret;
}
1453
1454static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
1455{
1456 struct xc4000_priv *priv = fe->tuner_priv;
1457
1458 *freq = priv->freq_hz;
1459
1460 if (debug) {
1461 mutex_lock(&priv->lock);
1462 if ((priv->cur_fw.type
1463 & (BASE | FM | DTV6 | DTV7 | DTV78 | DTV8)) == BASE) {
1464 u16 snr = 0;
1465 if (xc4000_readreg(priv, XREG_SNR, &snr) == 0) {
1466 mutex_unlock(&priv->lock);
1467 dprintk(1, "%s() freq = %u, SNR = %d\n",
1468 __func__, *freq, snr);
1469 return 0;
1470 }
1471 }
1472 mutex_unlock(&priv->lock);
1473 }
1474
1475 dprintk(1, "%s()\n", __func__);
1476
1477 return 0;
1478}
1479
1480static int xc4000_get_bandwidth(struct dvb_frontend *fe, u32 *bw)
1481{
1482 struct xc4000_priv *priv = fe->tuner_priv;
1483 dprintk(1, "%s()\n", __func__);
1484
1485 *bw = priv->bandwidth;
1486 return 0;
1487}
1488
1489static int xc4000_get_status(struct dvb_frontend *fe, u32 *status)
1490{
1491 struct xc4000_priv *priv = fe->tuner_priv;
1492 u16 lock_status = 0;
1493
1494 mutex_lock(&priv->lock);
1495
1496 if (priv->cur_fw.type & BASE)
1497 xc_get_lock_status(priv, &lock_status);
1498
1499 *status = (lock_status == 1 ?
1500 TUNER_STATUS_LOCKED | TUNER_STATUS_STEREO : 0);
1501 if (priv->cur_fw.type & (DTV6 | DTV7 | DTV78 | DTV8))
1502 *status &= (~TUNER_STATUS_STEREO);
1503
1504 mutex_unlock(&priv->lock);
1505
1506 dprintk(2, "%s() lock_status = %d\n", __func__, lock_status);
1507
1508 return 0;
1509}
1510
/*
 * Power the tuner down (if power management allows it) and mark the
 * firmware state as XC_POWERED_DOWN so the next use forces a full reset
 * and firmware reload.  Returns 0 or -EREMOTEIO if the power-down
 * register write fails.
 *
 * NOTE(review): `no_poweroff` is referenced but defined outside this
 * view — presumably a module parameter (0 = follow default_pm,
 * 2 = always power off); confirm against the top of the file.
 */
static int xc4000_sleep(struct dvb_frontend *fe)
{
	struct xc4000_priv *priv = fe->tuner_priv;
	int ret = 0;

	dprintk(1, "%s()\n", __func__);

	mutex_lock(&priv->lock);

	/* Avoid firmware reload on slow devices */
	if ((no_poweroff == 2 ||
	     (no_poweroff == 0 && priv->default_pm != 0)) &&
	    (priv->cur_fw.type & BASE) != 0) {
		/* force reset and firmware reload */
		priv->cur_fw.type = XC_POWERED_DOWN;

		if (xc_write_reg(priv, XREG_POWER_DOWN, 0) != 0) {
			printk(KERN_ERR
			       "xc4000: %s() unable to shutdown tuner\n",
			       __func__);
			ret = -EREMOTEIO;
		}
		/* give the chip time to settle before the bus is reused */
		msleep(20);
	}

	mutex_unlock(&priv->lock);

	return ret;
}
1540
/*
 * Nothing to do here: the firmware is uploaded at attach time
 * (xc4000_attach -> xc4000_fwupload) and per-standard setup happens
 * in the set_params hooks.
 */
static int xc4000_init(struct dvb_frontend *fe)
{
	dprintk(1, "%s()\n", __func__);
	return 0;
}
1547
1548static int xc4000_release(struct dvb_frontend *fe)
1549{
1550 struct xc4000_priv *priv = fe->tuner_priv;
1551
1552 dprintk(1, "%s()\n", __func__);
1553
1554 mutex_lock(&xc4000_list_mutex);
1555
1556 if (priv)
1557 hybrid_tuner_release_state(priv);
1558
1559 mutex_unlock(&xc4000_list_mutex);
1560
1561 fe->tuner_priv = NULL;
1562
1563 return 0;
1564}
1565
/*
 * Tuner operations copied into fe->ops.tuner_ops by xc4000_attach().
 * Frequency limits/step follow the dvb_tuner_info convention
 * (values in Hz here — 1 MHz .. 1.023 GHz, 50 kHz step).
 */
static const struct dvb_tuner_ops xc4000_tuner_ops = {
	.info = {
		.name = "Xceive XC4000",
		.frequency_min = 1000000,
		.frequency_max = 1023000000,
		.frequency_step = 50000,
	},

	.release = xc4000_release,
	.init = xc4000_init,
	.sleep = xc4000_sleep,

	.set_params = xc4000_set_params,
	.set_analog_params = xc4000_set_analog_params,
	.get_frequency = xc4000_get_frequency,
	.get_bandwidth = xc4000_get_bandwidth,
	.get_status = xc4000_get_status
};
1584
1585struct dvb_frontend *xc4000_attach(struct dvb_frontend *fe,
1586 struct i2c_adapter *i2c,
1587 struct xc4000_config *cfg)
1588{
1589 struct xc4000_priv *priv = NULL;
1590 int instance;
1591 u16 id = 0;
1592
1593 dprintk(1, "%s(%d-%04x)\n", __func__,
1594 i2c ? i2c_adapter_id(i2c) : -1,
1595 cfg ? cfg->i2c_address : -1);
1596
1597 mutex_lock(&xc4000_list_mutex);
1598
1599 instance = hybrid_tuner_request_state(struct xc4000_priv, priv,
1600 hybrid_tuner_instance_list,
1601 i2c, cfg->i2c_address, "xc4000");
1602 switch (instance) {
1603 case 0:
1604 goto fail;
1605 break;
1606 case 1:
1607 /* new tuner instance */
1608 priv->bandwidth = BANDWIDTH_6_MHZ;
1609 /* set default configuration */
1610 priv->if_khz = 4560;
1611 priv->default_pm = 0;
1612 priv->dvb_amplitude = 134;
1613 priv->set_smoothedcvbs = 1;
1614 mutex_init(&priv->lock);
1615 fe->tuner_priv = priv;
1616 break;
1617 default:
1618 /* existing tuner instance */
1619 fe->tuner_priv = priv;
1620 break;
1621 }
1622
1623 if (cfg->if_khz != 0) {
1624 /* copy configuration if provided by the caller */
1625 priv->if_khz = cfg->if_khz;
1626 priv->default_pm = cfg->default_pm;
1627 priv->dvb_amplitude = cfg->dvb_amplitude;
1628 priv->set_smoothedcvbs = cfg->set_smoothedcvbs;
1629 }
1630
1631 /* Check if firmware has been loaded. It is possible that another
1632 instance of the driver has loaded the firmware.
1633 */
1634
1635 if (instance == 1) {
1636 if (xc4000_readreg(priv, XREG_PRODUCT_ID, &id) != 0)
1637 goto fail;
1638 } else {
1639 id = ((priv->cur_fw.type & BASE) != 0 ?
1640 priv->hwmodel : XC_PRODUCT_ID_FW_NOT_LOADED);
1641 }
1642
1643 switch (id) {
1644 case XC_PRODUCT_ID_XC4000:
1645 case XC_PRODUCT_ID_XC4100:
1646 printk(KERN_INFO
1647 "xc4000: Successfully identified at address 0x%02x\n",
1648 cfg->i2c_address);
1649 printk(KERN_INFO
1650 "xc4000: Firmware has been loaded previously\n");
1651 break;
1652 case XC_PRODUCT_ID_FW_NOT_LOADED:
1653 printk(KERN_INFO
1654 "xc4000: Successfully identified at address 0x%02x\n",
1655 cfg->i2c_address);
1656 printk(KERN_INFO
1657 "xc4000: Firmware has not been loaded previously\n");
1658 break;
1659 default:
1660 printk(KERN_ERR
1661 "xc4000: Device not found at addr 0x%02x (0x%x)\n",
1662 cfg->i2c_address, id);
1663 goto fail;
1664 }
1665
1666 mutex_unlock(&xc4000_list_mutex);
1667
1668 memcpy(&fe->ops.tuner_ops, &xc4000_tuner_ops,
1669 sizeof(struct dvb_tuner_ops));
1670
1671 if (instance == 1) {
1672 int ret;
1673 mutex_lock(&priv->lock);
1674 ret = xc4000_fwupload(fe);
1675 mutex_unlock(&priv->lock);
1676 if (ret != 0)
1677 goto fail2;
1678 }
1679
1680 return fe;
1681fail:
1682 mutex_unlock(&xc4000_list_mutex);
1683fail2:
1684 xc4000_release(fe);
1685 return NULL;
1686}
1687EXPORT_SYMBOL(xc4000_attach);
1688
1689MODULE_AUTHOR("Steven Toth, Davide Ferri");
1690MODULE_DESCRIPTION("Xceive xc4000 silicon tuner driver");
1691MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/tuners/xc4000.h b/drivers/media/common/tuners/xc4000.h
new file mode 100644
index 000000000000..e6a44d151cbd
--- /dev/null
+++ b/drivers/media/common/tuners/xc4000.h
@@ -0,0 +1,67 @@
1/*
2 * Driver for Xceive XC4000 "QAM/8VSB single chip tuner"
3 *
4 * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#ifndef __XC4000_H__
23#define __XC4000_H__
24
25#include <linux/firmware.h>
26
27struct dvb_frontend;
28struct i2c_adapter;
29
/* Board-specific configuration handed to xc4000_attach(). */
struct xc4000_config {
	/* tuner I2C address */
	u8 i2c_address;
	/* if non-zero, power management is enabled by default */
	u8 default_pm;
	/* value to be written to XREG_AMPLITUDE in DVB-T mode (0: no write) */
	u8 dvb_amplitude;
	/* if non-zero, register 0x0E is set to filter analog TV video output */
	u8 set_smoothedcvbs;
	/* IF for DVB-T */
	u32 if_khz;
};
41
42/* xc4000 callback command */
43#define XC4000_TUNER_RESET 0
44
45/* For each bridge framework, when it attaches either analog or digital,
46 * it has to store a reference back to its _core equivalent structure,
47 * so that it can service the hardware by steering gpio's etc.
48 * Each bridge implementation is different so cast devptr accordingly.
49 * The xc4000 driver cares not for this value, other than ensuring
50 * it's passed back to a bridge during tuner_callback().
51 */
52
#if defined(CONFIG_MEDIA_TUNER_XC4000) || (defined(CONFIG_MEDIA_TUNER_XC4000_MODULE) && defined(MODULE))
/* Attach an XC4000 tuner to @fe; returns @fe or NULL on failure. */
extern struct dvb_frontend *xc4000_attach(struct dvb_frontend *fe,
					  struct i2c_adapter *i2c,
					  struct xc4000_config *cfg);
#else
/* Stub used when the driver is disabled in Kconfig: warn and fail. */
static inline struct dvb_frontend *xc4000_attach(struct dvb_frontend *fe,
						 struct i2c_adapter *i2c,
						 struct xc4000_config *cfg)
{
	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
	return NULL;
}
#endif
66
67#endif
diff --git a/drivers/media/dvb/Kconfig b/drivers/media/dvb/Kconfig
index ee214c3b63d7..f6e40b3a44cc 100644
--- a/drivers/media/dvb/Kconfig
+++ b/drivers/media/dvb/Kconfig
@@ -80,6 +80,10 @@ comment "Supported nGene Adapters"
80 depends on DVB_CORE && PCI && I2C 80 depends on DVB_CORE && PCI && I2C
81 source "drivers/media/dvb/ngene/Kconfig" 81 source "drivers/media/dvb/ngene/Kconfig"
82 82
83comment "Supported ddbridge ('Octopus') Adapters"
84 depends on DVB_CORE && PCI && I2C
85 source "drivers/media/dvb/ddbridge/Kconfig"
86
83comment "Supported DVB Frontends" 87comment "Supported DVB Frontends"
84 depends on DVB_CORE 88 depends on DVB_CORE
85source "drivers/media/dvb/frontends/Kconfig" 89source "drivers/media/dvb/frontends/Kconfig"
diff --git a/drivers/media/dvb/Makefile b/drivers/media/dvb/Makefile
index a1a08758a6f2..b2cefe637a64 100644
--- a/drivers/media/dvb/Makefile
+++ b/drivers/media/dvb/Makefile
@@ -15,6 +15,7 @@ obj-y := dvb-core/ \
15 dm1105/ \ 15 dm1105/ \
16 pt1/ \ 16 pt1/ \
17 mantis/ \ 17 mantis/ \
18 ngene/ 18 ngene/ \
19 ddbridge/
19 20
20obj-$(CONFIG_DVB_FIREDTV) += firewire/ 21obj-$(CONFIG_DVB_FIREDTV) += firewire/
diff --git a/drivers/media/dvb/bt8xx/dvb-bt8xx.c b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
index 1e1106dcd063..521d69104982 100644
--- a/drivers/media/dvb/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
@@ -892,7 +892,7 @@ static int __devinit dvb_bt8xx_probe(struct bttv_sub_device *sub)
892 if (!(bttv_pci_dev = bttv_get_pcidev(card->bttv_nr))) { 892 if (!(bttv_pci_dev = bttv_get_pcidev(card->bttv_nr))) {
893 printk("dvb_bt8xx: no pci device for card %d\n", card->bttv_nr); 893 printk("dvb_bt8xx: no pci device for card %d\n", card->bttv_nr);
894 kfree(card); 894 kfree(card);
895 return -EFAULT; 895 return -ENODEV;
896 } 896 }
897 897
898 if (!(card->bt = dvb_bt8xx_878_match(card->bttv_nr, bttv_pci_dev))) { 898 if (!(card->bt = dvb_bt8xx_878_match(card->bttv_nr, bttv_pci_dev))) {
@@ -902,7 +902,7 @@ static int __devinit dvb_bt8xx_probe(struct bttv_sub_device *sub)
902 "installed, try removing it.\n"); 902 "installed, try removing it.\n");
903 903
904 kfree(card); 904 kfree(card);
905 return -EFAULT; 905 return -ENODEV;
906 } 906 }
907 907
908 mutex_init(&card->bt->gpio_lock); 908 mutex_init(&card->bt->gpio_lock);
diff --git a/drivers/media/dvb/ddbridge/Kconfig b/drivers/media/dvb/ddbridge/Kconfig
new file mode 100644
index 000000000000..d099e1a12c85
--- /dev/null
+++ b/drivers/media/dvb/ddbridge/Kconfig
@@ -0,0 +1,18 @@
1config DVB_DDBRIDGE
2 tristate "Digital Devices bridge support"
3 depends on DVB_CORE && PCI && I2C
4 select DVB_LNBP21 if !DVB_FE_CUSTOMISE
5 select DVB_STV6110x if !DVB_FE_CUSTOMISE
6 select DVB_STV090x if !DVB_FE_CUSTOMISE
7 select DVB_DRXK if !DVB_FE_CUSTOMISE
8 select DVB_TDA18271C2DD if !DVB_FE_CUSTOMISE
9 ---help---
10 Support for cards with the Digital Devices PCI express bridge:
11 - Octopus PCIe Bridge
12 - Octopus mini PCIe Bridge
13 - Octopus LE
14 - DuoFlex S2 Octopus
15 - DuoFlex CT Octopus
16 - cineS2(v6)
17
18 Say Y if you own such a card and want to use it.
diff --git a/drivers/media/dvb/ddbridge/Makefile b/drivers/media/dvb/ddbridge/Makefile
new file mode 100644
index 000000000000..de4fe193c3ef
--- /dev/null
+++ b/drivers/media/dvb/ddbridge/Makefile
@@ -0,0 +1,14 @@
1#
2# Makefile for the ddbridge device driver
3#
4
5ddbridge-objs := ddbridge-core.o
6
7obj-$(CONFIG_DVB_DDBRIDGE) += ddbridge.o
8
9EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
10EXTRA_CFLAGS += -Idrivers/media/dvb/frontends/
11EXTRA_CFLAGS += -Idrivers/media/common/tuners/
12
13# For the staging CI driver cxd2099
14EXTRA_CFLAGS += -Idrivers/staging/cxd2099/
diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
new file mode 100644
index 000000000000..573d540f213e
--- /dev/null
+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
@@ -0,0 +1,1719 @@
1/*
2 * ddbridge.c: Digital Devices PCIe bridge driver
3 *
4 * Copyright (C) 2010-2011 Digital Devices GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 only, as published by the Free Software Foundation.
9 *
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA
21 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
22 */
23
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/interrupt.h>
27#include <linux/delay.h>
28#include <linux/slab.h>
29#include <linux/poll.h>
30#include <linux/io.h>
31#include <linux/pci.h>
32#include <linux/pci_ids.h>
33#include <linux/timer.h>
34#include <linux/version.h>
35#include <linux/i2c.h>
36#include <linux/swab.h>
37#include <linux/vmalloc.h>
38#include "ddbridge.h"
39
40#include "ddbridge-regs.h"
41
42#include "tda18271c2dd.h"
43#include "stv6110x.h"
44#include "stv090x.h"
45#include "lnbh24.h"
46#include "drxk.h"
47
48DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
49
50/* MSI had problems with lost interrupts, fixed but needs testing */
51#undef CONFIG_PCI_MSI
52
53/******************************************************************************/
54
55static int i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val)
56{
57 struct i2c_msg msgs[1] = {{.addr = adr, .flags = I2C_M_RD,
58 .buf = val, .len = 1 } };
59 return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1;
60}
61
62static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr, u8 reg, u8 *val)
63{
64 struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
65 .buf = &reg, .len = 1 },
66 {.addr = adr, .flags = I2C_M_RD,
67 .buf = val, .len = 1 } };
68 return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
69}
70
71static int i2c_read_reg16(struct i2c_adapter *adapter, u8 adr,
72 u16 reg, u8 *val)
73{
74 u8 msg[2] = {reg>>8, reg&0xff};
75 struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
76 .buf = msg, .len = 2},
77 {.addr = adr, .flags = I2C_M_RD,
78 .buf = val, .len = 1} };
79 return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
80}
81
/*
 * Issue one command to the bridge's hardware I2C master and wait (up to
 * one second) for the completion interrupt to set i2c->done.  Returns 0
 * on success, -EIO on timeout or when the command register reports an
 * error (bits 16-18).
 */
static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd)
{
	struct ddb *dev = i2c->dev;	/* used implicitly by ddb{read,write}l */
	int stat;
	u32 val;

	i2c->done = 0;
	/* 7-bit address in bits 9+, command code in the low bits */
	ddbwritel((adr << 9) | cmd, i2c->regs + I2C_COMMAND);
	stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ);
	if (stat <= 0) {
		printk(KERN_ERR "I2C timeout\n");
		{ /* MSI debugging: dump and ack pending interrupts */
			u32 istat = ddbreadl(INTERRUPT_STATUS);
			printk(KERN_ERR "IRS %08x\n", istat);
			ddbwritel(istat, INTERRUPT_ACK);
		}
		return -EIO;
	}
	val = ddbreadl(i2c->regs+I2C_COMMAND);
	if (val & 0x70000)
		return -EIO;
	return 0;
}
105
/*
 * i2c_algorithm master_xfer: forward a message set to the hardware I2C
 * master through its task memory window.  Only three shapes are
 * supported: write+read (cmd 1), single write (cmd 2) and single read
 * (cmd 3); anything else falls through to -EIO.
 */
static int ddb_i2c_master_xfer(struct i2c_adapter *adapter,
			       struct i2c_msg msg[], int num)
{
	struct ddb_i2c *i2c = (struct ddb_i2c *)i2c_get_adapdata(adapter);
	struct ddb *dev = i2c->dev;
	u8 addr = 0;

	if (num)
		addr = msg[0].addr;

	/* write-then-read: stage the write, lengths packed lo/hi */
	if (num == 2 && msg[1].flags & I2C_M_RD &&
	    !(msg[0].flags & I2C_M_RD)) {
		memcpy_toio(dev->regs + I2C_TASKMEM_BASE + i2c->wbuf,
			    msg[0].buf, msg[0].len);
		ddbwritel(msg[0].len|(msg[1].len << 16),
			  i2c->regs+I2C_TASKLENGTH);
		if (!ddb_i2c_cmd(i2c, addr, 1)) {
			memcpy_fromio(msg[1].buf,
				      dev->regs + I2C_TASKMEM_BASE + i2c->rbuf,
				      msg[1].len);
			return num;
		}
	}

	/* plain write */
	if (num == 1 && !(msg[0].flags & I2C_M_RD)) {
		ddbcpyto(I2C_TASKMEM_BASE + i2c->wbuf, msg[0].buf, msg[0].len);
		ddbwritel(msg[0].len, i2c->regs + I2C_TASKLENGTH);
		if (!ddb_i2c_cmd(i2c, addr, 2))
			return num;
	}
	/* plain read: read length lives in the high half-word */
	if (num == 1 && (msg[0].flags & I2C_M_RD)) {
		ddbwritel(msg[0].len << 16, i2c->regs + I2C_TASKLENGTH);
		if (!ddb_i2c_cmd(i2c, addr, 3)) {
			ddbcpyfrom(msg[0].buf,
				   I2C_TASKMEM_BASE + i2c->rbuf, msg[0].len);
			return num;
		}
	}
	return -EIO;
}
146
147
/* Advertise SMBus-emulation capability only. */
static u32 ddb_i2c_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_SMBUS_EMUL;
}
152
153struct i2c_algorithm ddb_i2c_algo = {
154 .master_xfer = ddb_i2c_master_xfer,
155 .functionality = ddb_i2c_functionality,
156};
157
158static void ddb_i2c_release(struct ddb *dev)
159{
160 int i;
161 struct ddb_i2c *i2c;
162 struct i2c_adapter *adap;
163
164 for (i = 0; i < dev->info->port_num; i++) {
165 i2c = &dev->i2c[i];
166 adap = &i2c->adap;
167 i2c_del_adapter(adap);
168 }
169}
170
/*
 * Set up one hardware I2C master per port: carve per-master read/write
 * windows out of the shared task memory, program speed and window
 * addresses, then register a Linux i2c_adapter for each.  On failure
 * every adapter registered so far is removed again.  Returns 0 or the
 * i2c_add_adapter() error.
 */
static int ddb_i2c_init(struct ddb *dev)
{
	int i, j, stat = 0;
	struct ddb_i2c *i2c;
	struct i2c_adapter *adap;

	for (i = 0; i < dev->info->port_num; i++) {
		i2c = &dev->i2c[i];
		i2c->dev = dev;
		i2c->nr = i;
		/* per-master slices of task memory: write, then read half */
		i2c->wbuf = i * (I2C_TASKMEM_SIZE / 4);
		i2c->rbuf = i2c->wbuf + (I2C_TASKMEM_SIZE / 8);
		/* register blocks are 0x20 apart starting at 0x80 */
		i2c->regs = 0x80 + i * 0x20;
		ddbwritel(I2C_SPEED_100, i2c->regs + I2C_TIMING);
		ddbwritel((i2c->rbuf << 16) | i2c->wbuf,
			  i2c->regs + I2C_TASKADDRESS);
		init_waitqueue_head(&i2c->wq);

		adap = &i2c->adap;
		i2c_set_adapdata(adap, i2c);
/* adapter class constants vary across kernel versions */
#ifdef I2C_ADAP_CLASS_TV_DIGITAL
		adap->class = I2C_ADAP_CLASS_TV_DIGITAL|I2C_CLASS_TV_ANALOG;
#else
#ifdef I2C_CLASS_TV_ANALOG
		adap->class = I2C_CLASS_TV_ANALOG;
#endif
#endif
		strcpy(adap->name, "ddbridge");
		adap->algo = &ddb_i2c_algo;
		adap->algo_data = (void *)i2c;
		adap->dev.parent = &dev->pdev->dev;
		stat = i2c_add_adapter(adap);
		if (stat)
			break;
	}
	/* unwind partial registration on error */
	if (stat)
		for (j = 0; j < i; j++) {
			i2c = &dev->i2c[j];
			adap = &i2c->adap;
			i2c_del_adapter(adap);
		}
	return stat;
}
214
215
216/******************************************************************************/
217/******************************************************************************/
218/******************************************************************************/
219
/* Disabled helper kept for reference: program one DMA address table
 * entry block; superseded by ddb_address_table() below. */
#if 0
static void set_table(struct ddb *dev, u32 off,
		      dma_addr_t *pbuf, u32 num)
{
	u32 i, base;
	u64 mem;

	base = DMA_BASE_ADDRESS_TABLE + off;
	for (i = 0; i < num; i++) {
		mem = pbuf[i];
		ddbwritel(mem & 0xffffffff, base + i * 8);
		ddbwritel(mem >> 32, base + i * 8 + 4);
	}
}
#endif
235
/*
 * Program the hardware DMA base-address table with the bus addresses of
 * all input and output buffers.  Each channel gets a 0x100-byte slot of
 * 64-bit little-halved entries; outputs start at table offset 0x800.
 */
static void ddb_address_table(struct ddb *dev)
{
	u32 i, j, base;
	u64 mem;
	dma_addr_t *pbuf;

	/* two inputs per port */
	for (i = 0; i < dev->info->port_num * 2; i++) {
		base = DMA_BASE_ADDRESS_TABLE + i * 0x100;
		pbuf = dev->input[i].pbuf;
		for (j = 0; j < dev->input[i].dma_buf_num; j++) {
			mem = pbuf[j];
			ddbwritel(mem & 0xffffffff, base + j * 8);
			ddbwritel(mem >> 32, base + j * 8 + 4);
		}
	}
	/* one output per port */
	for (i = 0; i < dev->info->port_num; i++) {
		base = DMA_BASE_ADDRESS_TABLE + 0x800 + i * 0x100;
		pbuf = dev->output[i].pbuf;
		for (j = 0; j < dev->output[i].dma_buf_num; j++) {
			mem = pbuf[j];
			ddbwritel(mem & 0xffffffff, base + j * 8);
			ddbwritel(mem >> 32, base + j * 8 + 4);
		}
	}
}
261
262static void io_free(struct pci_dev *pdev, u8 **vbuf,
263 dma_addr_t *pbuf, u32 size, int num)
264{
265 int i;
266
267 for (i = 0; i < num; i++) {
268 if (vbuf[i]) {
269 pci_free_consistent(pdev, size, vbuf[i], pbuf[i]);
270 vbuf[i] = 0;
271 }
272 }
273}
274
275static int io_alloc(struct pci_dev *pdev, u8 **vbuf,
276 dma_addr_t *pbuf, u32 size, int num)
277{
278 int i;
279
280 for (i = 0; i < num; i++) {
281 vbuf[i] = pci_alloc_consistent(pdev, size, &pbuf[i]);
282 if (!vbuf[i])
283 return -ENOMEM;
284 }
285 return 0;
286}
287
288static int ddb_buffers_alloc(struct ddb *dev)
289{
290 int i;
291 struct ddb_port *port;
292
293 for (i = 0; i < dev->info->port_num; i++) {
294 port = &dev->port[i];
295 switch (port->class) {
296 case DDB_PORT_TUNER:
297 if (io_alloc(dev->pdev, port->input[0]->vbuf,
298 port->input[0]->pbuf,
299 port->input[0]->dma_buf_size,
300 port->input[0]->dma_buf_num) < 0)
301 return -1;
302 if (io_alloc(dev->pdev, port->input[1]->vbuf,
303 port->input[1]->pbuf,
304 port->input[1]->dma_buf_size,
305 port->input[1]->dma_buf_num) < 0)
306 return -1;
307 break;
308 case DDB_PORT_CI:
309 if (io_alloc(dev->pdev, port->input[0]->vbuf,
310 port->input[0]->pbuf,
311 port->input[0]->dma_buf_size,
312 port->input[0]->dma_buf_num) < 0)
313 return -1;
314 if (io_alloc(dev->pdev, port->output->vbuf,
315 port->output->pbuf,
316 port->output->dma_buf_size,
317 port->output->dma_buf_num) < 0)
318 return -1;
319 break;
320 default:
321 break;
322 }
323 }
324 ddb_address_table(dev);
325 return 0;
326}
327
328static void ddb_buffers_free(struct ddb *dev)
329{
330 int i;
331 struct ddb_port *port;
332
333 for (i = 0; i < dev->info->port_num; i++) {
334 port = &dev->port[i];
335 io_free(dev->pdev, port->input[0]->vbuf,
336 port->input[0]->pbuf,
337 port->input[0]->dma_buf_size,
338 port->input[0]->dma_buf_num);
339 io_free(dev->pdev, port->input[1]->vbuf,
340 port->input[1]->pbuf,
341 port->input[1]->dma_buf_size,
342 port->input[1]->dma_buf_num);
343 io_free(dev->pdev, port->output->vbuf,
344 port->output->pbuf,
345 port->output->dma_buf_size,
346 port->output->dma_buf_num);
347 }
348}
349
/*
 * Reset and start TS capture on one input channel: rewind the software
 * ring position, pulse the input-control reset, program the DMA buffer
 * geometry and enable DMA + input.  The magic control values (2/0/9,
 * 1, 3) are hardware-defined — see ddbridge-regs.h for the register
 * layout.
 */
static void ddb_input_start(struct ddb_input *input)
{
	struct ddb *dev = input->port->dev;	/* used by ddbwritel */

	spin_lock_irq(&input->lock);
	input->cbuf = 0;
	input->coff = 0;

	/* reset */
	ddbwritel(0, TS_INPUT_CONTROL(input->nr));
	ddbwritel(2, TS_INPUT_CONTROL(input->nr));
	ddbwritel(0, TS_INPUT_CONTROL(input->nr));

	/* buffer count (bits 11+) and size in 128-byte units */
	ddbwritel((1 << 16) |
		  (input->dma_buf_num << 11) |
		  (input->dma_buf_size >> 7),
		  DMA_BUFFER_SIZE(input->nr));
	ddbwritel(0, DMA_BUFFER_ACK(input->nr));

	ddbwritel(1, DMA_BASE_WRITE);
	ddbwritel(3, DMA_BUFFER_CONTROL(input->nr));
	ddbwritel(9, TS_INPUT_CONTROL(input->nr));
	input->running = 1;
	spin_unlock_irq(&input->lock);
}
375
/* Stop TS capture on one input: disable the input and its DMA channel. */
static void ddb_input_stop(struct ddb_input *input)
{
	struct ddb *dev = input->port->dev;	/* used by ddbwritel */

	spin_lock_irq(&input->lock);
	ddbwritel(0, TS_INPUT_CONTROL(input->nr));
	ddbwritel(0, DMA_BUFFER_CONTROL(input->nr));
	input->running = 0;
	spin_unlock_irq(&input->lock);
}
386
/*
 * Reset and start one TS output channel.  Output DMA channels live at
 * hardware index nr + 8 (after the 8 input channels).  Control values
 * are hardware-defined; 0x1d enables the output (0xbd variant kept as
 * a reference in the comment below).
 */
static void ddb_output_start(struct ddb_output *output)
{
	struct ddb *dev = output->port->dev;	/* used by ddbwritel */

	spin_lock_irq(&output->lock);
	output->cbuf = 0;
	output->coff = 0;
	/* reset pulse, then configure */
	ddbwritel(0, TS_OUTPUT_CONTROL(output->nr));
	ddbwritel(2, TS_OUTPUT_CONTROL(output->nr));
	ddbwritel(0, TS_OUTPUT_CONTROL(output->nr));
	ddbwritel(0x3c, TS_OUTPUT_CONTROL(output->nr));
	/* buffer count (bits 11+) and size in 128-byte units */
	ddbwritel((1 << 16) |
		  (output->dma_buf_num << 11) |
		  (output->dma_buf_size >> 7),
		  DMA_BUFFER_SIZE(output->nr + 8));
	ddbwritel(0, DMA_BUFFER_ACK(output->nr + 8));

	ddbwritel(1, DMA_BASE_READ);
	ddbwritel(3, DMA_BUFFER_CONTROL(output->nr + 8));
	/* ddbwritel(0xbd, TS_OUTPUT_CONTROL(output->nr)); */
	ddbwritel(0x1d, TS_OUTPUT_CONTROL(output->nr));
	output->running = 1;
	spin_unlock_irq(&output->lock);
}
411
/* Stop one TS output: disable the output and its DMA channel (nr + 8). */
static void ddb_output_stop(struct ddb_output *output)
{
	struct ddb *dev = output->port->dev;	/* used by ddbwritel */

	spin_lock_irq(&output->lock);
	ddbwritel(0, TS_OUTPUT_CONTROL(output->nr));
	ddbwritel(0, DMA_BUFFER_CONTROL(output->nr + 8));
	output->running = 0;
	spin_unlock_irq(&output->lock);
}
422
/*
 * Return how much room the caller may write into the output ring: 188
 * (one TS packet) or 0.  output->stat packs the hardware read position:
 * buffer index in bits 11-15, offset in 128-byte units in bits 0-10.
 * The checks deliberately keep at least one packet of slack so the
 * software write position never catches up with the hardware reader.
 */
static u32 ddb_output_free(struct ddb_output *output)
{
	u32 idx, off, stat = output->stat;
	s32 diff;

	idx = (stat >> 11) & 0x1f;
	off = (stat & 0x7ff) << 7;

	if (output->cbuf != idx) {
		/* writer one buffer behind reader and near its end: full */
		if ((((output->cbuf + 1) % output->dma_buf_num) == idx) &&
		    (output->dma_buf_size - output->coff <= 188))
			return 0;
		return 188;
	}
	/* same buffer: room only if reader is not within one packet ahead */
	diff = off - output->coff;
	if (diff <= 0 || diff > 188)
		return 188;
	return 0;
}
442
443static ssize_t ddb_output_write(struct ddb_output *output,
444 const u8 *buf, size_t count)
445{
446 struct ddb *dev = output->port->dev;
447 u32 idx, off, stat = output->stat;
448 u32 left = count, len;
449
450 idx = (stat >> 11) & 0x1f;
451 off = (stat & 0x7ff) << 7;
452
453 while (left) {
454 len = output->dma_buf_size - output->coff;
455 if ((((output->cbuf + 1) % output->dma_buf_num) == idx) &&
456 (off == 0)) {
457 if (len <= 188)
458 break;
459 len -= 188;
460 }
461 if (output->cbuf == idx) {
462 if (off > output->coff) {
463#if 1
464 len = off - output->coff;
465 len -= (len % 188);
466 if (len <= 188)
467
468#endif
469 break;
470 len -= 188;
471 }
472 }
473 if (len > left)
474 len = left;
475 if (copy_from_user(output->vbuf[output->cbuf] + output->coff,
476 buf, len))
477 return -EIO;
478 left -= len;
479 buf += len;
480 output->coff += len;
481 if (output->coff == output->dma_buf_size) {
482 output->coff = 0;
483 output->cbuf = ((output->cbuf + 1) % output->dma_buf_num);
484 }
485 ddbwritel((output->cbuf << 11) | (output->coff >> 7),
486 DMA_BUFFER_ACK(output->nr + 8));
487 }
488 return count - left;
489}
490
/*
 * Return 188 if at least one completed DMA buffer is available to read
 * from this input, 0 otherwise.  Bit 2 of the DMA control register
 * apparently signals an overrun/error condition — confirm against the
 * hardware documentation; on it, the current position is re-acked and
 * 0 is returned.
 */
static u32 ddb_input_avail(struct ddb_input *input)
{
	struct ddb *dev = input->port->dev;	/* used by ddb{read,write}l */
	u32 idx, off, stat = input->stat;
	u32 ctrl = ddbreadl(DMA_BUFFER_CONTROL(input->nr));

	/* hardware write position: buffer index and byte offset */
	idx = (stat >> 11) & 0x1f;
	off = (stat & 0x7ff) << 7;

	if (ctrl & 4) {
		printk(KERN_ERR "IA %d %d %08x\n", idx, off, ctrl);
		ddbwritel(input->stat, DMA_BUFFER_ACK(input->nr));
		return 0;
	}
	if (input->cbuf != idx)
		return 188;
	return 0;
}
509
510static size_t ddb_input_read(struct ddb_input *input, u8 *buf, size_t count)
511{
512 struct ddb *dev = input->port->dev;
513 u32 left = count;
514 u32 idx, off, free, stat = input->stat;
515 int ret;
516
517 idx = (stat >> 11) & 0x1f;
518 off = (stat & 0x7ff) << 7;
519
520 while (left) {
521 if (input->cbuf == idx)
522 return count - left;
523 free = input->dma_buf_size - input->coff;
524 if (free > left)
525 free = left;
526 ret = copy_to_user(buf, input->vbuf[input->cbuf] +
527 input->coff, free);
528 input->coff += free;
529 if (input->coff == input->dma_buf_size) {
530 input->coff = 0;
531 input->cbuf = (input->cbuf+1) % input->dma_buf_num;
532 }
533 left -= free;
534 ddbwritel((input->cbuf << 11) | (input->coff >> 7),
535 DMA_BUFFER_ACK(input->nr));
536 }
537 return count;
538}
539
540/******************************************************************************/
541/******************************************************************************/
542/******************************************************************************/
543
/* Disabled helper kept for reference: map a frontend back to its input. */
#if 0
static struct ddb_input *fe2input(struct ddb *dev, struct dvb_frontend *fe)
{
	int i;

	for (i = 0; i < dev->info->port_num * 2; i++) {
		if (dev->input[i].fe == fe)
			return &dev->input[i];
	}
	return NULL;
}
#endif
556
/*
 * Wrapper installed over the DRX-K's i2c_gate_ctrl (see
 * demod_attach_drxk): the port's gate lock is taken on enable and only
 * released on disable, so the whole gated-bus transaction (e.g. a tuner
 * access behind the gate) is serialized against the port's other users.
 * The asymmetric lock/unlock across calls is therefore deliberate.
 */
static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
{
	struct ddb_input *input = fe->sec_priv;
	struct ddb_port *port = input->port;
	int status;

	if (enable) {
		mutex_lock(&port->i2c_gate_lock);
		status = input->gate_ctrl(fe, 1);
	} else {
		status = input->gate_ctrl(fe, 0);
		mutex_unlock(&port->i2c_gate_lock);
	}
	return status;
}
572
573static int demod_attach_drxk(struct ddb_input *input)
574{
575 struct i2c_adapter *i2c = &input->port->i2c->adap;
576 struct dvb_frontend *fe;
577 struct drxk_config config;
578
579 memset(&config, 0, sizeof(config));
580 config.adr = 0x29 + (input->nr & 1);
581
582 fe = input->fe = dvb_attach(drxk_attach, &config, i2c, &input->fe2);
583 if (!input->fe) {
584 printk(KERN_ERR "No DRXK found!\n");
585 return -ENODEV;
586 }
587 fe->sec_priv = input;
588 input->gate_ctrl = fe->ops.i2c_gate_ctrl;
589 fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
590 return 0;
591}
592
593static int tuner_attach_tda18271(struct ddb_input *input)
594{
595 struct i2c_adapter *i2c = &input->port->i2c->adap;
596 struct dvb_frontend *fe;
597
598 if (input->fe->ops.i2c_gate_ctrl)
599 input->fe->ops.i2c_gate_ctrl(input->fe, 1);
600 fe = dvb_attach(tda18271c2dd_attach, input->fe, i2c, 0x60);
601 if (!fe) {
602 printk(KERN_ERR "No TDA18271 found!\n");
603 return -ENODEV;
604 }
605 if (input->fe->ops.i2c_gate_ctrl)
606 input->fe->ops.i2c_gate_ctrl(input->fe, 0);
607 return 0;
608}
609
610/******************************************************************************/
611/******************************************************************************/
612/******************************************************************************/
613
/*
 * Static configurations for the DVB-S/S2 frontends: two STV0900 dual
 * demod variants that differ only in I2C address (0x69 vs. 0x68), and
 * the two STV6110x tuner slots at 0x60/0x63 (selected by input parity
 * in tuner_attach_stv6110()).
 */
static struct stv090x_config stv0900 = {
	.device = STV0900,
	.demod_mode = STV090x_DUAL,
	.clk_mode = STV090x_CLK_EXT,

	.xtal = 27000000,
	.address = 0x69,

	.ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
	.ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,

	.repeater_level = STV090x_RPTLEVEL_16,

	.adc1_range = STV090x_ADC_1Vpp,
	.adc2_range = STV090x_ADC_1Vpp,

	.diseqc_envelope_mode = true,
};

/* same part at the alternate I2C address */
static struct stv090x_config stv0900_aa = {
	.device = STV0900,
	.demod_mode = STV090x_DUAL,
	.clk_mode = STV090x_CLK_EXT,

	.xtal = 27000000,
	.address = 0x68,

	.ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
	.ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,

	.repeater_level = STV090x_RPTLEVEL_16,

	.adc1_range = STV090x_ADC_1Vpp,
	.adc2_range = STV090x_ADC_1Vpp,

	.diseqc_envelope_mode = true,
};

static struct stv6110x_config stv6110a = {
	.addr = 0x60,
	.refclk = 27000000,
	.clk_div = 1,
};

static struct stv6110x_config stv6110b = {
	.addr = 0x63,
	.refclk = 27000000,
	.clk_div = 1,
};
663
/*
 * Attach one path of an STV0900 dual demod (demod 0/1 chosen by input
 * parity) plus its LNBH24 LNB supply.  @type selects the config/address
 * variant (0: stv0900 at 0x69, non-zero: stv0900_aa at 0x68); the
 * LNBH24 address is derived from input parity and @type.
 * Returns 0 or -ENODEV.
 */
static int demod_attach_stv0900(struct ddb_input *input, int type)
{
	struct i2c_adapter *i2c = &input->port->i2c->adap;
	struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;

	input->fe = dvb_attach(stv090x_attach, feconf, i2c,
			       (input->nr & 1) ? STV090x_DEMODULATOR_1
			       : STV090x_DEMODULATOR_0);
	if (!input->fe) {
		printk(KERN_ERR "No STV0900 found!\n");
		return -ENODEV;
	}
	if (!dvb_attach(lnbh24_attach, input->fe, i2c, 0,
			0, (input->nr & 1) ?
			(0x09 - type) : (0x0b - type))) {
		printk(KERN_ERR "No LNBH24 found!\n");
		return -ENODEV;
	}
	return 0;
}
684
685static int tuner_attach_stv6110(struct ddb_input *input, int type)
686{
687 struct i2c_adapter *i2c = &input->port->i2c->adap;
688 struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
689 struct stv6110x_config *tunerconf = (input->nr & 1) ?
690 &stv6110b : &stv6110a;
691 struct stv6110x_devctl *ctl;
692
693 ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c);
694 if (!ctl) {
695 printk(KERN_ERR "No STV6110X found!\n");
696 return -ENODEV;
697 }
698 printk(KERN_INFO "attach tuner input %d adr %02x\n",
699 input->nr, tunerconf->addr);
700
701 feconf->tuner_init = ctl->tuner_init;
702 feconf->tuner_sleep = ctl->tuner_sleep;
703 feconf->tuner_set_mode = ctl->tuner_set_mode;
704 feconf->tuner_set_frequency = ctl->tuner_set_frequency;
705 feconf->tuner_get_frequency = ctl->tuner_get_frequency;
706 feconf->tuner_set_bandwidth = ctl->tuner_set_bandwidth;
707 feconf->tuner_get_bandwidth = ctl->tuner_get_bandwidth;
708 feconf->tuner_set_bbgain = ctl->tuner_set_bbgain;
709 feconf->tuner_get_bbgain = ctl->tuner_get_bbgain;
710 feconf->tuner_set_refclk = ctl->tuner_set_refclk;
711 feconf->tuner_get_status = ctl->tuner_get_status;
712
713 return 0;
714}
715
716static int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
717 int (*start_feed)(struct dvb_demux_feed *),
718 int (*stop_feed)(struct dvb_demux_feed *),
719 void *priv)
720{
721 dvbdemux->priv = priv;
722
723 dvbdemux->filternum = 256;
724 dvbdemux->feednum = 256;
725 dvbdemux->start_feed = start_feed;
726 dvbdemux->stop_feed = stop_feed;
727 dvbdemux->write_to_decoder = NULL;
728 dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
729 DMX_SECTION_FILTERING |
730 DMX_MEMORY_BASED_FILTERING);
731 return dvb_dmx_init(dvbdemux);
732}
733
734static int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
735 struct dvb_demux *dvbdemux,
736 struct dmx_frontend *hw_frontend,
737 struct dmx_frontend *mem_frontend,
738 struct dvb_adapter *dvb_adapter)
739{
740 int ret;
741
742 dmxdev->filternum = 256;
743 dmxdev->demux = &dvbdemux->dmx;
744 dmxdev->capabilities = 0;
745 ret = dvb_dmxdev_init(dmxdev, dvb_adapter);
746 if (ret < 0)
747 return ret;
748
749 hw_frontend->source = DMX_FRONTEND_0;
750 dvbdemux->dmx.add_frontend(&dvbdemux->dmx, hw_frontend);
751 mem_frontend->source = DMX_MEMORY_FE;
752 dvbdemux->dmx.add_frontend(&dvbdemux->dmx, mem_frontend);
753 return dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, hw_frontend);
754}
755
756static int start_feed(struct dvb_demux_feed *dvbdmxfeed)
757{
758 struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
759 struct ddb_input *input = dvbdmx->priv;
760
761 if (!input->users)
762 ddb_input_start(input);
763
764 return ++input->users;
765}
766
767static int stop_feed(struct dvb_demux_feed *dvbdmxfeed)
768{
769 struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
770 struct ddb_input *input = dvbdmx->priv;
771
772 if (--input->users)
773 return input->users;
774
775 ddb_input_stop(input);
776 return 0;
777}
778
779
/*
 * Tear down a tuner input's DVB stack in reverse order of attach.
 * input->attached records how far dvb_input_attach() got; every case
 * intentionally falls through so all earlier stages are undone too.
 */
static void dvb_input_detach(struct ddb_input *input)
{
	struct dvb_adapter *adap = &input->adap;
	struct dvb_demux *dvbdemux = &input->demux;

	switch (input->attached) {
	case 5:
		/* fe2 shares fe's tuner (see dvb_input_attach()), so it
		 * is only unregistered, never detached on its own. */
		if (input->fe2)
			dvb_unregister_frontend(input->fe2);
		if (input->fe) {
			dvb_unregister_frontend(input->fe);
			dvb_frontend_detach(input->fe);
			input->fe = NULL;
		}
		/* fall through */
	case 4:
		dvb_net_release(&input->dvbnet);
		/* fall through */
	case 3:
		dvbdemux->dmx.close(&dvbdemux->dmx);
		dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
					      &input->hw_frontend);
		dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
					      &input->mem_frontend);
		dvb_dmxdev_release(&input->dmxdev);
		/* fall through */
	case 2:
		dvb_dmx_release(&input->demux);
		/* fall through */
	case 1:
		dvb_unregister_adapter(adap);
	}
	input->attached = 0;
}
813
/*
 * Build the full DVB stack for one tuner input: adapter, sw demux,
 * demux device, dvb_net, then the port-type specific frontend(s).
 * input->attached tracks progress (1..5) so dvb_input_detach() can
 * unwind a partial attach. Returns 0 or a negative error.
 *
 * NOTE(review): on frontend errors this returns with attached == 4, so
 * an already dvb_attach()ed frontend is never dvb_frontend_detach()ed
 * by the case-4 unwind path — confirm whether that leaks a module ref.
 */
static int dvb_input_attach(struct ddb_input *input)
{
	int ret;
	struct ddb_port *port = input->port;
	struct dvb_adapter *adap = &input->adap;
	struct dvb_demux *dvbdemux = &input->demux;

	ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE,
				   &input->port->dev->pdev->dev,
				   adapter_nr);
	if (ret < 0) {
		printk(KERN_ERR "ddbridge: Could not register adapter."
		       "Check if you enabled enough adapters in dvb-core!\n");
		return ret;
	}
	input->attached = 1;

	ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
				      start_feed,
				      stop_feed, input);
	if (ret < 0)
		return ret;
	input->attached = 2;

	ret = my_dvb_dmxdev_ts_card_init(&input->dmxdev, &input->demux,
					 &input->hw_frontend,
					 &input->mem_frontend, adap);
	if (ret < 0)
		return ret;
	input->attached = 3;

	ret = dvb_net_init(adap, &input->dvbnet, input->dmxdev.demux);
	if (ret < 0)
		return ret;
	input->attached = 4;

	input->fe = 0;	/* idiomatically NULL */
	switch (port->type) {
	case DDB_TUNER_DVBS_ST:
		if (demod_attach_stv0900(input, 0) < 0)
			return -ENODEV;
		if (tuner_attach_stv6110(input, 0) < 0)
			return -ENODEV;
		if (input->fe) {
			if (dvb_register_frontend(adap, input->fe) < 0)
				return -ENODEV;
		}
		break;
	case DDB_TUNER_DVBS_ST_AA:
		if (demod_attach_stv0900(input, 1) < 0)
			return -ENODEV;
		if (tuner_attach_stv6110(input, 1) < 0)
			return -ENODEV;
		if (input->fe) {
			if (dvb_register_frontend(adap, input->fe) < 0)
				return -ENODEV;
		}
		break;
	case DDB_TUNER_DVBCT_TR:
		/* DRX-K provides two frontends (fe and fe2); fe2 borrows
		 * fe's tuner state and ops after registration. */
		if (demod_attach_drxk(input) < 0)
			return -ENODEV;
		if (tuner_attach_tda18271(input) < 0)
			return -ENODEV;
		if (input->fe) {
			if (dvb_register_frontend(adap, input->fe) < 0)
				return -ENODEV;
		}
		if (input->fe2) {
			if (dvb_register_frontend(adap, input->fe2) < 0)
				return -ENODEV;
			input->fe2->tuner_priv = input->fe->tuner_priv;
			memcpy(&input->fe2->ops.tuner_ops,
			       &input->fe->ops.tuner_ops,
			       sizeof(struct dvb_tuner_ops));
		}
		break;
	}
	input->attached = 5;
	return 0;
}
894
895/****************************************************************************/
896/****************************************************************************/
897
/*
 * write() for the CI TS node: push data to the output DMA ring. Blocks
 * (unless O_NONBLOCK) until at least one 188-byte packet fits; the wq
 * is woken from output_tasklet(). Returns bytes consumed, or -EAGAIN
 * if nothing could be written.
 *
 * NOTE(review): buf is a raw user pointer handed to ddb_output_write()
 * — presumably that helper does the copy_from_user; confirm, and
 * consider a __user annotation.
 */
static ssize_t ts_write(struct file *file, const char *buf,
			size_t count, loff_t *ppos)
{
	struct dvb_device *dvbdev = file->private_data;
	struct ddb_output *output = dvbdev->priv;
	size_t left = count;
	int stat;

	while (left) {
		if (ddb_output_free(output) < 188) {
			if (file->f_flags & O_NONBLOCK)
				break;
			if (wait_event_interruptible(
					output->wq, ddb_output_free(output) >= 188) < 0)
				break;
		}
		stat = ddb_output_write(output, buf, left);
		if (stat < 0)
			break;
		buf += stat;
		left -= stat;
	}
	return (left == count) ? -EAGAIN : (count - left);
}
922
/*
 * read() for the CI TS node: pull data from the port's input 0 DMA
 * ring. The requested size is rounded down to whole 188-byte packets;
 * blocks (unless O_NONBLOCK) until a packet is available. Returns
 * bytes delivered, or -EAGAIN if none.
 *
 * NOTE(review): as in ts_write(), buf is a raw user pointer forwarded
 * to ddb_input_read() — confirm the copy-to-user happens there.
 */
static ssize_t ts_read(struct file *file, char *buf,
		       size_t count, loff_t *ppos)
{
	struct dvb_device *dvbdev = file->private_data;
	struct ddb_output *output = dvbdev->priv;
	struct ddb_input *input = output->port->input[0];
	int left, read;

	count -= count % 188;	/* whole TS packets only */
	left = count;
	while (left) {
		if (ddb_input_avail(input) < 188) {
			if (file->f_flags & O_NONBLOCK)
				break;
			if (wait_event_interruptible(
					input->wq, ddb_input_avail(input) >= 188) < 0)
				break;
		}
		read = ddb_input_read(input, buf, left);
		left -= read;
		buf += read;
	}
	return (left == count) ? -EAGAIN : (count - left);
}
947
/*
 * poll() for the CI TS node. The real implementation is stubbed out
 * (#if 0 below, kept as a template); as written this always returns 0,
 * so select/poll on the CI node never signals readiness and callers
 * must rely on blocking read/write instead.
 */
static unsigned int ts_poll(struct file *file, poll_table *wait)
{
	/*
	struct dvb_device *dvbdev = file->private_data;
	struct ddb_output *output = dvbdev->priv;
	struct ddb_input *input = output->port->input[0];
	*/
	unsigned int mask = 0;

#if 0
	if (data_avail_to_read)
		mask |= POLLIN | POLLRDNORM;
	if (data_avail_to_write)
		mask |= POLLOUT | POLLWRNORM;

	poll_wait(file, &read_queue, wait);
	poll_wait(file, &write_queue, wait);
#endif
	return mask;
}
968
/* File operations for the CI transport-stream device node. */
static const struct file_operations ci_fops = {
	.owner   = THIS_MODULE,
	.read    = ts_read,
	.write   = ts_write,
	.open    = dvb_generic_open,
	.release = dvb_generic_release,
	.poll    = ts_poll,
	.mmap    = 0,	/* no mmap support; idiomatically NULL */
};

/* Template for the CI device node registered in ddb_ci_attach();
 * -1 means unlimited readers/writers/users. priv is filled in at
 * registration time. */
static struct dvb_device dvbdev_ci = {
	.priv    = 0,
	.readers = -1,
	.writers = -1,
	.users   = -1,
	.fops    = &ci_fops,
};
986
987/****************************************************************************/
988/****************************************************************************/
989/****************************************************************************/
990
/*
 * Input DMA tasklet, scheduled from irq_handler(). For tuner ports it
 * drains every completed DMA buffer into the software demux and acks
 * it back to the hardware; for CI ports it just wakes ts_read().
 *
 * The 'dev' local looks unused, but ddbreadl()/ddbwritel() are macros
 * that presumably expand to use it — do not remove (TODO confirm in
 * the macro definitions, not visible in this file chunk).
 */
static void input_tasklet(unsigned long data)
{
	struct ddb_input *input = (struct ddb_input *) data;
	struct ddb *dev = input->port->dev;

	spin_lock(&input->lock);
	if (!input->running) {
		spin_unlock(&input->lock);
		return;
	}
	input->stat = ddbreadl(DMA_BUFFER_CURRENT(input->nr));

	if (input->port->class == DDB_PORT_TUNER) {
		/* Bit 2 of the buffer control register appears to flag
		 * an overflow condition — TODO confirm vs. register spec. */
		if (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))
			printk(KERN_ERR "Overflow input %d\n", input->nr);
		/* Buffer index the hardware is writing lives in bits
		 * 15:11 of the status word; consume until we catch up. */
		while (input->cbuf != ((input->stat >> 11) & 0x1f)
		       || (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))) {
			dvb_dmx_swfilter_packets(&input->demux,
						 input->vbuf[input->cbuf],
						 input->dma_buf_size / 188);

			input->cbuf = (input->cbuf + 1) % input->dma_buf_num;
			ddbwritel((input->cbuf << 11),
				  DMA_BUFFER_ACK(input->nr));
			input->stat = ddbreadl(DMA_BUFFER_CURRENT(input->nr));
		}
	}
	if (input->port->class == DDB_PORT_CI)
		wake_up(&input->wq);
	spin_unlock(&input->lock);
}
1022
/*
 * Output DMA tasklet: snapshot the hardware write position and wake
 * any ts_write() waiting for ring space. As in input_tasklet(), 'dev'
 * is consumed implicitly by the ddbreadl() macro.
 */
static void output_tasklet(unsigned long data)
{
	struct ddb_output *output = (struct ddb_output *) data;
	struct ddb *dev = output->port->dev;

	spin_lock(&output->lock);
	if (!output->running) {
		spin_unlock(&output->lock);
		return;
	}
	/* Output DMA channels sit at register slots nr + 8. */
	output->stat = ddbreadl(DMA_BUFFER_CURRENT(output->nr + 8));
	wake_up(&output->wq);
	spin_unlock(&output->lock);
}
1037
1038
1039struct cxd2099_cfg cxd_cfg = {
1040 .bitrate = 62000,
1041 .adr = 0x40,
1042 .polarity = 1,
1043 .clock_mode = 1,
1044};
1045
/*
 * Register a CI port: DVB adapter, CXD2099 CI bridge, both DMA engines,
 * the EN50221 CA layer, and the raw TS device node (dvbdev_ci).
 * Returns 0 or a negative error; on cxd2099 failure the adapter is
 * unregistered again.
 */
static int ddb_ci_attach(struct ddb_port *port)
{
	int ret;

	ret = dvb_register_adapter(&port->output->adap,
				   "DDBridge",
				   THIS_MODULE,
				   &port->dev->pdev->dev,
				   adapter_nr);
	if (ret < 0)
		return ret;
	port->en = cxd2099_attach(&cxd_cfg, port, &port->i2c->adap);
	if (!port->en) {
		dvb_unregister_adapter(&port->output->adap);
		return -ENODEV;
	}
	/* CI traffic flows through input[0] and the output DMA ring. */
	ddb_input_start(port->input[0]);
	ddb_output_start(port->output);
	dvb_ca_en50221_init(&port->output->adap,
			    port->en, 0, 1);
	/* NOTE(review): a failed dvb_register_device() leaves the CA
	 * layer and DMA running; teardown relies on ddb_ports_detach(). */
	ret = dvb_register_device(&port->output->adap, &port->output->dev,
				  &dvbdev_ci, (void *) port->output,
				  DVB_DEVICE_SEC);
	return ret;
}
1071
1072static int ddb_port_attach(struct ddb_port *port)
1073{
1074 int ret = 0;
1075
1076 switch (port->class) {
1077 case DDB_PORT_TUNER:
1078 ret = dvb_input_attach(port->input[0]);
1079 if (ret < 0)
1080 break;
1081 ret = dvb_input_attach(port->input[1]);
1082 break;
1083 case DDB_PORT_CI:
1084 ret = ddb_ci_attach(port);
1085 break;
1086 default:
1087 break;
1088 }
1089 if (ret < 0)
1090 printk(KERN_ERR "port_attach on port %d failed\n", port->nr);
1091 return ret;
1092}
1093
1094static int ddb_ports_attach(struct ddb *dev)
1095{
1096 int i, ret = 0;
1097 struct ddb_port *port;
1098
1099 for (i = 0; i < dev->info->port_num; i++) {
1100 port = &dev->port[i];
1101 ret = ddb_port_attach(port);
1102 if (ret < 0)
1103 break;
1104 }
1105 return ret;
1106}
1107
/*
 * Undo ddb_port_attach() for every port. Tuner ports detach both DVB
 * inputs; CI ports unregister the device node, stop DMA, and release
 * the CI bridge. Safe against partial attaches (NULL/level checks).
 */
static void ddb_ports_detach(struct ddb *dev)
{
	int i;
	struct ddb_port *port;

	for (i = 0; i < dev->info->port_num; i++) {
		port = &dev->port[i];
		switch (port->class) {
		case DDB_PORT_TUNER:
			dvb_input_detach(port->input[0]);
			dvb_input_detach(port->input[1]);
			break;
		case DDB_PORT_CI:
			if (port->output->dev)
				dvb_unregister_device(port->output->dev);
			if (port->en) {
				ddb_input_stop(port->input[0]);
				ddb_output_stop(port->output);
				dvb_ca_en50221_release(port->en);
				/* NOTE(review): en came from
				 * cxd2099_attach(); confirm kfree() here
				 * matches that allocator. */
				kfree(port->en);
				port->en = 0;
				dvb_unregister_adapter(&port->output->adap);
			}
			break;
		}
	}
}
1135
1136/****************************************************************************/
1137/****************************************************************************/
1138
1139static int port_has_ci(struct ddb_port *port)
1140{
1141 u8 val;
1142 return i2c_read_reg(&port->i2c->adap, 0x40, 0, &val) ? 0 : 1;
1143}
1144
1145static int port_has_stv0900(struct ddb_port *port)
1146{
1147 u8 val;
1148 if (i2c_read_reg16(&port->i2c->adap, 0x69, 0xf100, &val) < 0)
1149 return 0;
1150 return 1;
1151}
1152
1153static int port_has_stv0900_aa(struct ddb_port *port)
1154{
1155 u8 val;
1156 if (i2c_read_reg16(&port->i2c->adap, 0x68, 0xf100, &val) < 0)
1157 return 0;
1158 return 1;
1159}
1160
1161static int port_has_drxks(struct ddb_port *port)
1162{
1163 u8 val;
1164 if (i2c_read(&port->i2c->adap, 0x29, &val) < 0)
1165 return 0;
1166 if (i2c_read(&port->i2c->adap, 0x2a, &val) < 0)
1167 return 0;
1168 return 1;
1169}
1170
/*
 * Detect what is connected to a port by I2C probing, set class/type
 * accordingly, and pick a matching I2C bus speed. Falls back to
 * DDB_PORT_NONE when nothing answers. The 'dev' local is consumed
 * implicitly by the ddbwritel() macro.
 */
static void ddb_port_probe(struct ddb_port *port)
{
	struct ddb *dev = port->dev;
	char *modname = "NO MODULE";

	port->class = DDB_PORT_NONE;

	if (port_has_ci(port)) {
		modname = "CI";
		port->class = DDB_PORT_CI;
		ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
	} else if (port_has_stv0900(port)) {
		modname = "DUAL DVB-S2";
		port->class = DDB_PORT_TUNER;
		port->type = DDB_TUNER_DVBS_ST;
		/* Slower bus for the STV0900 boards. */
		ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
	} else if (port_has_stv0900_aa(port)) {
		modname = "DUAL DVB-S2";
		port->class = DDB_PORT_TUNER;
		port->type = DDB_TUNER_DVBS_ST_AA;
		ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
	} else if (port_has_drxks(port)) {
		modname = "DUAL DVB-C/T";
		port->class = DDB_PORT_TUNER;
		port->type = DDB_TUNER_DVBCT_TR;
		ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
	}
	printk(KERN_INFO "Port %d (TAB %d): %s\n",
	       port->nr, port->nr+1, modname);
}
1201
/*
 * Initialize software state for TS input 'nr' and quiesce its hardware
 * input stage. The 0/2/0 write sequence on TS_INPUT_CONTROL looks like
 * a reset pulse on bit 1 — TODO confirm against the register spec.
 * 'dev' is consumed implicitly by the ddbwritel() macro.
 */
static void ddb_input_init(struct ddb_port *port, int nr)
{
	struct ddb *dev = port->dev;
	struct ddb_input *input = &dev->input[nr];

	input->nr = nr;
	input->port = port;
	input->dma_buf_num = INPUT_DMA_BUFS;
	input->dma_buf_size = INPUT_DMA_SIZE;
	ddbwritel(0, TS_INPUT_CONTROL(nr));
	ddbwritel(2, TS_INPUT_CONTROL(nr));
	ddbwritel(0, TS_INPUT_CONTROL(nr));
	ddbwritel(0, DMA_BUFFER_ACK(nr));
	tasklet_init(&input->tasklet, input_tasklet, (unsigned long) input);
	spin_lock_init(&input->lock);
	init_waitqueue_head(&input->wq);
}
1219
/*
 * Initialize software state for TS output 'nr' and quiesce its output
 * stage (same 0/2/0 reset-pulse pattern as ddb_input_init()). 'dev' is
 * consumed implicitly by the ddbwritel() macro.
 */
static void ddb_output_init(struct ddb_port *port, int nr)
{
	struct ddb *dev = port->dev;
	struct ddb_output *output = &dev->output[nr];
	output->nr = nr;
	output->port = port;
	output->dma_buf_num = OUTPUT_DMA_BUFS;
	output->dma_buf_size = OUTPUT_DMA_SIZE;

	ddbwritel(0, TS_OUTPUT_CONTROL(nr));
	ddbwritel(2, TS_OUTPUT_CONTROL(nr));
	ddbwritel(0, TS_OUTPUT_CONTROL(nr));
	tasklet_init(&output->tasklet, output_tasklet, (unsigned long) output);
	init_waitqueue_head(&output->wq);
}
1235
/*
 * Wire up all ports: each port i owns I2C bus i, inputs 2i and 2i+1,
 * and output i. Probes the hardware on each port and initializes its
 * DMA channels.
 */
static void ddb_ports_init(struct ddb *dev)
{
	int i;
	struct ddb_port *port;

	for (i = 0; i < dev->info->port_num; i++) {
		port = &dev->port[i];
		port->dev = dev;
		port->nr = i;
		port->i2c = &dev->i2c[i];
		port->input[0] = &dev->input[2 * i];
		port->input[1] = &dev->input[2 * i + 1];
		port->output = &dev->output[i];

		mutex_init(&port->i2c_gate_lock);
		ddb_port_probe(port);
		ddb_input_init(port, 2 * i);
		ddb_input_init(port, 2 * i + 1);
		ddb_output_init(port, i);
	}
}
1257
/*
 * Kill every port's tasklets so none can run after the IRQ handler is
 * gone. The 'port->dev = dev' assignment is redundant (already set in
 * ddb_ports_init()) and kept only for byte-identity.
 */
static void ddb_ports_release(struct ddb *dev)
{
	int i;
	struct ddb_port *port;

	for (i = 0; i < dev->info->port_num; i++) {
		port = &dev->port[i];
		port->dev = dev;
		tasklet_kill(&port->input[0]->tasklet);
		tasklet_kill(&port->input[1]->tasklet);
		tasklet_kill(&port->output->tasklet);
	}
}
1271
1272/****************************************************************************/
1273/****************************************************************************/
1274/****************************************************************************/
1275
1276static void irq_handle_i2c(struct ddb *dev, int n)
1277{
1278 struct ddb_i2c *i2c = &dev->i2c[n];
1279
1280 i2c->done = 1;
1281 wake_up(&i2c->wq);
1282}
1283
/*
 * Shared/MSI interrupt handler. Status bits: 0x0f = I2C buses 0-3,
 * 0xff00 = TS inputs 0-7, 0xf0000 = TS outputs 0-3 (matches the
 * INTMASK_* defines in ddbridge-regs.h). Each pass acks what it saw,
 * then re-reads status until the device is quiescent, so events that
 * arrive during processing are not lost.
 */
static irqreturn_t irq_handler(int irq, void *dev_id)
{
	struct ddb *dev = (struct ddb *) dev_id;
	u32 s = ddbreadl(INTERRUPT_STATUS);

	if (!s)
		return IRQ_NONE;	/* not ours (shared line) */

	do {
		ddbwritel(s, INTERRUPT_ACK);

		if (s & 0x00000001)
			irq_handle_i2c(dev, 0);
		if (s & 0x00000002)
			irq_handle_i2c(dev, 1);
		if (s & 0x00000004)
			irq_handle_i2c(dev, 2);
		if (s & 0x00000008)
			irq_handle_i2c(dev, 3);

		if (s & 0x00000100)
			tasklet_schedule(&dev->input[0].tasklet);
		if (s & 0x00000200)
			tasklet_schedule(&dev->input[1].tasklet);
		if (s & 0x00000400)
			tasklet_schedule(&dev->input[2].tasklet);
		if (s & 0x00000800)
			tasklet_schedule(&dev->input[3].tasklet);
		if (s & 0x00001000)
			tasklet_schedule(&dev->input[4].tasklet);
		if (s & 0x00002000)
			tasklet_schedule(&dev->input[5].tasklet);
		if (s & 0x00004000)
			tasklet_schedule(&dev->input[6].tasklet);
		if (s & 0x00008000)
			tasklet_schedule(&dev->input[7].tasklet);

		if (s & 0x00010000)
			tasklet_schedule(&dev->output[0].tasklet);
		if (s & 0x00020000)
			tasklet_schedule(&dev->output[1].tasklet);
		if (s & 0x00040000)
			tasklet_schedule(&dev->output[2].tasklet);
		if (s & 0x00080000)
			tasklet_schedule(&dev->output[3].tasklet);

		/* if (s & 0x000f0000)	printk(KERN_DEBUG "%08x\n", istat); */
	} while ((s = ddbreadl(INTERRUPT_STATUS)));

	return IRQ_HANDLED;
}
1335
1336/******************************************************************************/
1337/******************************************************************************/
1338/******************************************************************************/
1339
/*
 * Perform one SPI flash transaction through the bridge's SPI
 * controller: clock out wlen bytes from wbuf, then optionally clock
 * rlen bytes into rbuf. Words move through SPI_DATA four bytes at a
 * time; SPI_CONTROL bit 0x0004 is polled as the busy flag (busy-wait
 * with no timeout — a wedged controller would hang here). Control
 * bits 0x1f00 appear to encode the residual byte count, bit 0x0002
 * to end the transaction — TODO confirm vs. hardware spec.
 * Always returns 0.
 */
static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
{
	u32 data, shift;

	/* Whole-word writes first, keeping the transaction open (1). */
	if (wlen > 4)
		ddbwritel(1, SPI_CONTROL);
	while (wlen > 4) {
		/* FIXME: check for big-endian */
		data = swab32(*(u32 *)wbuf);
		wbuf += 4;
		wlen -= 4;
		ddbwritel(data, SPI_DATA);
		while (ddbreadl(SPI_CONTROL) & 0x0004)
			;
	}

	/* Program the final 1-4 byte write; keep the transaction open
	 * (0x0001) when a read phase follows, close it (0x0003) if not. */
	if (rlen)
		ddbwritel(0x0001 | ((wlen << (8 + 3)) & 0x1f00), SPI_CONTROL);
	else
		ddbwritel(0x0003 | ((wlen << (8 + 3)) & 0x1f00), SPI_CONTROL);

	/* Pack the residual write bytes MSB-first into one word. */
	data = 0;
	shift = ((4 - wlen) * 8);
	while (wlen) {
		data <<= 8;
		data |= *wbuf;
		wlen--;
		wbuf++;
	}
	if (shift)
		data <<= shift;
	ddbwritel(data, SPI_DATA);
	while (ddbreadl(SPI_CONTROL) & 0x0004)
		;

	if (!rlen) {
		ddbwritel(0, SPI_CONTROL);
		return 0;
	}
	if (rlen > 4)
		ddbwritel(1, SPI_CONTROL);

	/* Whole-word reads: clock dummy 0xff out, data in. */
	while (rlen > 4) {
		ddbwritel(0xffffffff, SPI_DATA);
		while (ddbreadl(SPI_CONTROL) & 0x0004)
			;
		data = ddbreadl(SPI_DATA);
		*(u32 *) rbuf = swab32(data);
		rbuf += 4;
		rlen -= 4;
	}
	/* Final 1-4 byte read, closing the transaction. */
	ddbwritel(0x0003 | ((rlen << (8 + 3)) & 0x1F00), SPI_CONTROL);
	ddbwritel(0xffffffff, SPI_DATA);
	while (ddbreadl(SPI_CONTROL) & 0x0004)
		;

	data = ddbreadl(SPI_DATA);
	ddbwritel(0, SPI_CONTROL);

	if (rlen < 4)
		data <<= ((4 - rlen) * 8);

	/* Unpack the residual read bytes, MSB first. */
	while (rlen > 0) {
		*rbuf = ((data >> 24) & 0xff);
		data <<= 8;
		rbuf++;
		rlen--;
	}
	return 0;
}
1410
#define DDB_MAGIC 'd'

/* ioctl payload for raw SPI flash access (see flashio()).
 * NOTE(review): carries raw user-space pointers, so this ABI is not
 * 32/64-bit compat clean — a compat_ioctl shim would be needed. */
struct ddb_flashio {
	__u8 *write_buf;
	__u32 write_len;
	__u8 *read_buf;
	__u32 read_len;
};

#define IOCTL_DDB_FLASHIO  _IOWR(DDB_MAGIC, 0x00, struct ddb_flashio)

#define DDB_NAME "ddbridge"

/* Global control-chardev state: one minor per probed card, card
 * pointers indexed by minor in ddbs[] (max 32 cards). */
static u32 ddb_num;
static struct ddb *ddbs[32];
static struct class *ddb_class;
static int ddb_major;
1428
1429static int ddb_open(struct inode *inode, struct file *file)
1430{
1431 struct ddb *dev = ddbs[iminor(inode)];
1432
1433 file->private_data = dev;
1434 return 0;
1435}
1436
1437static long ddb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1438{
1439 struct ddb *dev = file->private_data;
1440 void *parg = (void *)arg;
1441 int res = -EFAULT;
1442
1443 switch (cmd) {
1444 case IOCTL_DDB_FLASHIO:
1445 {
1446 struct ddb_flashio fio;
1447 u8 *rbuf, *wbuf;
1448
1449 if (copy_from_user(&fio, parg, sizeof(fio)))
1450 break;
1451 if (fio.write_len + fio.read_len > 1028) {
1452 printk(KERN_ERR "IOBUF too small\n");
1453 return -ENOMEM;
1454 }
1455 wbuf = &dev->iobuf[0];
1456 if (!wbuf)
1457 return -ENOMEM;
1458 rbuf = wbuf + fio.write_len;
1459 if (copy_from_user(wbuf, fio.write_buf, fio.write_len)) {
1460 vfree(wbuf);
1461 break;
1462 }
1463 res = flashio(dev, wbuf, fio.write_len,
1464 rbuf, fio.read_len);
1465 if (copy_to_user(fio.read_buf, rbuf, fio.read_len))
1466 res = -EFAULT;
1467 break;
1468 }
1469 default:
1470 break;
1471 }
1472 return res;
1473}
1474
/* Control-device file operations (flash ioctl only).
 * NOTE(review): no .owner is set — confirm module refcounting while
 * the node is open is handled elsewhere before relying on rmmod. */
static const struct file_operations ddb_fops = {
	.unlocked_ioctl = ddb_ioctl,
	.open           = ddb_open,
};
1479
/* Class devnode hook: place card nodes at /dev/ddbridge/cardN. */
static char *ddb_devnode(struct device *device, mode_t *mode)
{
	struct ddb *dev = dev_get_drvdata(device);

	return kasprintf(GFP_KERNEL, "ddbridge/card%d", dev->nr);
}
1486
1487static int ddb_class_create(void)
1488{
1489 ddb_major = register_chrdev(0, DDB_NAME, &ddb_fops);
1490 if (ddb_major < 0)
1491 return ddb_major;
1492
1493 ddb_class = class_create(THIS_MODULE, DDB_NAME);
1494 if (IS_ERR(ddb_class)) {
1495 unregister_chrdev(ddb_major, DDB_NAME);
1496 return -1;
1497 }
1498 ddb_class->devnode = ddb_devnode;
1499 return 0;
1500}
1501
/* Reverse of ddb_class_create(): drop the class, then the chardev. */
static void ddb_class_destroy(void)
{
	class_destroy(ddb_class);
	unregister_chrdev(ddb_major, DDB_NAME);
}
1507
1508static int ddb_device_create(struct ddb *dev)
1509{
1510 dev->nr = ddb_num++;
1511 dev->ddb_dev = device_create(ddb_class, NULL,
1512 MKDEV(ddb_major, dev->nr),
1513 dev, "ddbridge%d", dev->nr);
1514 ddbs[dev->nr] = dev;
1515 if (IS_ERR(dev->ddb_dev))
1516 return -1;
1517 return 0;
1518}
1519
1520static void ddb_device_destroy(struct ddb *dev)
1521{
1522 ddb_num--;
1523 if (IS_ERR(dev->ddb_dev))
1524 return;
1525 device_destroy(ddb_class, MKDEV(ddb_major, 0));
1526}
1527
1528
1529/****************************************************************************/
1530/****************************************************************************/
1531/****************************************************************************/
1532
/* Release the BAR0 mapping (if set) and free the device struct,
 * which ddb_probe() allocated with vmalloc(). */
static void ddb_unmap(struct ddb *dev)
{
	if (dev->regs)
		iounmap(dev->regs);
	vfree(dev);
}
1539
1540
/*
 * PCI remove: mirror of ddb_probe()'s setup order — detach the DVB
 * stacks, release I2C, mask and free the interrupt, kill tasklets,
 * free DMA buffers, drop the control node, and unmap/free the device.
 */
static void __devexit ddb_remove(struct pci_dev *pdev)
{
	struct ddb *dev = (struct ddb *) pci_get_drvdata(pdev);

	ddb_ports_detach(dev);
	ddb_i2c_release(dev);

	/* Mask everything before the handler goes away. */
	ddbwritel(0, INTERRUPT_ENABLE);
	free_irq(dev->pdev->irq, dev);
#ifdef CONFIG_PCI_MSI
	if (dev->msi)
		pci_disable_msi(dev->pdev);
#endif
	ddb_ports_release(dev);
	ddb_buffers_free(dev);
	ddb_device_destroy(dev);

	ddb_unmap(dev);
	pci_set_drvdata(pdev, 0);
	pci_disable_device(pdev);
}
1562
1563
/*
 * PCI probe: enable the device, map BAR0, set up MSI (falling back to
 * a shared legacy IRQ), program the interrupt/DMA base registers, then
 * bring up I2C, ports, DMA buffers, DVB stacks, and the control node.
 * Uses staged goto cleanup; each failN label undoes one stage.
 *
 * NOTE(review): vmalloc()+memset() could be vzalloc(); the bare "-1"
 * return and the ignored ddb_device_create() result would also be
 * worth propagating as real errnos.
 */
static int __devinit ddb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct ddb *dev;
	int stat = 0;
	int irq_flag = IRQF_SHARED;

	if (pci_enable_device(pdev) < 0)
		return -ENODEV;

	dev = vmalloc(sizeof(struct ddb));
	if (dev == NULL)
		return -ENOMEM;
	memset(dev, 0, sizeof(struct ddb));

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);
	/* Board description selected by the PCI id table match. */
	dev->info = (struct ddb_info *) id->driver_data;
	printk(KERN_INFO "DDBridge driver detected: %s\n", dev->info->name);

	dev->regs = ioremap(pci_resource_start(dev->pdev, 0),
			    pci_resource_len(dev->pdev, 0));
	if (!dev->regs) {
		stat = -ENOMEM;
		goto fail;
	}
	printk(KERN_INFO "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4));

#ifdef CONFIG_PCI_MSI
	if (pci_msi_enabled())
		stat = pci_enable_msi(dev->pdev);
	if (stat) {
		printk(KERN_INFO ": MSI not available.\n");
	} else {
		/* Exclusive MSI vector: no need for IRQF_SHARED. */
		irq_flag = 0;
		dev->msi = 1;
	}
#endif
	stat = request_irq(dev->pdev->irq, irq_handler,
			   irq_flag, "DDBridge", (void *) dev);
	if (stat < 0)
		goto fail1;
	ddbwritel(0, DMA_BASE_WRITE);
	ddbwritel(0, DMA_BASE_READ);
	/* Clear any stale interrupts, then unmask I2C + TS channels. */
	ddbwritel(0xffffffff, INTERRUPT_ACK);
	ddbwritel(0xfff0f, INTERRUPT_ENABLE);
	ddbwritel(0, MSI1_ENABLE);

	if (ddb_i2c_init(dev) < 0)
		goto fail1;
	ddb_ports_init(dev);
	if (ddb_buffers_alloc(dev) < 0) {
		printk(KERN_INFO ": Could not allocate buffer memory\n");
		goto fail2;
	}
	if (ddb_ports_attach(dev) < 0)
		goto fail3;
	ddb_device_create(dev);
	return 0;

fail3:
	ddb_ports_detach(dev);
	printk(KERN_ERR "fail3\n");
	ddb_ports_release(dev);
fail2:
	printk(KERN_ERR "fail2\n");
	ddb_buffers_free(dev);
fail1:
	printk(KERN_ERR "fail1\n");
	if (dev->msi)
		pci_disable_msi(dev->pdev);
	free_irq(dev->pdev->irq, dev);
fail:
	printk(KERN_ERR "fail\n");
	ddb_unmap(dev);
	pci_set_drvdata(pdev, 0);
	pci_disable_device(pdev);
	return -1;
}
1643
1644/******************************************************************************/
1645/******************************************************************************/
1646/******************************************************************************/
1647
/* Board descriptions referenced from the PCI id table below. A
 * port_num of 0 (ddb_none) means no DVB stack is attached. */
static struct ddb_info ddb_none = {
	.type     = DDB_NONE,
	.name     = "Digital Devices PCIe bridge",
};

static struct ddb_info ddb_octopus = {
	.type     = DDB_OCTOPUS,
	.name     = "Digital Devices Octopus DVB adapter",
	.port_num = 4,
};

static struct ddb_info ddb_octopus_le = {
	.type     = DDB_OCTOPUS,
	.name     = "Digital Devices Octopus LE DVB adapter",
	.port_num = 2,
};

static struct ddb_info ddb_v6 = {
	.type     = DDB_OCTOPUS,
	.name     = "Digital Devices Cine S2 V6 DVB adapter",
	.port_num = 3,
};
1670
#define DDVID 0xdd01  /* Digital Devices Vendor ID */

/* Build one PCI match entry whose driver_data points at a ddb_info. */
#define DDB_ID(_vend, _dev, _subvend, _subdev, _driverdata) { \
	.vendor = _vend, .device = _dev, \
	.subvendor = _subvend, .subdevice = _subdev, \
	.driver_data = (unsigned long)&_driverdata }

static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
	DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
	DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
	DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
	DDB_ID(DDVID, 0x0003, DDVID, 0x0010, ddb_octopus),
	DDB_ID(DDVID, 0x0003, DDVID, 0x0020, ddb_v6),
	/* in case sub-ids got deleted in flash */
	DDB_ID(DDVID, 0x0003, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
	{0}
};
MODULE_DEVICE_TABLE(pci, ddb_id_tbl);
1688MODULE_DEVICE_TABLE(pci, ddb_id_tbl);
1689
1690
/* PCI glue: probe/remove entry points and supported device ids. */
static struct pci_driver ddb_pci_driver = {
	.name     = "DDBridge",
	.id_table = ddb_id_tbl,
	.probe    = ddb_probe,
	.remove   = ddb_remove,
};
1697
/*
 * Module init: create the control class/chardev, then register the
 * PCI driver. NOTE(review): class-creation failure returns a bare -1;
 * propagating the real errno would be friendlier to userspace.
 */
static __init int module_init_ddbridge(void)
{
	printk(KERN_INFO "Digital Devices PCIE bridge driver, "
	       "Copyright (C) 2010-11 Digital Devices GmbH\n");
	if (ddb_class_create())
		return -1;
	return pci_register_driver(&ddb_pci_driver);
}
1706
/* Module exit: unregister the PCI driver, then tear down the class. */
static __exit void module_exit_ddbridge(void)
{
	pci_unregister_driver(&ddb_pci_driver);
	ddb_class_destroy();
}
1712
/* Module entry points and metadata. */
module_init(module_init_ddbridge);
module_exit(module_exit_ddbridge);

MODULE_DESCRIPTION("Digital Devices PCIe Bridge");
MODULE_AUTHOR("Ralph Metzler");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.5");
diff --git a/drivers/media/dvb/ddbridge/ddbridge-regs.h b/drivers/media/dvb/ddbridge/ddbridge-regs.h
new file mode 100644
index 000000000000..a3ccb318b500
--- /dev/null
+++ b/drivers/media/dvb/ddbridge/ddbridge-regs.h
@@ -0,0 +1,151 @@
1/*
2 * ddbridge-regs.h: Digital Devices PCIe bridge driver
3 *
4 * Copyright (C) 2010-2011 Digital Devices GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 only, as published by the Free Software Foundation.
9 *
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA
21 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
22 */
23
24/* DD-DVBBridgeV1.h 273 2010-09-17 05:03:16Z manfred */
25
26/* Register Definitions */
27
28#define CUR_REGISTERMAP_VERSION 0x10000
29
30#define HARDWARE_VERSION 0x00
31#define REGISTERMAP_VERSION 0x04
32
33/* ------------------------------------------------------------------------- */
34/* SPI Controller */
35
36#define SPI_CONTROL 0x10
37#define SPI_DATA 0x14
38
39/* ------------------------------------------------------------------------- */
40
41/* Interrupt controller */
42/* How many MSI's are available depends on HW (Min 2 max 8) */
43/* How many are usable also depends on Host platform */
44
45#define INTERRUPT_BASE (0x40)
46
47#define INTERRUPT_ENABLE (INTERRUPT_BASE + 0x00)
48#define MSI0_ENABLE (INTERRUPT_BASE + 0x00)
49#define MSI1_ENABLE (INTERRUPT_BASE + 0x04)
50#define MSI2_ENABLE (INTERRUPT_BASE + 0x08)
51#define MSI3_ENABLE (INTERRUPT_BASE + 0x0C)
52#define MSI4_ENABLE (INTERRUPT_BASE + 0x10)
53#define MSI5_ENABLE (INTERRUPT_BASE + 0x14)
54#define MSI6_ENABLE (INTERRUPT_BASE + 0x18)
55#define MSI7_ENABLE (INTERRUPT_BASE + 0x1C)
56
57#define INTERRUPT_STATUS (INTERRUPT_BASE + 0x20)
58#define INTERRUPT_ACK (INTERRUPT_BASE + 0x20)
59
60#define INTMASK_I2C1 (0x00000001)
61#define INTMASK_I2C2 (0x00000002)
62#define INTMASK_I2C3 (0x00000004)
63#define INTMASK_I2C4 (0x00000008)
64
65#define INTMASK_CIRQ1 (0x00000010)
66#define INTMASK_CIRQ2 (0x00000020)
67#define INTMASK_CIRQ3 (0x00000040)
68#define INTMASK_CIRQ4 (0x00000080)
69
70#define INTMASK_TSINPUT1 (0x00000100)
71#define INTMASK_TSINPUT2 (0x00000200)
72#define INTMASK_TSINPUT3 (0x00000400)
73#define INTMASK_TSINPUT4 (0x00000800)
74#define INTMASK_TSINPUT5 (0x00001000)
75#define INTMASK_TSINPUT6 (0x00002000)
76#define INTMASK_TSINPUT7 (0x00004000)
77#define INTMASK_TSINPUT8 (0x00008000)
78
79#define INTMASK_TSOUTPUT1 (0x00010000)
80#define INTMASK_TSOUTPUT2 (0x00020000)
81#define INTMASK_TSOUTPUT3 (0x00040000)
82#define INTMASK_TSOUTPUT4 (0x00080000)
83
84/* ------------------------------------------------------------------------- */
85/* I2C Master Controller */
86
87#define I2C_BASE (0x80) /* Byte offset */
88
89#define I2C_COMMAND (0x00)
90#define I2C_TIMING (0x04)
91#define I2C_TASKLENGTH (0x08) /* High read, low write */
92#define I2C_TASKADDRESS (0x0C) /* High read, low write */
93
94#define I2C_MONITOR (0x1C)
95
96#define I2C_BASE_1 (I2C_BASE + 0x00)
97#define I2C_BASE_2 (I2C_BASE + 0x20)
98#define I2C_BASE_3 (I2C_BASE + 0x40)
99#define I2C_BASE_4 (I2C_BASE + 0x60)
100
101#define I2C_BASE_N(i) (I2C_BASE + (i) * 0x20)
102
103#define I2C_TASKMEM_BASE (0x1000) /* Byte offset */
104#define I2C_TASKMEM_SIZE (0x1000)
105
106#define I2C_SPEED_400 (0x04030404)
107#define I2C_SPEED_200 (0x09080909)
108#define I2C_SPEED_154 (0x0C0B0C0C)
109#define I2C_SPEED_100 (0x13121313)
110#define I2C_SPEED_77 (0x19181919)
111#define I2C_SPEED_50 (0x27262727)
112
113
114/* ------------------------------------------------------------------------- */
115/* DMA Controller */
116
117#define DMA_BASE_WRITE (0x100)
118#define DMA_BASE_READ (0x140)
119
120#define DMA_CONTROL (0x00) /* 64 */
121#define DMA_ERROR (0x04) /* 65 ( only read instance ) */
122
123#define DMA_DIAG_CONTROL (0x1C) /* 71 */
124#define DMA_DIAG_PACKETCOUNTER_LOW (0x20) /* 72 */
125#define DMA_DIAG_PACKETCOUNTER_HIGH (0x24) /* 73 */
126#define DMA_DIAG_TIMECOUNTER_LOW (0x28) /* 74 */
127#define DMA_DIAG_TIMECOUNTER_HIGH (0x2C) /* 75 */
128#define DMA_DIAG_RECHECKCOUNTER (0x30) /* 76 ( Split completions on read ) */
129#define DMA_DIAG_WAITTIMEOUTINIT (0x34) /* 77 */
130#define DMA_DIAG_WAITOVERFLOWCOUNTER (0x38) /* 78 */
131#define DMA_DIAG_WAITCOUNTER (0x3C) /* 79 */
132
133/* ------------------------------------------------------------------------- */
134/* DMA Buffer */
135
136#define TS_INPUT_BASE (0x200)
137#define TS_INPUT_CONTROL(i) (TS_INPUT_BASE + (i) * 16 + 0x00)
138
139#define TS_OUTPUT_BASE (0x280)
140#define TS_OUTPUT_CONTROL(i) (TS_OUTPUT_BASE + (i) * 16 + 0x00)
141
142#define DMA_BUFFER_BASE (0x300)
143
144#define DMA_BUFFER_CONTROL(i) (DMA_BUFFER_BASE + (i) * 16 + 0x00)
145#define DMA_BUFFER_ACK(i) (DMA_BUFFER_BASE + (i) * 16 + 0x04)
146#define DMA_BUFFER_CURRENT(i) (DMA_BUFFER_BASE + (i) * 16 + 0x08)
147#define DMA_BUFFER_SIZE(i) (DMA_BUFFER_BASE + (i) * 16 + 0x0c)
148
149#define DMA_BASE_ADDRESS_TABLE (0x2000)
150#define DMA_BASE_ADDRESS_TABLE_ENTRIES (512)
151
diff --git a/drivers/media/dvb/ddbridge/ddbridge.h b/drivers/media/dvb/ddbridge/ddbridge.h
new file mode 100644
index 000000000000..6d14893218f4
--- /dev/null
+++ b/drivers/media/dvb/ddbridge/ddbridge.h
@@ -0,0 +1,187 @@
1/*
2 * ddbridge.h: Digital Devices PCIe bridge driver
3 *
4 * Copyright (C) 2010-2011 Digital Devices GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 only, as published by the Free Software Foundation.
9 *
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA
21 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
22 */
23
24#ifndef _DDBRIDGE_H_
25#define _DDBRIDGE_H_
26
27#include <linux/types.h>
28#include <linux/sched.h>
29#include <linux/interrupt.h>
30#include <linux/i2c.h>
31#include <linux/mutex.h>
32#include <asm/dma.h>
33#include <linux/dvb/frontend.h>
34#include <linux/dvb/ca.h>
35#include <linux/dvb/video.h>
36#include <linux/dvb/audio.h>
37#include <linux/socket.h>
38
39#include "dmxdev.h"
40#include "dvbdev.h"
41#include "dvb_demux.h"
42#include "dvb_frontend.h"
43#include "dvb_ringbuffer.h"
44#include "dvb_ca_en50221.h"
45#include "dvb_net.h"
46#include "cxd2099.h"
47
48#define DDB_MAX_I2C 4
49#define DDB_MAX_PORT 4
50#define DDB_MAX_INPUT 8
51#define DDB_MAX_OUTPUT 4
52
53struct ddb_info {
54 int type;
55#define DDB_NONE 0
56#define DDB_OCTOPUS 1
57 char *name;
58 int port_num;
59 u32 port_type[DDB_MAX_PORT];
60};
61
62/* DMA_SIZE MUST be divisible by 188 and 128 !!! */
63
64#define INPUT_DMA_MAX_BUFS 32 /* hardware table limit */
65#define INPUT_DMA_BUFS 8
66#define INPUT_DMA_SIZE (128*47*21)
67
68#define OUTPUT_DMA_MAX_BUFS 32
69#define OUTPUT_DMA_BUFS 8
70#define OUTPUT_DMA_SIZE (128*47*21)
71
72struct ddb;
73struct ddb_port;
74
75struct ddb_input {
76 struct ddb_port *port;
77 u32 nr;
78 int attached;
79
80 dma_addr_t pbuf[INPUT_DMA_MAX_BUFS];
81 u8 *vbuf[INPUT_DMA_MAX_BUFS];
82 u32 dma_buf_num;
83 u32 dma_buf_size;
84
85 struct tasklet_struct tasklet;
86 spinlock_t lock;
87 wait_queue_head_t wq;
88 int running;
89 u32 stat;
90 u32 cbuf;
91 u32 coff;
92
93 struct dvb_adapter adap;
94 struct dvb_device *dev;
95 struct dvb_frontend *fe;
96 struct dvb_frontend *fe2;
97 struct dmxdev dmxdev;
98 struct dvb_demux demux;
99 struct dvb_net dvbnet;
100 struct dmx_frontend hw_frontend;
101 struct dmx_frontend mem_frontend;
102 int users;
103 int (*gate_ctrl)(struct dvb_frontend *, int);
104};
105
106struct ddb_output {
107 struct ddb_port *port;
108 u32 nr;
109 dma_addr_t pbuf[OUTPUT_DMA_MAX_BUFS];
110 u8 *vbuf[OUTPUT_DMA_MAX_BUFS];
111 u32 dma_buf_num;
112 u32 dma_buf_size;
113 struct tasklet_struct tasklet;
114 spinlock_t lock;
115 wait_queue_head_t wq;
116 int running;
117 u32 stat;
118 u32 cbuf;
119 u32 coff;
120
121 struct dvb_adapter adap;
122 struct dvb_device *dev;
123};
124
125struct ddb_i2c {
126 struct ddb *dev;
127 u32 nr;
128 struct i2c_adapter adap;
129 struct i2c_adapter adap2;
130 u32 regs;
131 u32 rbuf;
132 u32 wbuf;
133 int done;
134 wait_queue_head_t wq;
135};
136
137struct ddb_port {
138 struct ddb *dev;
139 u32 nr;
140 struct ddb_i2c *i2c;
141 struct mutex i2c_gate_lock;
142 u32 class;
143#define DDB_PORT_NONE 0
144#define DDB_PORT_CI 1
145#define DDB_PORT_TUNER 2
146 u32 type;
147#define DDB_TUNER_NONE 0
148#define DDB_TUNER_DVBS_ST 1
149#define DDB_TUNER_DVBS_ST_AA 2
150#define DDB_TUNER_DVBCT_TR 16
151#define DDB_TUNER_DVBCT_ST 17
152 u32 adr;
153
154 struct ddb_input *input[2];
155 struct ddb_output *output;
156 struct dvb_ca_en50221 *en;
157};
158
159struct ddb {
160 struct pci_dev *pdev;
161 unsigned char *regs;
162 struct ddb_port port[DDB_MAX_PORT];
163 struct ddb_i2c i2c[DDB_MAX_I2C];
164 struct ddb_input input[DDB_MAX_INPUT];
165 struct ddb_output output[DDB_MAX_OUTPUT];
166
167 struct device *ddb_dev;
168 int nr;
169 u8 iobuf[1028];
170
171 struct ddb_info *info;
172 int msi;
173};
174
175/****************************************************************************/
176
177#define ddbwritel(_val, _adr) writel((_val), \
178 (char *) (dev->regs+(_adr)))
179#define ddbreadl(_adr) readl((char *) (dev->regs+(_adr)))
180#define ddbcpyto(_adr, _src, _count) memcpy_toio((char *) \
181 (dev->regs+(_adr)), (_src), (_count))
182#define ddbcpyfrom(_dst, _adr, _count) memcpy_fromio((_dst), (char *) \
183 (dev->regs+(_adr)), (_count))
184
185/****************************************************************************/
186
187#endif
diff --git a/drivers/media/dvb/dvb-core/Makefile b/drivers/media/dvb/dvb-core/Makefile
index 0b5182835cc8..8f22bcd7c1f9 100644
--- a/drivers/media/dvb/dvb-core/Makefile
+++ b/drivers/media/dvb/dvb-core/Makefile
@@ -2,8 +2,10 @@
2# Makefile for the kernel DVB device drivers. 2# Makefile for the kernel DVB device drivers.
3# 3#
4 4
5dvb-net-$(CONFIG_DVB_NET) := dvb_net.o
6
5dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o dvb_filter.o \ 7dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o dvb_filter.o \
6 dvb_ca_en50221.o dvb_frontend.o \ 8 dvb_ca_en50221.o dvb_frontend.o \
7 dvb_net.o dvb_ringbuffer.o dvb_math.o 9 $(dvb-net-y) dvb_ringbuffer.o dvb_math.o
8 10
9obj-$(CONFIG_DVB_CORE) += dvb-core.o 11obj-$(CONFIG_DVB_CORE) += dvb-core.o
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 5b6b451d4694..efe9c30605e8 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -904,7 +904,7 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
904 .buffer = b \ 904 .buffer = b \
905} 905}
906 906
907static struct dtv_cmds_h dtv_cmds[] = { 907static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
908 _DTV_CMD(DTV_TUNE, 1, 0), 908 _DTV_CMD(DTV_TUNE, 1, 0),
909 _DTV_CMD(DTV_CLEAR, 1, 0), 909 _DTV_CMD(DTV_CLEAR, 1, 0),
910 910
@@ -966,6 +966,7 @@ static struct dtv_cmds_h dtv_cmds[] = {
966 _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0), 966 _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0),
967 967
968 _DTV_CMD(DTV_ISDBS_TS_ID, 1, 0), 968 _DTV_CMD(DTV_ISDBS_TS_ID, 1, 0),
969 _DTV_CMD(DTV_DVBT2_PLP_ID, 1, 0),
969 970
970 /* Get */ 971 /* Get */
971 _DTV_CMD(DTV_DISEQC_SLAVE_REPLY, 0, 1), 972 _DTV_CMD(DTV_DISEQC_SLAVE_REPLY, 0, 1),
diff --git a/drivers/media/dvb/dvb-core/dvb_net.h b/drivers/media/dvb/dvb-core/dvb_net.h
index 3a3126cae03b..1e53acd50cf4 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.h
+++ b/drivers/media/dvb/dvb-core/dvb_net.h
@@ -32,6 +32,8 @@
32 32
33#define DVB_NET_DEVICES_MAX 10 33#define DVB_NET_DEVICES_MAX 10
34 34
35#ifdef CONFIG_DVB_NET
36
35struct dvb_net { 37struct dvb_net {
36 struct dvb_device *dvbdev; 38 struct dvb_device *dvbdev;
37 struct net_device *device[DVB_NET_DEVICES_MAX]; 39 struct net_device *device[DVB_NET_DEVICES_MAX];
@@ -40,8 +42,25 @@ struct dvb_net {
40 struct dmx_demux *demux; 42 struct dmx_demux *demux;
41}; 43};
42 44
43
44void dvb_net_release(struct dvb_net *); 45void dvb_net_release(struct dvb_net *);
45int dvb_net_init(struct dvb_adapter *, struct dvb_net *, struct dmx_demux *); 46int dvb_net_init(struct dvb_adapter *, struct dvb_net *, struct dmx_demux *);
46 47
48#else
49
50struct dvb_net {
51 struct dvb_device *dvbdev;
52};
53
54static inline void dvb_net_release(struct dvb_net *dvbnet)
55{
56}
57
58static inline int dvb_net_init(struct dvb_adapter *adap,
59 struct dvb_net *dvbnet, struct dmx_demux *dmx)
60{
61 return 0;
62}
63
64#endif /* ifdef CONFIG_DVB_NET */
65
47#endif 66#endif
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index e85304c59a2b..5d73dec8ac07 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -81,6 +81,7 @@ config DVB_USB_DIB0700
81 select MEDIA_TUNER_MT2266 if !MEDIA_TUNER_CUSTOMISE 81 select MEDIA_TUNER_MT2266 if !MEDIA_TUNER_CUSTOMISE
82 select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE 82 select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
83 select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMISE 83 select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMISE
84 select MEDIA_TUNER_XC4000 if !MEDIA_TUNER_CUSTOMISE
84 select MEDIA_TUNER_MXL5007T if !MEDIA_TUNER_CUSTOMISE 85 select MEDIA_TUNER_MXL5007T if !MEDIA_TUNER_CUSTOMISE
85 help 86 help
86 Support for USB2.0/1.1 DVB receivers based on the DiB0700 USB bridge. The 87 Support for USB2.0/1.1 DVB receivers based on the DiB0700 USB bridge. The
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index 100ebc37e99e..d7ad05fc383b 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -91,7 +91,6 @@ static int af9015_rw_udev(struct usb_device *udev, struct req_t *req)
91 case GET_CONFIG: 91 case GET_CONFIG:
92 case READ_MEMORY: 92 case READ_MEMORY:
93 case RECONNECT_USB: 93 case RECONNECT_USB:
94 case GET_IR_CODE:
95 write = 0; 94 write = 0;
96 break; 95 break;
97 case READ_I2C: 96 case READ_I2C:
@@ -164,13 +163,6 @@ static int af9015_rw_udev(struct usb_device *udev, struct req_t *req)
164 deb_xfer("<<< "); 163 deb_xfer("<<< ");
165 debug_dump(buf, act_len, deb_xfer); 164 debug_dump(buf, act_len, deb_xfer);
166 165
167 /* remote controller query status is 1 if remote code is not received */
168 if (req->cmd == GET_IR_CODE && buf[1] == 1) {
169 buf[1] = 0; /* clear command "error" status */
170 memset(&buf[2], 0, req->data_len);
171 buf[3] = 1; /* no remote code received mark */
172 }
173
174 /* check status */ 166 /* check status */
175 if (buf[1]) { 167 if (buf[1]) {
176 err("command failed:%d", buf[1]); 168 err("command failed:%d", buf[1]);
@@ -292,6 +284,10 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
292 } 284 }
293 285
294 if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { 286 if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
287 if (msg[i].len > 3 || msg[i+1].len > 61) {
288 ret = -EOPNOTSUPP;
289 goto error;
290 }
295 if (msg[i].addr == 291 if (msg[i].addr ==
296 af9015_af9013_config[0].demod_address) 292 af9015_af9013_config[0].demod_address)
297 req.cmd = READ_MEMORY; 293 req.cmd = READ_MEMORY;
@@ -306,12 +302,16 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
306 ret = af9015_ctrl_msg(d, &req); 302 ret = af9015_ctrl_msg(d, &req);
307 i += 2; 303 i += 2;
308 } else if (msg[i].flags & I2C_M_RD) { 304 } else if (msg[i].flags & I2C_M_RD) {
309 ret = -EINVAL; 305 if (msg[i].len > 61) {
306 ret = -EOPNOTSUPP;
307 goto error;
308 }
310 if (msg[i].addr == 309 if (msg[i].addr ==
311 af9015_af9013_config[0].demod_address) 310 af9015_af9013_config[0].demod_address) {
311 ret = -EINVAL;
312 goto error; 312 goto error;
313 else 313 }
314 req.cmd = READ_I2C; 314 req.cmd = READ_I2C;
315 req.i2c_addr = msg[i].addr; 315 req.i2c_addr = msg[i].addr;
316 req.addr = addr; 316 req.addr = addr;
317 req.mbox = mbox; 317 req.mbox = mbox;
@@ -321,6 +321,10 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
321 ret = af9015_ctrl_msg(d, &req); 321 ret = af9015_ctrl_msg(d, &req);
322 i += 1; 322 i += 1;
323 } else { 323 } else {
324 if (msg[i].len > 21) {
325 ret = -EOPNOTSUPP;
326 goto error;
327 }
324 if (msg[i].addr == 328 if (msg[i].addr ==
325 af9015_af9013_config[0].demod_address) 329 af9015_af9013_config[0].demod_address)
326 req.cmd = WRITE_MEMORY; 330 req.cmd = WRITE_MEMORY;
@@ -735,6 +739,7 @@ static const struct af9015_rc_setup af9015_rc_setup_hashes[] = {
735 { 0xb8feb708, RC_MAP_MSI_DIGIVOX_II }, 739 { 0xb8feb708, RC_MAP_MSI_DIGIVOX_II },
736 { 0xa3703d00, RC_MAP_ALINK_DTU_M }, 740 { 0xa3703d00, RC_MAP_ALINK_DTU_M },
737 { 0x9b7dc64e, RC_MAP_TOTAL_MEDIA_IN_HAND }, /* MYGICTV U718 */ 741 { 0x9b7dc64e, RC_MAP_TOTAL_MEDIA_IN_HAND }, /* MYGICTV U718 */
742 { 0x5d49e3db, RC_MAP_DIGITTRADE }, /* LC-Power LC-USB-DVBT */
738 { } 743 { }
739}; 744};
740 745
@@ -749,6 +754,8 @@ static const struct af9015_rc_setup af9015_rc_setup_usbids[] = {
749 RC_MAP_AZUREWAVE_AD_TU700 }, 754 RC_MAP_AZUREWAVE_AD_TU700 },
750 { (USB_VID_MSI_2 << 16) + USB_PID_MSI_DIGI_VOX_MINI_III, 755 { (USB_VID_MSI_2 << 16) + USB_PID_MSI_DIGI_VOX_MINI_III,
751 RC_MAP_MSI_DIGIVOX_III }, 756 RC_MAP_MSI_DIGIVOX_III },
757 { (USB_VID_MSI_2 << 16) + USB_PID_MSI_DIGIVOX_DUO,
758 RC_MAP_MSI_DIGIVOX_III },
752 { (USB_VID_LEADTEK << 16) + USB_PID_WINFAST_DTV_DONGLE_GOLD, 759 { (USB_VID_LEADTEK << 16) + USB_PID_WINFAST_DTV_DONGLE_GOLD,
753 RC_MAP_LEADTEK_Y04G0051 }, 760 RC_MAP_LEADTEK_Y04G0051 },
754 { (USB_VID_AVERMEDIA << 16) + USB_PID_AVERMEDIA_VOLAR_X, 761 { (USB_VID_AVERMEDIA << 16) + USB_PID_AVERMEDIA_VOLAR_X,
@@ -759,6 +766,8 @@ static const struct af9015_rc_setup af9015_rc_setup_usbids[] = {
759 RC_MAP_DIGITALNOW_TINYTWIN }, 766 RC_MAP_DIGITALNOW_TINYTWIN },
760 { (USB_VID_GTEK << 16) + USB_PID_TINYTWIN_3, 767 { (USB_VID_GTEK << 16) + USB_PID_TINYTWIN_3,
761 RC_MAP_DIGITALNOW_TINYTWIN }, 768 RC_MAP_DIGITALNOW_TINYTWIN },
769 { (USB_VID_KWORLD_2 << 16) + USB_PID_SVEON_STV22,
770 RC_MAP_MSI_DIGIVOX_III },
762 { } 771 { }
763}; 772};
764 773
@@ -1082,44 +1091,11 @@ error:
1082 return ret; 1091 return ret;
1083} 1092}
1084 1093
1085/* init 2nd I2C adapter */
1086static int af9015_i2c_init(struct dvb_usb_device *d)
1087{
1088 int ret;
1089 struct af9015_state *state = d->priv;
1090 deb_info("%s:\n", __func__);
1091
1092 strncpy(state->i2c_adap.name, d->desc->name,
1093 sizeof(state->i2c_adap.name));
1094 state->i2c_adap.algo = d->props.i2c_algo;
1095 state->i2c_adap.algo_data = NULL;
1096 state->i2c_adap.dev.parent = &d->udev->dev;
1097
1098 i2c_set_adapdata(&state->i2c_adap, d);
1099
1100 ret = i2c_add_adapter(&state->i2c_adap);
1101 if (ret < 0)
1102 err("could not add i2c adapter");
1103
1104 return ret;
1105}
1106
1107static int af9015_af9013_frontend_attach(struct dvb_usb_adapter *adap) 1094static int af9015_af9013_frontend_attach(struct dvb_usb_adapter *adap)
1108{ 1095{
1109 int ret; 1096 int ret;
1110 struct af9015_state *state = adap->dev->priv;
1111 struct i2c_adapter *i2c_adap;
1112
1113 if (adap->id == 0) {
1114 /* select I2C adapter */
1115 i2c_adap = &adap->dev->i2c_adap;
1116
1117 deb_info("%s: init I2C\n", __func__);
1118 ret = af9015_i2c_init(adap->dev);
1119 } else {
1120 /* select I2C adapter */
1121 i2c_adap = &state->i2c_adap;
1122 1097
1098 if (adap->id == 1) {
1123 /* copy firmware to 2nd demodulator */ 1099 /* copy firmware to 2nd demodulator */
1124 if (af9015_config.dual_mode) { 1100 if (af9015_config.dual_mode) {
1125 ret = af9015_copy_firmware(adap->dev); 1101 ret = af9015_copy_firmware(adap->dev);
@@ -1136,7 +1112,7 @@ static int af9015_af9013_frontend_attach(struct dvb_usb_adapter *adap)
1136 1112
1137 /* attach demodulator */ 1113 /* attach demodulator */
1138 adap->fe = dvb_attach(af9013_attach, &af9015_af9013_config[adap->id], 1114 adap->fe = dvb_attach(af9013_attach, &af9015_af9013_config[adap->id],
1139 i2c_adap); 1115 &adap->dev->i2c_adap);
1140 1116
1141 return adap->fe == NULL ? -ENODEV : 0; 1117 return adap->fe == NULL ? -ENODEV : 0;
1142} 1118}
@@ -1206,57 +1182,56 @@ static struct mxl5007t_config af9015_mxl5007t_config = {
1206 1182
1207static int af9015_tuner_attach(struct dvb_usb_adapter *adap) 1183static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
1208{ 1184{
1209 struct af9015_state *state = adap->dev->priv;
1210 struct i2c_adapter *i2c_adap;
1211 int ret; 1185 int ret;
1212 deb_info("%s:\n", __func__); 1186 deb_info("%s:\n", __func__);
1213 1187
1214 /* select I2C adapter */
1215 if (adap->id == 0)
1216 i2c_adap = &adap->dev->i2c_adap;
1217 else
1218 i2c_adap = &state->i2c_adap;
1219
1220 switch (af9015_af9013_config[adap->id].tuner) { 1188 switch (af9015_af9013_config[adap->id].tuner) {
1221 case AF9013_TUNER_MT2060: 1189 case AF9013_TUNER_MT2060:
1222 case AF9013_TUNER_MT2060_2: 1190 case AF9013_TUNER_MT2060_2:
1223 ret = dvb_attach(mt2060_attach, adap->fe, i2c_adap, 1191 ret = dvb_attach(mt2060_attach, adap->fe, &adap->dev->i2c_adap,
1224 &af9015_mt2060_config, 1192 &af9015_mt2060_config,
1225 af9015_config.mt2060_if1[adap->id]) 1193 af9015_config.mt2060_if1[adap->id])
1226 == NULL ? -ENODEV : 0; 1194 == NULL ? -ENODEV : 0;
1227 break; 1195 break;
1228 case AF9013_TUNER_QT1010: 1196 case AF9013_TUNER_QT1010:
1229 case AF9013_TUNER_QT1010A: 1197 case AF9013_TUNER_QT1010A:
1230 ret = dvb_attach(qt1010_attach, adap->fe, i2c_adap, 1198 ret = dvb_attach(qt1010_attach, adap->fe, &adap->dev->i2c_adap,
1231 &af9015_qt1010_config) == NULL ? -ENODEV : 0; 1199 &af9015_qt1010_config) == NULL ? -ENODEV : 0;
1232 break; 1200 break;
1233 case AF9013_TUNER_TDA18271: 1201 case AF9013_TUNER_TDA18271:
1234 ret = dvb_attach(tda18271_attach, adap->fe, 0xc0, i2c_adap, 1202 ret = dvb_attach(tda18271_attach, adap->fe, 0xc0,
1203 &adap->dev->i2c_adap,
1235 &af9015_tda18271_config) == NULL ? -ENODEV : 0; 1204 &af9015_tda18271_config) == NULL ? -ENODEV : 0;
1236 break; 1205 break;
1237 case AF9013_TUNER_TDA18218: 1206 case AF9013_TUNER_TDA18218:
1238 ret = dvb_attach(tda18218_attach, adap->fe, i2c_adap, 1207 ret = dvb_attach(tda18218_attach, adap->fe,
1208 &adap->dev->i2c_adap,
1239 &af9015_tda18218_config) == NULL ? -ENODEV : 0; 1209 &af9015_tda18218_config) == NULL ? -ENODEV : 0;
1240 break; 1210 break;
1241 case AF9013_TUNER_MXL5003D: 1211 case AF9013_TUNER_MXL5003D:
1242 ret = dvb_attach(mxl5005s_attach, adap->fe, i2c_adap, 1212 ret = dvb_attach(mxl5005s_attach, adap->fe,
1213 &adap->dev->i2c_adap,
1243 &af9015_mxl5003_config) == NULL ? -ENODEV : 0; 1214 &af9015_mxl5003_config) == NULL ? -ENODEV : 0;
1244 break; 1215 break;
1245 case AF9013_TUNER_MXL5005D: 1216 case AF9013_TUNER_MXL5005D:
1246 case AF9013_TUNER_MXL5005R: 1217 case AF9013_TUNER_MXL5005R:
1247 ret = dvb_attach(mxl5005s_attach, adap->fe, i2c_adap, 1218 ret = dvb_attach(mxl5005s_attach, adap->fe,
1219 &adap->dev->i2c_adap,
1248 &af9015_mxl5005_config) == NULL ? -ENODEV : 0; 1220 &af9015_mxl5005_config) == NULL ? -ENODEV : 0;
1249 break; 1221 break;
1250 case AF9013_TUNER_ENV77H11D5: 1222 case AF9013_TUNER_ENV77H11D5:
1251 ret = dvb_attach(dvb_pll_attach, adap->fe, 0xc0, i2c_adap, 1223 ret = dvb_attach(dvb_pll_attach, adap->fe, 0xc0,
1224 &adap->dev->i2c_adap,
1252 DVB_PLL_TDA665X) == NULL ? -ENODEV : 0; 1225 DVB_PLL_TDA665X) == NULL ? -ENODEV : 0;
1253 break; 1226 break;
1254 case AF9013_TUNER_MC44S803: 1227 case AF9013_TUNER_MC44S803:
1255 ret = dvb_attach(mc44s803_attach, adap->fe, i2c_adap, 1228 ret = dvb_attach(mc44s803_attach, adap->fe,
1229 &adap->dev->i2c_adap,
1256 &af9015_mc44s803_config) == NULL ? -ENODEV : 0; 1230 &af9015_mc44s803_config) == NULL ? -ENODEV : 0;
1257 break; 1231 break;
1258 case AF9013_TUNER_MXL5007T: 1232 case AF9013_TUNER_MXL5007T:
1259 ret = dvb_attach(mxl5007t_attach, adap->fe, i2c_adap, 1233 ret = dvb_attach(mxl5007t_attach, adap->fe,
1234 &adap->dev->i2c_adap,
1260 0xc0, &af9015_mxl5007t_config) == NULL ? -ENODEV : 0; 1235 0xc0, &af9015_mxl5007t_config) == NULL ? -ENODEV : 0;
1261 break; 1236 break;
1262 case AF9013_TUNER_UNKNOWN: 1237 case AF9013_TUNER_UNKNOWN:
@@ -1309,6 +1284,7 @@ static struct usb_device_id af9015_usb_table[] = {
1309 USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC)}, 1284 USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC)},
1310/* 35 */{USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850T)}, 1285/* 35 */{USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850T)},
1311 {USB_DEVICE(USB_VID_GTEK, USB_PID_TINYTWIN_3)}, 1286 {USB_DEVICE(USB_VID_GTEK, USB_PID_TINYTWIN_3)},
1287 {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV22)},
1312 {0}, 1288 {0},
1313}; 1289};
1314MODULE_DEVICE_TABLE(usb, af9015_usb_table); 1290MODULE_DEVICE_TABLE(usb, af9015_usb_table);
@@ -1502,7 +1478,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
1502 1478
1503 .i2c_algo = &af9015_i2c_algo, 1479 .i2c_algo = &af9015_i2c_algo,
1504 1480
1505 .num_device_descs = 9, /* check max from dvb-usb.h */ 1481 .num_device_descs = 10, /* check max from dvb-usb.h */
1506 .devices = { 1482 .devices = {
1507 { 1483 {
1508 .name = "Xtensions XD-380", 1484 .name = "Xtensions XD-380",
@@ -1554,6 +1530,11 @@ static struct dvb_usb_device_properties af9015_properties[] = {
1554 .cold_ids = {&af9015_usb_table[20], NULL}, 1530 .cold_ids = {&af9015_usb_table[20], NULL},
1555 .warm_ids = {NULL}, 1531 .warm_ids = {NULL},
1556 }, 1532 },
1533 {
1534 .name = "Sveon STV22 Dual USB DVB-T Tuner HDTV",
1535 .cold_ids = {&af9015_usb_table[37], NULL},
1536 .warm_ids = {NULL},
1537 },
1557 } 1538 }
1558 }, { 1539 }, {
1559 .caps = DVB_USB_IS_AN_I2C_ADAPTER, 1540 .caps = DVB_USB_IS_AN_I2C_ADAPTER,
@@ -1704,33 +1685,11 @@ static int af9015_usb_probe(struct usb_interface *intf,
1704 return ret; 1685 return ret;
1705} 1686}
1706 1687
1707static void af9015_i2c_exit(struct dvb_usb_device *d)
1708{
1709 struct af9015_state *state = d->priv;
1710 deb_info("%s:\n", __func__);
1711
1712 /* remove 2nd I2C adapter */
1713 if (d->state & DVB_USB_STATE_I2C)
1714 i2c_del_adapter(&state->i2c_adap);
1715}
1716
1717static void af9015_usb_device_exit(struct usb_interface *intf)
1718{
1719 struct dvb_usb_device *d = usb_get_intfdata(intf);
1720 deb_info("%s:\n", __func__);
1721
1722 /* remove 2nd I2C adapter */
1723 if (d != NULL && d->desc != NULL)
1724 af9015_i2c_exit(d);
1725
1726 dvb_usb_device_exit(intf);
1727}
1728
1729/* usb specific object needed to register this driver with the usb subsystem */ 1688/* usb specific object needed to register this driver with the usb subsystem */
1730static struct usb_driver af9015_usb_driver = { 1689static struct usb_driver af9015_usb_driver = {
1731 .name = "dvb_usb_af9015", 1690 .name = "dvb_usb_af9015",
1732 .probe = af9015_usb_probe, 1691 .probe = af9015_usb_probe,
1733 .disconnect = af9015_usb_device_exit, 1692 .disconnect = dvb_usb_device_exit,
1734 .id_table = af9015_usb_table, 1693 .id_table = af9015_usb_table,
1735}; 1694};
1736 1695
diff --git a/drivers/media/dvb/dvb-usb/af9015.h b/drivers/media/dvb/dvb-usb/af9015.h
index beb3004f00ba..6252ea6c1904 100644
--- a/drivers/media/dvb/dvb-usb/af9015.h
+++ b/drivers/media/dvb/dvb-usb/af9015.h
@@ -99,7 +99,6 @@ enum af9015_ir_mode {
99}; 99};
100 100
101struct af9015_state { 101struct af9015_state {
102 struct i2c_adapter i2c_adap; /* I2C adapter for 2nd FE */
103 u8 rc_repeat; 102 u8 rc_repeat;
104 u32 rc_keycode; 103 u32 rc_keycode;
105 u8 rc_last[4]; 104 u8 rc_last[4];
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index 7c327b54308e..2cbf19a52e38 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -347,15 +347,17 @@ static struct isl6423_config anysee_isl6423_config = {
347 * PCB: ? 347 * PCB: ?
348 * parts: DNOS404ZH102A(MT352, DTT7579(?)) 348 * parts: DNOS404ZH102A(MT352, DTT7579(?))
349 * 349 *
350 * E30 VID=04b4 PID=861f HW=2 FW=2.1 Product=???????? 350 * E30 VID=04b4 PID=861f HW=2 FW=2.1 "anysee-T(LP)"
351 * PCB: ? 351 * PCB: PCB 507T (rev1.61)
352 * parts: DNOS404ZH103A(ZL10353, DTT7579(?)) 352 * parts: DNOS404ZH103A(ZL10353, DTT7579(?))
353 * OEA=0a OEB=00 OEC=00 OED=ff OEE=00
354 * IOA=45 IOB=ff IOC=00 IOD=ff IOE=00
353 * 355 *
354 * E30 Plus VID=04b4 PID=861f HW=6 FW=1.0 "anysee" 356 * E30 Plus VID=04b4 PID=861f HW=6 FW=1.0 "anysee"
355 * PCB: 507CD (rev1.1) 357 * PCB: 507CD (rev1.1)
356 * parts: DNOS404ZH103A(ZL10353, DTT7579(?)), CST56I01 358 * parts: DNOS404ZH103A(ZL10353, DTT7579(?)), CST56I01
357 * OEA=80 OEB=00 OEC=00 OED=ff OEF=fe 359 * OEA=80 OEB=00 OEC=00 OED=ff OEE=fe
358 * IOA=4f IOB=ff IOC=00 IOD=06 IOF=01 360 * IOA=4f IOB=ff IOC=00 IOD=06 IOE=01
359 * IOD[0] ZL10353 1=enabled 361 * IOD[0] ZL10353 1=enabled
360 * IOA[7] TS 0=enabled 362 * IOA[7] TS 0=enabled
361 * tuner is not behind ZL10353 I2C-gate (no care if gate disabled or not) 363 * tuner is not behind ZL10353 I2C-gate (no care if gate disabled or not)
@@ -363,30 +365,30 @@ static struct isl6423_config anysee_isl6423_config = {
363 * E30 C Plus VID=04b4 PID=861f HW=10 FW=1.0 "anysee-DC(LP)" 365 * E30 C Plus VID=04b4 PID=861f HW=10 FW=1.0 "anysee-DC(LP)"
364 * PCB: 507DC (rev0.2) 366 * PCB: 507DC (rev0.2)
365 * parts: TDA10023, DTOS403IH102B TM, CST56I01 367 * parts: TDA10023, DTOS403IH102B TM, CST56I01
366 * OEA=80 OEB=00 OEC=00 OED=ff OEF=fe 368 * OEA=80 OEB=00 OEC=00 OED=ff OEE=fe
367 * IOA=4f IOB=ff IOC=00 IOD=26 IOF=01 369 * IOA=4f IOB=ff IOC=00 IOD=26 IOE=01
368 * IOD[0] TDA10023 1=enabled 370 * IOD[0] TDA10023 1=enabled
369 * 371 *
370 * E30 S2 Plus VID=04b4 PID=861f HW=11 FW=0.1 "anysee-S2(LP)" 372 * E30 S2 Plus VID=04b4 PID=861f HW=11 FW=0.1 "anysee-S2(LP)"
371 * PCB: 507SI (rev2.1) 373 * PCB: 507SI (rev2.1)
372 * parts: BS2N10WCC01(CX24116, CX24118), ISL6423, TDA8024 374 * parts: BS2N10WCC01(CX24116, CX24118), ISL6423, TDA8024
373 * OEA=80 OEB=00 OEC=ff OED=ff OEF=fe 375 * OEA=80 OEB=00 OEC=ff OED=ff OEE=fe
374 * IOA=4d IOB=ff IOC=00 IOD=26 IOF=01 376 * IOA=4d IOB=ff IOC=00 IOD=26 IOE=01
375 * IOD[0] CX24116 1=enabled 377 * IOD[0] CX24116 1=enabled
376 * 378 *
377 * E30 C Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)" 379 * E30 C Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)"
378 * PCB: 507FA (rev0.4) 380 * PCB: 507FA (rev0.4)
379 * parts: TDA10023, DTOS403IH102B TM, TDA8024 381 * parts: TDA10023, DTOS403IH102B TM, TDA8024
380 * OEA=80 OEB=00 OEC=ff OED=ff OEF=ff 382 * OEA=80 OEB=00 OEC=ff OED=ff OEE=ff
381 * IOA=4d IOB=ff IOC=00 IOD=00 IOF=c0 383 * IOA=4d IOB=ff IOC=00 IOD=00 IOE=c0
382 * IOD[5] TDA10023 1=enabled 384 * IOD[5] TDA10023 1=enabled
383 * IOE[0] tuner 1=enabled 385 * IOE[0] tuner 1=enabled
384 * 386 *
385 * E30 Combo Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)" 387 * E30 Combo Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)"
386 * PCB: 507FA (rev1.1) 388 * PCB: 507FA (rev1.1)
387 * parts: ZL10353, TDA10023, DTOS403IH102B TM, TDA8024 389 * parts: ZL10353, TDA10023, DTOS403IH102B TM, TDA8024
388 * OEA=80 OEB=00 OEC=ff OED=ff OEF=ff 390 * OEA=80 OEB=00 OEC=ff OED=ff OEE=ff
389 * IOA=4d IOB=ff IOC=00 IOD=00 IOF=c0 391 * IOA=4d IOB=ff IOC=00 IOD=00 IOE=c0
390 * DVB-C: 392 * DVB-C:
391 * IOD[5] TDA10023 1=enabled 393 * IOD[5] TDA10023 1=enabled
392 * IOE[0] tuner 1=enabled 394 * IOE[0] tuner 1=enabled
@@ -398,8 +400,8 @@ static struct isl6423_config anysee_isl6423_config = {
398 * E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)" 400 * E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)"
399 * PCB: 508TC (rev0.6) 401 * PCB: 508TC (rev0.6)
400 * parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212) 402 * parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212)
401 * OEA=80 OEB=00 OEC=03 OED=f7 OEF=ff 403 * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
402 * IOA=4d IOB=00 IOC=cc IOD=48 IOF=e4 404 * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4
403 * IOA[7] TS 1=enabled 405 * IOA[7] TS 1=enabled
404 * IOE[4] TDA18212 1=enabled 406 * IOE[4] TDA18212 1=enabled
405 * DVB-C: 407 * DVB-C:
@@ -414,11 +416,34 @@ static struct isl6423_config anysee_isl6423_config = {
414 * E7 S2 VID=1c73 PID=861f HW=19 FW=0.4 AMTCI=0.5 "anysee-E7S2(LP)" 416 * E7 S2 VID=1c73 PID=861f HW=19 FW=0.4 AMTCI=0.5 "anysee-E7S2(LP)"
415 * PCB: 508S2 (rev0.7) 417 * PCB: 508S2 (rev0.7)
416 * parts: DNBU10512IST(STV0903, STV6110), ISL6423 418 * parts: DNBU10512IST(STV0903, STV6110), ISL6423
417 * OEA=80 OEB=00 OEC=03 OED=f7 OEF=ff 419 * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
418 * IOA=4d IOB=00 IOC=c4 IOD=08 IOF=e4 420 * IOA=4d IOB=00 IOC=c4 IOD=08 IOE=e4
419 * IOA[7] TS 1=enabled 421 * IOA[7] TS 1=enabled
420 * IOE[5] STV0903 1=enabled 422 * IOE[5] STV0903 1=enabled
421 * 423 *
424 * E7 PTC VID=1c73 PID=861f HW=21 FW=0.1 AMTCI=?? "anysee-E7PTC(LP)"
425 * PCB: 508PTC (rev0.5)
426 * parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212)
427 * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
428 * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4
429 * IOA[7] TS 1=enabled
430 * IOE[4] TDA18212 1=enabled
431 * DVB-C:
432 * IOD[6] ZL10353 0=disabled
433 * IOD[5] TDA10023 1=enabled
434 * IOE[0] IF 1=enabled
435 * DVB-T:
436 * IOD[5] TDA10023 0=disabled
437 * IOD[6] ZL10353 1=enabled
438 * IOE[0] IF 0=enabled
439 *
440 * E7 S2 VID=1c73 PID=861f HW=22 FW=0.1 AMTCI=?? "anysee-E7PS2(LP)"
441 * PCB: 508PS2 (rev0.4)
442 * parts: DNBU10512IST(STV0903, STV6110), ISL6423
443 * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
444 * IOA=4d IOB=00 IOC=c4 IOD=08 IOE=e4
445 * IOA[7] TS 1=enabled
446 * IOE[5] STV0903 1=enabled
422 */ 447 */
423 448
424static int anysee_frontend_attach(struct dvb_usb_adapter *adap) 449static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
@@ -459,7 +484,7 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
459 state->hw = hw_info[0]; 484 state->hw = hw_info[0];
460 485
461 switch (state->hw) { 486 switch (state->hw) {
462 case ANYSEE_HW_02: /* 2 */ 487 case ANYSEE_HW_507T: /* 2 */
463 /* E30 */ 488 /* E30 */
464 489
465 /* attach demod */ 490 /* attach demod */
@@ -593,7 +618,9 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
593 618
594 break; 619 break;
595 case ANYSEE_HW_508TC: /* 18 */ 620 case ANYSEE_HW_508TC: /* 18 */
621 case ANYSEE_HW_508PTC: /* 21 */
596 /* E7 TC */ 622 /* E7 TC */
623 /* E7 PTC */
597 624
598 /* enable transport stream on IOA[7] */ 625 /* enable transport stream on IOA[7] */
599 ret = anysee_wr_reg_mask(adap->dev, REG_IOA, (1 << 7), 0x80); 626 ret = anysee_wr_reg_mask(adap->dev, REG_IOA, (1 << 7), 0x80);
@@ -650,7 +677,9 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
650 677
651 break; 678 break;
652 case ANYSEE_HW_508S2: /* 19 */ 679 case ANYSEE_HW_508S2: /* 19 */
680 case ANYSEE_HW_508PS2: /* 22 */
653 /* E7 S2 */ 681 /* E7 S2 */
682 /* E7 PS2 */
654 683
655 /* enable transport stream on IOA[7] */ 684 /* enable transport stream on IOA[7] */
656 ret = anysee_wr_reg_mask(adap->dev, REG_IOA, (1 << 7), 0x80); 685 ret = anysee_wr_reg_mask(adap->dev, REG_IOA, (1 << 7), 0x80);
@@ -687,7 +716,7 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
687 deb_info("%s:\n", __func__); 716 deb_info("%s:\n", __func__);
688 717
689 switch (state->hw) { 718 switch (state->hw) {
690 case ANYSEE_HW_02: /* 2 */ 719 case ANYSEE_HW_507T: /* 2 */
691 /* E30 */ 720 /* E30 */
692 721
693 /* attach tuner */ 722 /* attach tuner */
@@ -762,7 +791,9 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
762 791
763 break; 792 break;
764 case ANYSEE_HW_508TC: /* 18 */ 793 case ANYSEE_HW_508TC: /* 18 */
794 case ANYSEE_HW_508PTC: /* 21 */
765 /* E7 TC */ 795 /* E7 TC */
796 /* E7 PTC */
766 797
767 /* enable tuner on IOE[4] */ 798 /* enable tuner on IOE[4] */
768 ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 4), 0x10); 799 ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 4), 0x10);
@@ -775,7 +806,9 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
775 806
776 break; 807 break;
777 case ANYSEE_HW_508S2: /* 19 */ 808 case ANYSEE_HW_508S2: /* 19 */
809 case ANYSEE_HW_508PS2: /* 22 */
778 /* E7 S2 */ 810 /* E7 S2 */
811 /* E7 PS2 */
779 812
780 /* attach tuner */ 813 /* attach tuner */
781 fe = dvb_attach(stv6110_attach, adap->fe, 814 fe = dvb_attach(stv6110_attach, adap->fe,
diff --git a/drivers/media/dvb/dvb-usb/anysee.h b/drivers/media/dvb/dvb-usb/anysee.h
index a7673aa1e007..ad6ccd1ea2d9 100644
--- a/drivers/media/dvb/dvb-usb/anysee.h
+++ b/drivers/media/dvb/dvb-usb/anysee.h
@@ -61,13 +61,15 @@ struct anysee_state {
61 u8 seq; 61 u8 seq;
62}; 62};
63 63
64#define ANYSEE_HW_02 2 /* E30 */ 64#define ANYSEE_HW_507T 2 /* E30 */
65#define ANYSEE_HW_507CD 6 /* E30 Plus */ 65#define ANYSEE_HW_507CD 6 /* E30 Plus */
66#define ANYSEE_HW_507DC 10 /* E30 C Plus */ 66#define ANYSEE_HW_507DC 10 /* E30 C Plus */
67#define ANYSEE_HW_507SI 11 /* E30 S2 Plus */ 67#define ANYSEE_HW_507SI 11 /* E30 S2 Plus */
68#define ANYSEE_HW_507FA 15 /* E30 Combo Plus / E30 C Plus */ 68#define ANYSEE_HW_507FA 15 /* E30 Combo Plus / E30 C Plus */
69#define ANYSEE_HW_508TC 18 /* E7 TC */ 69#define ANYSEE_HW_508TC 18 /* E7 TC */
70#define ANYSEE_HW_508S2 19 /* E7 S2 */ 70#define ANYSEE_HW_508S2 19 /* E7 S2 */
71#define ANYSEE_HW_508PTC 21 /* E7 PTC Plus */
72#define ANYSEE_HW_508PS2 22 /* E7 PS2 Plus */
71 73
72#define REG_IOA 0x80 /* Port A (bit addressable) */ 74#define REG_IOA 0x80 /* Port A (bit addressable) */
73#define REG_IOB 0x90 /* Port B (bit addressable) */ 75#define REG_IOB 0x90 /* Port B (bit addressable) */
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index c519ad5eb731..d0ea5b64f6b4 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -17,6 +17,7 @@
17#include "mt2266.h" 17#include "mt2266.h"
18#include "tuner-xc2028.h" 18#include "tuner-xc2028.h"
19#include "xc5000.h" 19#include "xc5000.h"
20#include "xc4000.h"
20#include "s5h1411.h" 21#include "s5h1411.h"
21#include "dib0070.h" 22#include "dib0070.h"
22#include "dib0090.h" 23#include "dib0090.h"
@@ -2655,6 +2656,156 @@ static int xc5000_tuner_attach(struct dvb_usb_adapter *adap)
2655 == NULL ? -ENODEV : 0; 2656 == NULL ? -ENODEV : 0;
2656} 2657}
2657 2658
2659static int dib0700_xc4000_tuner_callback(void *priv, int component,
2660 int command, int arg)
2661{
2662 struct dvb_usb_adapter *adap = priv;
2663
2664 if (command == XC4000_TUNER_RESET) {
2665 /* Reset the tuner */
2666 dib7000p_set_gpio(adap->fe, 8, 0, 0);
2667 msleep(10);
2668 dib7000p_set_gpio(adap->fe, 8, 0, 1);
2669 } else {
2670 err("xc4000: unknown tuner callback command: %d\n", command);
2671 return -EINVAL;
2672 }
2673
2674 return 0;
2675}
2676
2677static struct dibx000_agc_config stk7700p_7000p_xc4000_agc_config = {
2678 .band_caps = BAND_UHF | BAND_VHF,
2679 .setup = 0x64,
2680 .inv_gain = 0x02c8,
2681 .time_stabiliz = 0x15,
2682 .alpha_level = 0x00,
2683 .thlock = 0x76,
2684 .wbd_inv = 0x01,
2685 .wbd_ref = 0x0b33,
2686 .wbd_sel = 0x00,
2687 .wbd_alpha = 0x02,
2688 .agc1_max = 0x00,
2689 .agc1_min = 0x00,
2690 .agc2_max = 0x9b26,
2691 .agc2_min = 0x26ca,
2692 .agc1_pt1 = 0x00,
2693 .agc1_pt2 = 0x00,
2694 .agc1_pt3 = 0x00,
2695 .agc1_slope1 = 0x00,
2696 .agc1_slope2 = 0x00,
2697 .agc2_pt1 = 0x00,
2698 .agc2_pt2 = 0x80,
2699 .agc2_slope1 = 0x1d,
2700 .agc2_slope2 = 0x1d,
2701 .alpha_mant = 0x11,
2702 .alpha_exp = 0x1b,
2703 .beta_mant = 0x17,
2704 .beta_exp = 0x33,
2705 .perform_agc_softsplit = 0x00,
2706};
2707
2708static struct dibx000_bandwidth_config stk7700p_xc4000_pll_config = {
2709 60000, 30000, /* internal, sampling */
2710 1, 8, 3, 1, 0, /* pll_cfg: prediv, ratio, range, reset, bypass */
2711 0, 0, 1, 1, 0, /* misc: refdiv, bypclk_div, IO_CLK_en_core, */
2712 /* ADClkSrc, modulo */
2713 (3 << 14) | (1 << 12) | 524, /* sad_cfg: refsel, sel, freq_15k */
2714 39370534, /* ifreq */
2715 20452225, /* timf */
2716 30000000 /* xtal */
2717};
2718
2719/* FIXME: none of these inputs are validated yet */
2720static struct dib7000p_config pctv_340e_config = {
2721 .output_mpeg2_in_188_bytes = 1,
2722
2723 .agc_config_count = 1,
2724 .agc = &stk7700p_7000p_xc4000_agc_config,
2725 .bw = &stk7700p_xc4000_pll_config,
2726
2727 .gpio_dir = DIB7000M_GPIO_DEFAULT_DIRECTIONS,
2728 .gpio_val = DIB7000M_GPIO_DEFAULT_VALUES,
2729 .gpio_pwm_pos = DIB7000M_GPIO_DEFAULT_PWM_POS,
2730};
2731
2732/* PCTV 340e GPIOs map:
2733 dib0700:
2734 GPIO2 - CX25843 sleep
2735 GPIO3 - CS5340 reset
2736 GPIO5 - IRD
2737 GPIO6 - Power Supply
2738 GPIO8 - LNA (1=off 0=on)
2739 GPIO10 - CX25843 reset
2740 dib7000:
2741 GPIO8 - xc4000 reset
2742 */
2743static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap)
2744{
2745 struct dib0700_state *st = adap->dev->priv;
2746
2747 /* Power Supply on */
2748 dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0);
2749 msleep(50);
2750 dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
2751 msleep(100); /* Allow power supply to settle before probing */
2752
2753 /* cx25843 reset */
2754 dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
2755 msleep(1); /* cx25843 datasheet say 350us required */
2756 dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
2757
2758 /* LNA off for now */
2759 dib0700_set_gpio(adap->dev, GPIO8, GPIO_OUT, 1);
2760
2761 /* Put the CX25843 to sleep for now since we're in digital mode */
2762 dib0700_set_gpio(adap->dev, GPIO2, GPIO_OUT, 1);
2763
2764 /* FIXME: not verified yet */
2765 dib0700_ctrl_clock(adap->dev, 72, 1);
2766
2767 msleep(500);
2768
2769 if (dib7000pc_detection(&adap->dev->i2c_adap) == 0) {
2770 /* Demodulator not found for some reason? */
2771 return -ENODEV;
2772 }
2773
2774 adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x12,
2775 &pctv_340e_config);
2776 st->is_dib7000pc = 1;
2777
2778 return adap->fe == NULL ? -ENODEV : 0;
2779}
2780
2781static struct xc4000_config dib7000p_xc4000_tunerconfig = {
2782 .i2c_address = 0x61,
2783 .default_pm = 1,
2784 .dvb_amplitude = 0,
2785 .set_smoothedcvbs = 0,
2786 .if_khz = 5400
2787};
2788
2789static int xc4000_tuner_attach(struct dvb_usb_adapter *adap)
2790{
2791 struct i2c_adapter *tun_i2c;
2792
2793 /* The xc4000 is not on the main i2c bus */
2794 tun_i2c = dib7000p_get_i2c_master(adap->fe,
2795 DIBX000_I2C_INTERFACE_TUNER, 1);
2796 if (tun_i2c == NULL) {
2797 printk(KERN_ERR "Could not reach tuner i2c bus\n");
2798 return 0;
2799 }
2800
2801 /* Setup the reset callback */
2802 adap->fe->callback = dib0700_xc4000_tuner_callback;
2803
2804 return dvb_attach(xc4000_attach, adap->fe, tun_i2c,
2805 &dib7000p_xc4000_tunerconfig)
2806 == NULL ? -ENODEV : 0;
2807}
2808
2658static struct lgdt3305_config hcw_lgdt3305_config = { 2809static struct lgdt3305_config hcw_lgdt3305_config = {
2659 .i2c_addr = 0x0e, 2810 .i2c_addr = 0x0e,
2660 .mpeg_mode = LGDT3305_MPEG_PARALLEL, 2811 .mpeg_mode = LGDT3305_MPEG_PARALLEL,
@@ -2802,6 +2953,8 @@ struct usb_device_id dib0700_usb_id_table[] = {
2802 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE7090PVR) }, 2953 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE7090PVR) },
2803 { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2) }, 2954 { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2) },
2804/* 75 */{ USB_DEVICE(USB_VID_MEDION, USB_PID_CREATIX_CTX1921) }, 2955/* 75 */{ USB_DEVICE(USB_VID_MEDION, USB_PID_CREATIX_CTX1921) },
2956 { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV340E) },
2957 { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV340E_SE) },
2805 { 0 } /* Terminating entry */ 2958 { 0 } /* Terminating entry */
2806}; 2959};
2807MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table); 2960MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -3772,6 +3925,41 @@ struct dvb_usb_device_properties dib0700_devices[] = {
3772 RC_TYPE_NEC, 3925 RC_TYPE_NEC,
3773 .change_protocol = dib0700_change_protocol, 3926 .change_protocol = dib0700_change_protocol,
3774 }, 3927 },
3928 }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
3929 .num_adapters = 1,
3930 .adapter = {
3931 {
3932 .frontend_attach = pctv340e_frontend_attach,
3933 .tuner_attach = xc4000_tuner_attach,
3934
3935 DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
3936
3937 .size_of_priv = sizeof(struct
3938 dib0700_adapter_state),
3939 },
3940 },
3941
3942 .num_device_descs = 2,
3943 .devices = {
3944 { "Pinnacle PCTV 340e HD Pro USB Stick",
3945 { &dib0700_usb_id_table[76], NULL },
3946 { NULL },
3947 },
3948 { "Pinnacle PCTV Hybrid Stick Solo",
3949 { &dib0700_usb_id_table[77], NULL },
3950 { NULL },
3951 },
3952 },
3953 .rc.core = {
3954 .rc_interval = DEFAULT_RC_INTERVAL,
3955 .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
3956 .module_name = "dib0700",
3957 .rc_query = dib0700_rc_query_old_firmware,
3958 .allowed_protos = RC_TYPE_RC5 |
3959 RC_TYPE_RC6 |
3960 RC_TYPE_NEC,
3961 .change_protocol = dib0700_change_protocol,
3962 },
3775 }, 3963 },
3776}; 3964};
3777 3965
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 21b15495d2d7..2a79b8fb3e8d 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -230,6 +230,8 @@
230#define USB_PID_PINNACLE_PCTV310E 0x3211 230#define USB_PID_PINNACLE_PCTV310E 0x3211
231#define USB_PID_PINNACLE_PCTV801E 0x023a 231#define USB_PID_PINNACLE_PCTV801E 0x023a
232#define USB_PID_PINNACLE_PCTV801E_SE 0x023b 232#define USB_PID_PINNACLE_PCTV801E_SE 0x023b
233#define USB_PID_PINNACLE_PCTV340E 0x023d
234#define USB_PID_PINNACLE_PCTV340E_SE 0x023e
233#define USB_PID_PINNACLE_PCTV73A 0x0243 235#define USB_PID_PINNACLE_PCTV73A 0x0243
234#define USB_PID_PINNACLE_PCTV73ESE 0x0245 236#define USB_PID_PINNACLE_PCTV73ESE 0x0245
235#define USB_PID_PINNACLE_PCTV74E 0x0246 237#define USB_PID_PINNACLE_PCTV74E 0x0246
@@ -313,6 +315,7 @@
313#define USB_PID_FRIIO_WHITE 0x0001 315#define USB_PID_FRIIO_WHITE 0x0001
314#define USB_PID_TVWAY_PLUS 0x0002 316#define USB_PID_TVWAY_PLUS 0x0002
315#define USB_PID_SVEON_STV20 0xe39d 317#define USB_PID_SVEON_STV20 0xe39d
318#define USB_PID_SVEON_STV22 0xe401
316#define USB_PID_AZUREWAVE_AZ6027 0x3275 319#define USB_PID_AZUREWAVE_AZ6027 0x3275
317#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4 320#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4
318#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac 321#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 76a80968482a..7d35d078342b 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -85,7 +85,7 @@ static inline u8 rc5_data(struct rc_map_table *key)
85 return key->scancode & 0xff; 85 return key->scancode & 0xff;
86} 86}
87 87
88static inline u8 rc5_scan(struct rc_map_table *key) 88static inline u16 rc5_scan(struct rc_map_table *key)
89{ 89{
90 return key->scancode & 0xffff; 90 return key->scancode & 0xffff;
91} 91}
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.h b/drivers/media/dvb/dvb-usb/gp8psk.h
index 831749a518cb..ed32b9da4843 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.h
+++ b/drivers/media/dvb/dvb-usb/gp8psk.h
@@ -78,9 +78,6 @@ extern int dvb_usb_gp8psk_debug;
78#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */ 78#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */
79 79
80#define GET_USB_SPEED 0x07 80#define GET_USB_SPEED 0x07
81 #define USB_SPEED_LOW 0
82 #define USB_SPEED_FULL 1
83 #define USB_SPEED_HIGH 2
84 81
85#define RESET_FX2 0x13 82#define RESET_FX2 0x13
86 83
diff --git a/drivers/media/dvb/dvb-usb/technisat-usb2.c b/drivers/media/dvb/dvb-usb/technisat-usb2.c
index 08f8842ad280..473b95ed4d52 100644
--- a/drivers/media/dvb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/dvb/dvb-usb/technisat-usb2.c
@@ -765,10 +765,8 @@ static void technisat_usb2_disconnect(struct usb_interface *intf)
765 /* work and stuff was only created when the device is is hot-state */ 765 /* work and stuff was only created when the device is is hot-state */
766 if (dev != NULL) { 766 if (dev != NULL) {
767 struct technisat_usb2_state *state = dev->priv; 767 struct technisat_usb2_state *state = dev->priv;
768 if (state != NULL) { 768 if (state != NULL)
769 cancel_delayed_work_sync(&state->green_led_work); 769 cancel_delayed_work_sync(&state->green_led_work);
770 flush_scheduled_work();
771 }
772 } 770 }
773 771
774 dvb_usb_device_exit(intf); 772 dvb_usb_device_exit(intf);
diff --git a/drivers/media/dvb/dvb-usb/vp7045.h b/drivers/media/dvb/dvb-usb/vp7045.h
index 969688f85267..cf5ec46f8bb1 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.h
+++ b/drivers/media/dvb/dvb-usb/vp7045.h
@@ -36,9 +36,6 @@
36 #define Tuner_Power_OFF 0 36 #define Tuner_Power_OFF 0
37 37
38#define GET_USB_SPEED 0x07 38#define GET_USB_SPEED 0x07
39 #define USB_SPEED_LOW 0
40 #define USB_SPEED_FULL 1
41 #define USB_SPEED_HIGH 2
42 39
43#define LOCK_TUNER_COMMAND 0x09 40#define LOCK_TUNER_COMMAND 0x09
44 41
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index 21c52e3b522e..489ae8245867 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -1208,7 +1208,7 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
1208 if (r->response != AVC_RESPONSE_ACCEPTED) { 1208 if (r->response != AVC_RESPONSE_ACCEPTED) {
1209 dev_err(fdtv->device, 1209 dev_err(fdtv->device,
1210 "CA PMT failed with response 0x%x\n", r->response); 1210 "CA PMT failed with response 0x%x\n", r->response);
1211 ret = -EFAULT; 1211 ret = -EACCES;
1212 } 1212 }
1213out: 1213out:
1214 mutex_unlock(&fdtv->avc_mutex); 1214 mutex_unlock(&fdtv->avc_mutex);
diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c
index 8ffb565f0704..e5ebdbfe8c19 100644
--- a/drivers/media/dvb/firewire/firedtv-ci.c
+++ b/drivers/media/dvb/firewire/firedtv-ci.c
@@ -45,11 +45,6 @@ static int fdtv_get_ca_flags(struct firedtv_tuner_status *stat)
45 return flags; 45 return flags;
46} 46}
47 47
48static int fdtv_ca_reset(struct firedtv *fdtv)
49{
50 return avc_ca_reset(fdtv) ? -EFAULT : 0;
51}
52
53static int fdtv_ca_get_caps(void *arg) 48static int fdtv_ca_get_caps(void *arg)
54{ 49{
55 struct ca_caps *cap = arg; 50 struct ca_caps *cap = arg;
@@ -65,12 +60,14 @@ static int fdtv_ca_get_slot_info(struct firedtv *fdtv, void *arg)
65{ 60{
66 struct firedtv_tuner_status stat; 61 struct firedtv_tuner_status stat;
67 struct ca_slot_info *slot = arg; 62 struct ca_slot_info *slot = arg;
63 int err;
68 64
69 if (avc_tuner_status(fdtv, &stat)) 65 err = avc_tuner_status(fdtv, &stat);
70 return -EFAULT; 66 if (err)
67 return err;
71 68
72 if (slot->num != 0) 69 if (slot->num != 0)
73 return -EFAULT; 70 return -EACCES;
74 71
75 slot->type = CA_CI; 72 slot->type = CA_CI;
76 slot->flags = fdtv_get_ca_flags(&stat); 73 slot->flags = fdtv_get_ca_flags(&stat);
@@ -81,21 +78,21 @@ static int fdtv_ca_app_info(struct firedtv *fdtv, void *arg)
81{ 78{
82 struct ca_msg *reply = arg; 79 struct ca_msg *reply = arg;
83 80
84 return avc_ca_app_info(fdtv, reply->msg, &reply->length) ? -EFAULT : 0; 81 return avc_ca_app_info(fdtv, reply->msg, &reply->length);
85} 82}
86 83
87static int fdtv_ca_info(struct firedtv *fdtv, void *arg) 84static int fdtv_ca_info(struct firedtv *fdtv, void *arg)
88{ 85{
89 struct ca_msg *reply = arg; 86 struct ca_msg *reply = arg;
90 87
91 return avc_ca_info(fdtv, reply->msg, &reply->length) ? -EFAULT : 0; 88 return avc_ca_info(fdtv, reply->msg, &reply->length);
92} 89}
93 90
94static int fdtv_ca_get_mmi(struct firedtv *fdtv, void *arg) 91static int fdtv_ca_get_mmi(struct firedtv *fdtv, void *arg)
95{ 92{
96 struct ca_msg *reply = arg; 93 struct ca_msg *reply = arg;
97 94
98 return avc_ca_get_mmi(fdtv, reply->msg, &reply->length) ? -EFAULT : 0; 95 return avc_ca_get_mmi(fdtv, reply->msg, &reply->length);
99} 96}
100 97
101static int fdtv_ca_get_msg(struct firedtv *fdtv, void *arg) 98static int fdtv_ca_get_msg(struct firedtv *fdtv, void *arg)
@@ -111,14 +108,15 @@ static int fdtv_ca_get_msg(struct firedtv *fdtv, void *arg)
111 err = fdtv_ca_info(fdtv, arg); 108 err = fdtv_ca_info(fdtv, arg);
112 break; 109 break;
113 default: 110 default:
114 if (avc_tuner_status(fdtv, &stat)) 111 err = avc_tuner_status(fdtv, &stat);
115 err = -EFAULT; 112 if (err)
116 else if (stat.ca_mmi == 1) 113 break;
114 if (stat.ca_mmi == 1)
117 err = fdtv_ca_get_mmi(fdtv, arg); 115 err = fdtv_ca_get_mmi(fdtv, arg);
118 else { 116 else {
119 dev_info(fdtv->device, "unhandled CA message 0x%08x\n", 117 dev_info(fdtv->device, "unhandled CA message 0x%08x\n",
120 fdtv->ca_last_command); 118 fdtv->ca_last_command);
121 err = -EFAULT; 119 err = -EACCES;
122 } 120 }
123 } 121 }
124 fdtv->ca_last_command = 0; 122 fdtv->ca_last_command = 0;
@@ -141,7 +139,7 @@ static int fdtv_ca_pmt(struct firedtv *fdtv, void *arg)
141 data_length = msg->msg[3]; 139 data_length = msg->msg[3];
142 } 140 }
143 141
144 return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length) ? -EFAULT : 0; 142 return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length);
145} 143}
146 144
147static int fdtv_ca_send_msg(struct firedtv *fdtv, void *arg) 145static int fdtv_ca_send_msg(struct firedtv *fdtv, void *arg)
@@ -170,7 +168,7 @@ static int fdtv_ca_send_msg(struct firedtv *fdtv, void *arg)
170 default: 168 default:
171 dev_err(fdtv->device, "unhandled CA message 0x%08x\n", 169 dev_err(fdtv->device, "unhandled CA message 0x%08x\n",
172 fdtv->ca_last_command); 170 fdtv->ca_last_command);
173 err = -EFAULT; 171 err = -EACCES;
174 } 172 }
175 return err; 173 return err;
176} 174}
@@ -184,7 +182,7 @@ static int fdtv_ca_ioctl(struct file *file, unsigned int cmd, void *arg)
184 182
185 switch (cmd) { 183 switch (cmd) {
186 case CA_RESET: 184 case CA_RESET:
187 err = fdtv_ca_reset(fdtv); 185 err = avc_ca_reset(fdtv);
188 break; 186 break;
189 case CA_GET_CAP: 187 case CA_GET_CAP:
190 err = fdtv_ca_get_caps(arg); 188 err = fdtv_ca_get_caps(arg);
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 44b816f2601e..32e08e351525 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -49,6 +49,27 @@ config DVB_STV6110x
49 help 49 help
50 A Silicon tuner that supports DVB-S and DVB-S2 modes 50 A Silicon tuner that supports DVB-S and DVB-S2 modes
51 51
52comment "Multistandard (cable + terrestrial) frontends"
53 depends on DVB_CORE
54
55config DVB_DRXK
56 tristate "Micronas DRXK based"
57 depends on DVB_CORE && I2C
58 default m if DVB_FE_CUSTOMISE
59 help
60 Micronas DRX-K DVB-C/T demodulator.
61
62 Say Y when you want to support this frontend.
63
64config DVB_TDA18271C2DD
65 tristate "NXP TDA18271C2 silicon tuner"
66 depends on DVB_CORE && I2C
67 default m if DVB_FE_CUSTOMISE
68 help
69 NXP TDA18271 silicon tuner.
70
71 Say Y when you want to support this tuner.
72
52comment "DVB-S (satellite) frontends" 73comment "DVB-S (satellite) frontends"
53 depends on DVB_CORE 74 depends on DVB_CORE
54 75
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 2f3a6f736d64..6a6ba053ead4 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -10,6 +10,7 @@ stv0900-objs = stv0900_core.o stv0900_sw.o
10au8522-objs = au8522_dig.o au8522_decoder.o 10au8522-objs = au8522_dig.o au8522_decoder.o
11drxd-objs = drxd_firm.o drxd_hard.o 11drxd-objs = drxd_firm.o drxd_hard.o
12cxd2820r-objs = cxd2820r_core.o cxd2820r_c.o cxd2820r_t.o cxd2820r_t2.o 12cxd2820r-objs = cxd2820r_core.o cxd2820r_c.o cxd2820r_t.o cxd2820r_t2.o
13drxk-objs := drxk_hard.o
13 14
14obj-$(CONFIG_DVB_PLL) += dvb-pll.o 15obj-$(CONFIG_DVB_PLL) += dvb-pll.o
15obj-$(CONFIG_DVB_STV0299) += stv0299.o 16obj-$(CONFIG_DVB_STV0299) += stv0299.o
@@ -88,4 +89,6 @@ obj-$(CONFIG_DVB_MB86A20S) += mb86a20s.o
88obj-$(CONFIG_DVB_IX2505V) += ix2505v.o 89obj-$(CONFIG_DVB_IX2505V) += ix2505v.o
89obj-$(CONFIG_DVB_STV0367) += stv0367.o 90obj-$(CONFIG_DVB_STV0367) += stv0367.o
90obj-$(CONFIG_DVB_CXD2820R) += cxd2820r.o 91obj-$(CONFIG_DVB_CXD2820R) += cxd2820r.o
92obj-$(CONFIG_DVB_DRXK) += drxk.o
93obj-$(CONFIG_DVB_TDA18271C2DD) += tda18271c2dd.o
91 94
diff --git a/drivers/media/dvb/frontends/au8522_decoder.c b/drivers/media/dvb/frontends/au8522_decoder.c
index b537891a4cc9..2b248c12f404 100644
--- a/drivers/media/dvb/frontends/au8522_decoder.c
+++ b/drivers/media/dvb/frontends/au8522_decoder.c
@@ -692,7 +692,7 @@ static int au8522_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
692 /* Interrogate the decoder to see if we are getting a real signal */ 692 /* Interrogate the decoder to see if we are getting a real signal */
693 lock_status = au8522_readreg(state, 0x00); 693 lock_status = au8522_readreg(state, 0x00);
694 if (lock_status == 0xa2) 694 if (lock_status == 0xa2)
695 vt->signal = 0x01; 695 vt->signal = 0xffff;
696 else 696 else
697 vt->signal = 0x00; 697 vt->signal = 0x00;
698 698
diff --git a/drivers/media/dvb/frontends/cx24113.c b/drivers/media/dvb/frontends/cx24113.c
index e9ee55592fd3..c341d57d5e81 100644
--- a/drivers/media/dvb/frontends/cx24113.c
+++ b/drivers/media/dvb/frontends/cx24113.c
@@ -31,8 +31,8 @@
31 31
32static int debug; 32static int debug;
33 33
34#define info(args...) do { printk(KERN_INFO "CX24113: " args); } while (0) 34#define cx_info(args...) do { printk(KERN_INFO "CX24113: " args); } while (0)
35#define err(args...) do { printk(KERN_ERR "CX24113: " args); } while (0) 35#define cx_err(args...) do { printk(KERN_ERR "CX24113: " args); } while (0)
36 36
37#define dprintk(args...) \ 37#define dprintk(args...) \
38 do { \ 38 do { \
@@ -341,7 +341,7 @@ static void cx24113_calc_pll_nf(struct cx24113_state *state, u16 *n, s32 *f)
341 } while (N < 6 && R < 3); 341 } while (N < 6 && R < 3);
342 342
343 if (N < 6) { 343 if (N < 6) {
344 err("strange frequency: N < 6\n"); 344 cx_err("strange frequency: N < 6\n");
345 return; 345 return;
346 } 346 }
347 F = freq_hz; 347 F = freq_hz;
@@ -563,7 +563,7 @@ struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe,
563 kzalloc(sizeof(struct cx24113_state), GFP_KERNEL); 563 kzalloc(sizeof(struct cx24113_state), GFP_KERNEL);
564 int rc; 564 int rc;
565 if (state == NULL) { 565 if (state == NULL) {
566 err("Unable to kzalloc\n"); 566 cx_err("Unable to kzalloc\n");
567 goto error; 567 goto error;
568 } 568 }
569 569
@@ -571,7 +571,7 @@ struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe,
571 state->config = config; 571 state->config = config;
572 state->i2c = i2c; 572 state->i2c = i2c;
573 573
574 info("trying to detect myself\n"); 574 cx_info("trying to detect myself\n");
575 575
576 /* making a dummy read, because of some expected troubles 576 /* making a dummy read, because of some expected troubles
577 * after power on */ 577 * after power on */
@@ -579,24 +579,24 @@ struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe,
579 579
580 rc = cx24113_readreg(state, 0x00); 580 rc = cx24113_readreg(state, 0x00);
581 if (rc < 0) { 581 if (rc < 0) {
582 info("CX24113 not found.\n"); 582 cx_info("CX24113 not found.\n");
583 goto error; 583 goto error;
584 } 584 }
585 state->rev = rc; 585 state->rev = rc;
586 586
587 switch (rc) { 587 switch (rc) {
588 case 0x43: 588 case 0x43:
589 info("detected CX24113 variant\n"); 589 cx_info("detected CX24113 variant\n");
590 break; 590 break;
591 case REV_CX24113: 591 case REV_CX24113:
592 info("successfully detected\n"); 592 cx_info("successfully detected\n");
593 break; 593 break;
594 default: 594 default:
595 err("unsupported device id: %x\n", state->rev); 595 cx_err("unsupported device id: %x\n", state->rev);
596 goto error; 596 goto error;
597 } 597 }
598 state->ver = cx24113_readreg(state, 0x01); 598 state->ver = cx24113_readreg(state, 0x01);
599 info("version: %x\n", state->ver); 599 cx_info("version: %x\n", state->ver);
600 600
601 /* create dvb_frontend */ 601 /* create dvb_frontend */
602 memcpy(&fe->ops.tuner_ops, &cx24113_tuner_ops, 602 memcpy(&fe->ops.tuner_ops, &cx24113_tuner_ops,
diff --git a/drivers/media/dvb/frontends/cx24116.c b/drivers/media/dvb/frontends/cx24116.c
index 95c6465b87a1..ccd05255d527 100644
--- a/drivers/media/dvb/frontends/cx24116.c
+++ b/drivers/media/dvb/frontends/cx24116.c
@@ -1452,11 +1452,7 @@ tuned: /* Set/Reset B/W */
1452 cmd.args[0x00] = CMD_BANDWIDTH; 1452 cmd.args[0x00] = CMD_BANDWIDTH;
1453 cmd.args[0x01] = 0x00; 1453 cmd.args[0x01] = 0x00;
1454 cmd.len = 0x02; 1454 cmd.len = 0x02;
1455 ret = cx24116_cmd_execute(fe, &cmd); 1455 return cx24116_cmd_execute(fe, &cmd);
1456 if (ret != 0)
1457 return ret;
1458
1459 return ret;
1460} 1456}
1461 1457
1462static int cx24116_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *params, 1458static int cx24116_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *params,
diff --git a/drivers/media/dvb/frontends/cxd2820r.h b/drivers/media/dvb/frontends/cxd2820r.h
index ad17845123d9..2906582dc94c 100644
--- a/drivers/media/dvb/frontends/cxd2820r.h
+++ b/drivers/media/dvb/frontends/cxd2820r.h
@@ -55,13 +55,13 @@ struct cxd2820r_config {
55 * Default: 0 55 * Default: 0
56 * Values: 0, 1 56 * Values: 0, 1
57 */ 57 */
58 int if_agc_polarity:1; 58 bool if_agc_polarity;
59 59
60 /* Spectrum inversion. 60 /* Spectrum inversion.
61 * Default: 0 61 * Default: 0
62 * Values: 0, 1 62 * Values: 0, 1
63 */ 63 */
64 int spec_inv:1; 64 bool spec_inv;
65 65
66 /* IFs for all used modes. 66 /* IFs for all used modes.
67 * Default: none, must set 67 * Default: none, must set
diff --git a/drivers/media/dvb/frontends/cxd2820r_core.c b/drivers/media/dvb/frontends/cxd2820r_core.c
index 0779f69db793..d416e85589e1 100644
--- a/drivers/media/dvb/frontends/cxd2820r_core.c
+++ b/drivers/media/dvb/frontends/cxd2820r_core.c
@@ -314,6 +314,8 @@ static int cxd2820r_set_frontend(struct dvb_frontend *fe,
314 } else if (c->delivery_system == SYS_DVBT2) { 314 } else if (c->delivery_system == SYS_DVBT2) {
315 /* DVB-T => DVB-T2 */ 315 /* DVB-T => DVB-T2 */
316 ret = cxd2820r_sleep_t(fe); 316 ret = cxd2820r_sleep_t(fe);
317 if (ret)
318 break;
317 ret = cxd2820r_set_frontend_t2(fe, p); 319 ret = cxd2820r_set_frontend_t2(fe, p);
318 } 320 }
319 break; 321 break;
@@ -324,6 +326,8 @@ static int cxd2820r_set_frontend(struct dvb_frontend *fe,
324 } else if (c->delivery_system == SYS_DVBT) { 326 } else if (c->delivery_system == SYS_DVBT) {
325 /* DVB-T2 => DVB-T */ 327 /* DVB-T2 => DVB-T */
326 ret = cxd2820r_sleep_t2(fe); 328 ret = cxd2820r_sleep_t2(fe);
329 if (ret)
330 break;
327 ret = cxd2820r_set_frontend_t(fe, p); 331 ret = cxd2820r_set_frontend_t(fe, p);
328 } 332 }
329 break; 333 break;
@@ -740,12 +744,13 @@ static int cxd2820r_tuner_i2c_xfer(struct i2c_adapter *i2c_adap,
740 struct i2c_msg msg[], int num) 744 struct i2c_msg msg[], int num)
741{ 745{
742 struct cxd2820r_priv *priv = i2c_get_adapdata(i2c_adap); 746 struct cxd2820r_priv *priv = i2c_get_adapdata(i2c_adap);
743 u8 obuf[msg[0].len + 2]; 747 int ret;
748 u8 *obuf = kmalloc(msg[0].len + 2, GFP_KERNEL);
744 struct i2c_msg msg2[2] = { 749 struct i2c_msg msg2[2] = {
745 { 750 {
746 .addr = priv->cfg.i2c_address, 751 .addr = priv->cfg.i2c_address,
747 .flags = 0, 752 .flags = 0,
748 .len = sizeof(obuf), 753 .len = msg[0].len + 2,
749 .buf = obuf, 754 .buf = obuf,
750 }, { 755 }, {
751 .addr = priv->cfg.i2c_address, 756 .addr = priv->cfg.i2c_address,
@@ -755,15 +760,24 @@ static int cxd2820r_tuner_i2c_xfer(struct i2c_adapter *i2c_adap,
755 } 760 }
756 }; 761 };
757 762
763 if (!obuf)
764 return -ENOMEM;
765
758 obuf[0] = 0x09; 766 obuf[0] = 0x09;
759 obuf[1] = (msg[0].addr << 1); 767 obuf[1] = (msg[0].addr << 1);
760 if (num == 2) { /* I2C read */ 768 if (num == 2) { /* I2C read */
761 obuf[1] = (msg[0].addr << 1) | I2C_M_RD; /* I2C RD flag */ 769 obuf[1] = (msg[0].addr << 1) | I2C_M_RD; /* I2C RD flag */
762 msg2[0].len = sizeof(obuf) - 1; /* maybe HW bug ? */ 770 msg2[0].len = msg[0].len + 2 - 1; /* '-1' maybe HW bug ? */
763 } 771 }
764 memcpy(&obuf[2], msg[0].buf, msg[0].len); 772 memcpy(&obuf[2], msg[0].buf, msg[0].len);
765 773
766 return i2c_transfer(priv->i2c, msg2, num); 774 ret = i2c_transfer(priv->i2c, msg2, num);
775 if (ret < 0)
776 warn("tuner i2c failed ret:%d", ret);
777
778 kfree(obuf);
779
780 return ret;
767} 781}
768 782
769static struct i2c_algorithm cxd2820r_tuner_i2c_algo = { 783static struct i2c_algorithm cxd2820r_tuner_i2c_algo = {
diff --git a/drivers/media/dvb/frontends/cxd2820r_priv.h b/drivers/media/dvb/frontends/cxd2820r_priv.h
index 25adbeefa6d3..0c0ebc9d5c4a 100644
--- a/drivers/media/dvb/frontends/cxd2820r_priv.h
+++ b/drivers/media/dvb/frontends/cxd2820r_priv.h
@@ -55,13 +55,13 @@ struct cxd2820r_priv {
55 struct mutex fe_lock; /* FE lock */ 55 struct mutex fe_lock; /* FE lock */
56 int active_fe:2; /* FE lock, -1=NONE, 0=DVB-T/T2, 1=DVB-C */ 56 int active_fe:2; /* FE lock, -1=NONE, 0=DVB-T/T2, 1=DVB-C */
57 57
58 int ber_running:1; 58 bool ber_running;
59 59
60 u8 bank[2]; 60 u8 bank[2];
61 u8 gpio[3]; 61 u8 gpio[3];
62 62
63 fe_delivery_system_t delivery_system; 63 fe_delivery_system_t delivery_system;
64 int last_tune_failed:1; /* for switch between T and T2 tune */ 64 bool last_tune_failed; /* for switch between T and T2 tune */
65}; 65};
66 66
67/* cxd2820r_core.c */ 67/* cxd2820r_core.c */
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 0c9f40c2a251..a64a538ba364 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -2336,6 +2336,11 @@ struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
2336 request_firmware() will hit an OOPS (this should be moved somewhere 2336 request_firmware() will hit an OOPS (this should be moved somewhere
2337 more common) */ 2337 more common) */
2338 2338
2339 /* FIXME: make sure the dev.parent field is initialized, or else
2340 request_firmware() will hit an OOPS (this should be moved somewhere
2341 more common) */
2342 st->i2c_master.gated_tuner_i2c_adap.dev.parent = i2c_adap->dev.parent;
2343
2339 dibx000_init_i2c_master(&st->i2c_master, DIB7000P, st->i2c_adap, st->i2c_addr); 2344 dibx000_init_i2c_master(&st->i2c_master, DIB7000P, st->i2c_adap, st->i2c_addr);
2340 2345
2341 /* init 7090 tuner adapter */ 2346 /* init 7090 tuner adapter */
diff --git a/drivers/media/dvb/frontends/drxd_hard.c b/drivers/media/dvb/frontends/drxd_hard.c
index ea4c1c361d2b..2238bf0be959 100644
--- a/drivers/media/dvb/frontends/drxd_hard.c
+++ b/drivers/media/dvb/frontends/drxd_hard.c
@@ -28,7 +28,6 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/firmware.h> 29#include <linux/firmware.h>
30#include <linux/i2c.h> 30#include <linux/i2c.h>
31#include <linux/version.h>
32#include <asm/div64.h> 31#include <asm/div64.h>
33 32
34#include "dvb_frontend.h" 33#include "dvb_frontend.h"
@@ -233,7 +232,7 @@ static int i2c_read(struct i2c_adapter *adap,
233 return 0; 232 return 0;
234} 233}
235 234
236inline u32 MulDiv32(u32 a, u32 b, u32 c) 235static inline u32 MulDiv32(u32 a, u32 b, u32 c)
237{ 236{
238 u64 tmp64; 237 u64 tmp64;
239 238
@@ -910,14 +909,16 @@ static int load_firmware(struct drxd_state *state, const char *fw_name)
910 return -EIO; 909 return -EIO;
911 } 910 }
912 911
913 state->microcode = kzalloc(fw->size, GFP_KERNEL); 912 state->microcode = kmalloc(fw->size, GFP_KERNEL);
914 if (state->microcode == NULL) { 913 if (state->microcode == NULL) {
915 printk(KERN_ERR "drxd: firmware load failure: nomemory\n"); 914 release_firmware(fw);
915 printk(KERN_ERR "drxd: firmware load failure: no memory\n");
916 return -ENOMEM; 916 return -ENOMEM;
917 } 917 }
918 918
919 memcpy(state->microcode, fw->data, fw->size); 919 memcpy(state->microcode, fw->data, fw->size);
920 state->microcode_length = fw->size; 920 state->microcode_length = fw->size;
921 release_firmware(fw);
921 return 0; 922 return 0;
922} 923}
923 924
diff --git a/drivers/media/dvb/frontends/drxk.h b/drivers/media/dvb/frontends/drxk.h
new file mode 100644
index 000000000000..58baf419560c
--- /dev/null
+++ b/drivers/media/dvb/frontends/drxk.h
@@ -0,0 +1,47 @@
1#ifndef _DRXK_H_
2#define _DRXK_H_
3
4#include <linux/types.h>
5#include <linux/i2c.h>
6
7/**
8 * struct drxk_config - Configure the initial parameters for DRX-K
9 *
10 * adr: I2C Address of the DRX-K
11 * single_master: Device is on the single master mode
12 * no_i2c_bridge: Don't switch the I2C bridge to talk with tuner
13 * antenna_gpio: GPIO bit used to control the antenna
14 * antenna_dvbt: GPIO bit for changing antenna to DVB-C. A value of 1
15 * means that 1=DVBC, 0 = DVBT. Zero means the opposite.
16 * microcode_name: Name of the firmware file with the microcode
17 *
18 * On the *_gpio vars, bit 0 is UIO-1, bit 1 is UIO-2 and bit 2 is
19 * UIO-3.
20 */
21struct drxk_config {
22 u8 adr;
23 bool single_master;
24 bool no_i2c_bridge;
25
26 bool antenna_dvbt;
27 u16 antenna_gpio;
28
29 const char *microcode_name;
30};
31
32#if defined(CONFIG_DVB_DRXK) || (defined(CONFIG_DVB_DRXK_MODULE) \
33 && defined(MODULE))
34extern struct dvb_frontend *drxk_attach(const struct drxk_config *config,
35 struct i2c_adapter *i2c,
36 struct dvb_frontend **fe_t);
37#else
38static inline struct dvb_frontend *drxk_attach(const struct drxk_config *config,
39 struct i2c_adapter *i2c,
40 struct dvb_frontend **fe_t)
41{
42 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
43 return NULL;
44}
45#endif
46
47#endif
diff --git a/drivers/media/dvb/frontends/drxk_hard.c b/drivers/media/dvb/frontends/drxk_hard.c
new file mode 100644
index 000000000000..41b083820dae
--- /dev/null
+++ b/drivers/media/dvb/frontends/drxk_hard.c
@@ -0,0 +1,6454 @@
1/*
2 * drxk_hard: DRX-K DVB-C/T demodulator driver
3 *
4 * Copyright (C) 2010-2011 Digital Devices GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 only, as published by the Free Software Foundation.
9 *
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA
21 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/firmware.h>
30#include <linux/i2c.h>
31#include <linux/version.h>
32#include <asm/div64.h>
33
34#include "dvb_frontend.h"
35#include "drxk.h"
36#include "drxk_hard.h"
37
38static int PowerDownDVBT(struct drxk_state *state, bool setPowerMode);
39static int PowerDownQAM(struct drxk_state *state);
40static int SetDVBTStandard(struct drxk_state *state,
41 enum OperationMode oMode);
42static int SetQAMStandard(struct drxk_state *state,
43 enum OperationMode oMode);
44static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz,
45 s32 tunerFreqOffset);
46static int SetDVBTStandard(struct drxk_state *state,
47 enum OperationMode oMode);
48static int DVBTStart(struct drxk_state *state);
49static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz,
50 s32 tunerFreqOffset);
51static int GetQAMLockStatus(struct drxk_state *state, u32 *pLockStatus);
52static int GetDVBTLockStatus(struct drxk_state *state, u32 *pLockStatus);
53static int SwitchAntennaToQAM(struct drxk_state *state);
54static int SwitchAntennaToDVBT(struct drxk_state *state);
55
56static bool IsDVBT(struct drxk_state *state)
57{
58 return state->m_OperationMode == OM_DVBT;
59}
60
61static bool IsQAM(struct drxk_state *state)
62{
63 return state->m_OperationMode == OM_QAM_ITU_A ||
64 state->m_OperationMode == OM_QAM_ITU_B ||
65 state->m_OperationMode == OM_QAM_ITU_C;
66}
67
/*
 * Whether the device is an A1 spin running downloaded patch code.
 * Deliberately non-static: referenced by the DRXK_KI_*_DVBT default
 * macros below.
 */
bool IsA1WithPatchCode(struct drxk_state *state)
{
	return state->m_DRXK_A1_PATCH_CODE;
}
72
/* Whether the device is an A1 spin running its mask ROM code. */
bool IsA1WithRomCode(struct drxk_state *state)
{
	return state->m_DRXK_A1_ROM_CODE;
}
77
78#define NOA1ROM 0
79
80#define DRXDAP_FASI_SHORT_FORMAT(addr) (((addr) & 0xFC30FF80) == 0)
81#define DRXDAP_FASI_LONG_FORMAT(addr) (((addr) & 0xFC30FF80) != 0)
82
83#define DEFAULT_MER_83 165
84#define DEFAULT_MER_93 250
85
86#ifndef DRXK_MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH
87#define DRXK_MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH (0x02)
88#endif
89
90#ifndef DRXK_MPEG_PARALLEL_OUTPUT_PIN_DRIVE_STRENGTH
91#define DRXK_MPEG_PARALLEL_OUTPUT_PIN_DRIVE_STRENGTH (0x03)
92#endif
93
94#ifndef DRXK_MPEG_OUTPUT_CLK_DRIVE_STRENGTH
95#define DRXK_MPEG_OUTPUT_CLK_DRIVE_STRENGTH (0x06)
96#endif
97
98#define DEFAULT_DRXK_MPEG_LOCK_TIMEOUT 700
99#define DEFAULT_DRXK_DEMOD_LOCK_TIMEOUT 500
100
101#ifndef DRXK_KI_RAGC_ATV
102#define DRXK_KI_RAGC_ATV 4
103#endif
104#ifndef DRXK_KI_IAGC_ATV
105#define DRXK_KI_IAGC_ATV 6
106#endif
107#ifndef DRXK_KI_DAGC_ATV
108#define DRXK_KI_DAGC_ATV 7
109#endif
110
111#ifndef DRXK_KI_RAGC_QAM
112#define DRXK_KI_RAGC_QAM 3
113#endif
114#ifndef DRXK_KI_IAGC_QAM
115#define DRXK_KI_IAGC_QAM 4
116#endif
117#ifndef DRXK_KI_DAGC_QAM
118#define DRXK_KI_DAGC_QAM 7
119#endif
120#ifndef DRXK_KI_RAGC_DVBT
121#define DRXK_KI_RAGC_DVBT (IsA1WithPatchCode(state) ? 3 : 2)
122#endif
123#ifndef DRXK_KI_IAGC_DVBT
124#define DRXK_KI_IAGC_DVBT (IsA1WithPatchCode(state) ? 4 : 2)
125#endif
126#ifndef DRXK_KI_DAGC_DVBT
127#define DRXK_KI_DAGC_DVBT (IsA1WithPatchCode(state) ? 10 : 7)
128#endif
129
130#ifndef DRXK_AGC_DAC_OFFSET
131#define DRXK_AGC_DAC_OFFSET (0x800)
132#endif
133
134#ifndef DRXK_BANDWIDTH_8MHZ_IN_HZ
135#define DRXK_BANDWIDTH_8MHZ_IN_HZ (0x8B8249L)
136#endif
137
138#ifndef DRXK_BANDWIDTH_7MHZ_IN_HZ
139#define DRXK_BANDWIDTH_7MHZ_IN_HZ (0x7A1200L)
140#endif
141
142#ifndef DRXK_BANDWIDTH_6MHZ_IN_HZ
143#define DRXK_BANDWIDTH_6MHZ_IN_HZ (0x68A1B6L)
144#endif
145
146#ifndef DRXK_QAM_SYMBOLRATE_MAX
147#define DRXK_QAM_SYMBOLRATE_MAX (7233000)
148#endif
149
150#define DRXK_BL_ROM_OFFSET_TAPS_DVBT 56
151#define DRXK_BL_ROM_OFFSET_TAPS_ITU_A 64
152#define DRXK_BL_ROM_OFFSET_TAPS_ITU_C 0x5FE0
153#define DRXK_BL_ROM_OFFSET_TAPS_BG 24
154#define DRXK_BL_ROM_OFFSET_TAPS_DKILLP 32
155#define DRXK_BL_ROM_OFFSET_TAPS_NTSC 40
156#define DRXK_BL_ROM_OFFSET_TAPS_FM 48
157#define DRXK_BL_ROM_OFFSET_UCODE 0
158
159#define DRXK_BLC_TIMEOUT 100
160
161#define DRXK_BLCC_NR_ELEMENTS_TAPS 2
162#define DRXK_BLCC_NR_ELEMENTS_UCODE 6
163
164#define DRXK_BLDC_NR_ELEMENTS_TAPS 28
165
166#ifndef DRXK_OFDM_NE_NOTCH_WIDTH
167#define DRXK_OFDM_NE_NOTCH_WIDTH (4)
168#endif
169
170#define DRXK_QAM_SL_SIG_POWER_QAM16 (40960)
171#define DRXK_QAM_SL_SIG_POWER_QAM32 (20480)
172#define DRXK_QAM_SL_SIG_POWER_QAM64 (43008)
173#define DRXK_QAM_SL_SIG_POWER_QAM128 (20992)
174#define DRXK_QAM_SL_SIG_POWER_QAM256 (43520)
175
176static unsigned int debug;
177module_param(debug, int, 0644);
178MODULE_PARM_DESC(debug, "enable debug messages");
179
180#define dprintk(level, fmt, arg...) do { \
181if (debug >= level) \
182 printk(KERN_DEBUG "drxk: %s" fmt, __func__, ## arg); \
183} while (0)
184
185
/*
 * Compute (a * b) / c without 32-bit intermediate overflow: the product
 * is widened to 64 bits and do_div() performs the 64/32 division (plain
 * "/" on a u64 is not available on 32-bit architectures in the kernel).
 */
static inline u32 MulDiv32(u32 a, u32 b, u32 c)
{
	u64 tmp64;

	tmp64 = (u64) a * (u64) b;
	do_div(tmp64, c);

	return (u32) tmp64;
}
195
196inline u32 Frac28a(u32 a, u32 c)
197{
198 int i = 0;
199 u32 Q1 = 0;
200 u32 R0 = 0;
201
202 R0 = (a % c) << 4; /* 32-28 == 4 shifts possible at max */
203 Q1 = a / c; /* integer part, only the 4 least significant bits
204 will be visible in the result */
205
206 /* division using radix 16, 7 nibbles in the result */
207 for (i = 0; i < 7; i++) {
208 Q1 = (Q1 << 4) | (R0 / c);
209 R0 = (R0 % c) << 4;
210 }
211 /* rounding */
212 if ((R0 >> 3) >= c)
213 Q1++;
214
215 return Q1;
216}
217
/*
 * Return 100 * log10(x) rounded to the nearest integer, using pure
 * integer arithmetic: x is normalized so its binary point sits between
 * bit[scale] and bit[scale-1] (1.0 <= x < 2.0), log2 is taken by linear
 * interpolation in a 33-entry lookup table, then converted to base 10.
 * Log10Times100(0) is defined as 0.
 */
static u32 Log10Times100(u32 x)
{
	static const u8 scale = 15;
	static const u8 indexWidth = 5;
	u8 i = 0;
	u32 y = 0;
	u32 d = 0;
	u32 k = 0;
	u32 r = 0;
	/*
	   log2lut[n] = (1<<scale) * 200 * log2(1.0 + ((1.0/(1<<INDEXWIDTH)) * n))
	   0 <= n < ((1<<INDEXWIDTH)+1)
	 */

	static const u32 log2lut[] = {
		0,		/* 0.000000 */
		290941,		/* 290941.300628 */
		573196,		/* 573196.476418 */
		847269,		/* 847269.179851 */
		1113620,	/* 1113620.489452 */
		1372674,	/* 1372673.576986 */
		1624818,	/* 1624817.752104 */
		1870412,	/* 1870411.981536 */
		2109788,	/* 2109787.962654 */
		2343253,	/* 2343252.817465 */
		2571091,	/* 2571091.461923 */
		2793569,	/* 2793568.696416 */
		3010931,	/* 3010931.055901 */
		3223408,	/* 3223408.452106 */
		3431216,	/* 3431215.635215 */
		3634553,	/* 3634553.498355 */
		3833610,	/* 3833610.244726 */
		4028562,	/* 4028562.434393 */
		4219576,	/* 4219575.925308 */
		4406807,	/* 4406806.721144 */
		4590402,	/* 4590401.736809 */
		4770499,	/* 4770499.491025 */
		4947231,	/* 4947230.734179 */
		5120719,	/* 5120719.018555 */
		5291081,	/* 5291081.217197 */
		5458428,	/* 5458427.996830 */
		5622864,	/* 5622864.249668 */
		5784489,	/* 5784489.488298 */
		5943398,	/* 5943398.207380 */
		6099680,	/* 6099680.215452 */
		6253421,	/* 6253420.939751 */
		6404702,	/* 6404701.706649 */
		6553600,	/* 6553600.000000 */
	};


	if (x == 0)
		return 0;

	/* Scale x (normalize) */
	/* computing y in log(x/y) = log(x) - log(y) */
	if ((x & ((0xffffffff) << (scale + 1))) == 0) {
		/* x is small: shift left until bit[scale] is set */
		for (k = scale; k > 0; k--) {
			if (x & (((u32) 1) << scale))
				break;
			x <<= 1;
		}
	} else {
		/* x is large: shift right until only bits <= scale remain */
		for (k = scale; k < 31; k++) {
			if ((x & (((u32) (-1)) << (scale + 1))) == 0)
				break;
			x >>= 1;
		}
	}
	/*
	   Now x has binary point between bit[scale] and bit[scale-1]
	   and 1.0 <= x < 2.0 */

	/* correction for division: log(x) = log(x/y)+log(y) */
	y = k * ((((u32) 1) << scale) * 200);

	/* remove integer part */
	x &= ((((u32) 1) << scale) - 1);
	/* get index */
	i = (u8) (x >> (scale - indexWidth));
	/* compute delta (x - a) */
	d = x & ((((u32) 1) << (scale - indexWidth)) - 1);
	/* compute log, multiplication (d* (..)) must be within range ! */
	y += log2lut[i] +
	    ((d * (log2lut[i + 1] - log2lut[i])) >> (scale - indexWidth));
	/* Convert to log10() */
	y /= 108853;		/* (log2(10) << scale) */
	r = (y >> 1);
	/* rounding */
	if (y & ((u32) 1))
		r++;
	return r;
}
311
312/****************************************************************************/
313/* I2C **********************************************************************/
314/****************************************************************************/
315
316static int i2c_read1(struct i2c_adapter *adapter, u8 adr, u8 *val)
317{
318 struct i2c_msg msgs[1] = { {.addr = adr, .flags = I2C_M_RD,
319 .buf = val, .len = 1}
320 };
321
322 return i2c_transfer(adapter, msgs, 1);
323}
324
325static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len)
326{
327 int status;
328 struct i2c_msg msg = {
329 .addr = adr, .flags = 0, .buf = data, .len = len };
330
331 dprintk(3, ":");
332 if (debug > 2) {
333 int i;
334 for (i = 0; i < len; i++)
335 printk(KERN_CONT " %02x", data[i]);
336 printk(KERN_CONT "\n");
337 }
338 status = i2c_transfer(adap, &msg, 1);
339 if (status >= 0 && status != 1)
340 status = -EIO;
341
342 if (status < 0)
343 printk(KERN_ERR "drxk: i2c write error at addr 0x%02x\n", adr);
344
345 return status;
346}
347
348static int i2c_read(struct i2c_adapter *adap,
349 u8 adr, u8 *msg, int len, u8 *answ, int alen)
350{
351 int status;
352 struct i2c_msg msgs[2] = {
353 {.addr = adr, .flags = 0,
354 .buf = msg, .len = len},
355 {.addr = adr, .flags = I2C_M_RD,
356 .buf = answ, .len = alen}
357 };
358
359 status = i2c_transfer(adap, msgs, 2);
360 if (status != 2) {
361 if (debug > 2)
362 printk(KERN_CONT ": ERROR!\n");
363 if (status >= 0)
364 status = -EIO;
365
366 printk(KERN_ERR "drxk: i2c read error at addr 0x%02x\n", adr);
367 return status;
368 }
369 if (debug > 2) {
370 int i;
371 dprintk(2, ": read from ");
372 for (i = 0; i < len; i++)
373 printk(KERN_CONT " %02x", msg[i]);
374 printk(KERN_CONT "Value = ");
375 for (i = 0; i < alen; i++)
376 printk(KERN_CONT " %02x", answ[i]);
377 printk(KERN_CONT "\n");
378 }
379 return 0;
380}
381
/*
 * Read a 16-bit register (little-endian on the wire) over the DRX
 * DAP/FASI protocol. Long-format addresses — or any non-zero flags —
 * use a 4-byte address preamble with the read bit (0x01) set; short
 * format uses a 2-byte preamble. Single-master mode forces the 0xC0
 * flag bits. Returns 0 on success, negative on i2c error.
 */
static int read16_flags(struct drxk_state *state, u32 reg, u16 *data, u8 flags)
{
	int status;
	u8 adr = state->demod_address, mm1[4], mm2[2], len;

	if (state->single_master)
		flags |= 0xC0;

	if (DRXDAP_FASI_LONG_FORMAT(reg) || (flags != 0)) {
		/* 4-byte long-format address preamble */
		mm1[0] = (((reg << 1) & 0xFF) | 0x01);
		mm1[1] = ((reg >> 16) & 0xFF);
		mm1[2] = ((reg >> 24) & 0xFF) | flags;
		mm1[3] = ((reg >> 7) & 0xFF);
		len = 4;
	} else {
		/* 2-byte short-format address preamble */
		mm1[0] = ((reg << 1) & 0xFF);
		mm1[1] = (((reg >> 16) & 0x0F) | ((reg >> 18) & 0xF0));
		len = 2;
	}
	dprintk(2, "(0x%08x, 0x%02x)\n", reg, flags);
	status = i2c_read(state->i2c, adr, mm1, len, mm2, 2);
	if (status < 0)
		return status;
	if (data)
		*data = mm2[0] | (mm2[1] << 8);	/* assemble little-endian */

	return 0;
}
410
/* Read a 16-bit register with default (zero) flags. */
static int read16(struct drxk_state *state, u32 reg, u16 *data)
{
	return read16_flags(state, reg, data, 0);
}
415
/*
 * Read a 32-bit register (little-endian on the wire) over the DRX
 * DAP/FASI protocol; address encoding is identical to read16_flags().
 * Returns 0 on success, negative on i2c error.
 */
static int read32_flags(struct drxk_state *state, u32 reg, u32 *data, u8 flags)
{
	int status;
	u8 adr = state->demod_address, mm1[4], mm2[4], len;

	if (state->single_master)
		flags |= 0xC0;

	if (DRXDAP_FASI_LONG_FORMAT(reg) || (flags != 0)) {
		/* 4-byte long-format address preamble, read bit set */
		mm1[0] = (((reg << 1) & 0xFF) | 0x01);
		mm1[1] = ((reg >> 16) & 0xFF);
		mm1[2] = ((reg >> 24) & 0xFF) | flags;
		mm1[3] = ((reg >> 7) & 0xFF);
		len = 4;
	} else {
		/* 2-byte short-format address preamble */
		mm1[0] = ((reg << 1) & 0xFF);
		mm1[1] = (((reg >> 16) & 0x0F) | ((reg >> 18) & 0xF0));
		len = 2;
	}
	dprintk(2, "(0x%08x, 0x%02x)\n", reg, flags);
	status = i2c_read(state->i2c, adr, mm1, len, mm2, 4);
	if (status < 0)
		return status;
	if (data)
		*data = mm2[0] | (mm2[1] << 8) |
		    (mm2[2] << 16) | (mm2[3] << 24);	/* little-endian */

	return 0;
}
445
/* Read a 32-bit register with default (zero) flags. */
static int read32(struct drxk_state *state, u32 reg, u32 *data)
{
	return read32_flags(state, reg, data, 0);
}
450
/*
 * Write a 16-bit value (little-endian on the wire) to a DAP/FASI
 * register: mm holds the 2- or 4-byte address preamble followed by the
 * two data bytes. Single-master mode forces the 0xC0 flag bits.
 * Returns the i2c_write() status.
 */
static int write16_flags(struct drxk_state *state, u32 reg, u16 data, u8 flags)
{
	u8 adr = state->demod_address, mm[6], len;

	if (state->single_master)
		flags |= 0xC0;
	if (DRXDAP_FASI_LONG_FORMAT(reg) || (flags != 0)) {
		/* 4-byte long-format address preamble */
		mm[0] = (((reg << 1) & 0xFF) | 0x01);
		mm[1] = ((reg >> 16) & 0xFF);
		mm[2] = ((reg >> 24) & 0xFF) | flags;
		mm[3] = ((reg >> 7) & 0xFF);
		len = 4;
	} else {
		/* 2-byte short-format address preamble */
		mm[0] = ((reg << 1) & 0xFF);
		mm[1] = (((reg >> 16) & 0x0F) | ((reg >> 18) & 0xF0));
		len = 2;
	}
	mm[len] = data & 0xff;
	mm[len + 1] = (data >> 8) & 0xff;

	dprintk(2, "(0x%08x, 0x%04x, 0x%02x)\n", reg, data, flags);
	return i2c_write(state->i2c, adr, mm, len + 2);
}
474
/* Write a 16-bit register with default (zero) flags. */
static int write16(struct drxk_state *state, u32 reg, u16 data)
{
	return write16_flags(state, reg, data, 0);
}
479
/*
 * Write a 32-bit value (little-endian on the wire) to a DAP/FASI
 * register; address encoding is identical to write16_flags(), followed
 * by four data bytes. Returns the i2c_write() status.
 */
static int write32_flags(struct drxk_state *state, u32 reg, u32 data, u8 flags)
{
	u8 adr = state->demod_address, mm[8], len;

	if (state->single_master)
		flags |= 0xC0;
	if (DRXDAP_FASI_LONG_FORMAT(reg) || (flags != 0)) {
		/* 4-byte long-format address preamble */
		mm[0] = (((reg << 1) & 0xFF) | 0x01);
		mm[1] = ((reg >> 16) & 0xFF);
		mm[2] = ((reg >> 24) & 0xFF) | flags;
		mm[3] = ((reg >> 7) & 0xFF);
		len = 4;
	} else {
		/* 2-byte short-format address preamble */
		mm[0] = ((reg << 1) & 0xFF);
		mm[1] = (((reg >> 16) & 0x0F) | ((reg >> 18) & 0xF0));
		len = 2;
	}
	mm[len] = data & 0xff;
	mm[len + 1] = (data >> 8) & 0xff;
	mm[len + 2] = (data >> 16) & 0xff;
	mm[len + 3] = (data >> 24) & 0xff;
	dprintk(2, "(0x%08x, 0x%08x, 0x%02x)\n", reg, data, flags);

	return i2c_write(state->i2c, adr, mm, len + 4);
}
505
/* Write a 32-bit register with default (zero) flags. */
static int write32(struct drxk_state *state, u32 reg, u32 data)
{
	return write32_flags(state, reg, data, 0);
}
510
/*
 * Write a (possibly large) byte block to consecutive device addresses,
 * split into chunks of at most m_ChunkSize bytes (the state->Chunk
 * scratch buffer holds address preamble + payload per transfer).
 * The destination address advances by half the byte count per chunk
 * (see "Address += (Chunk >> 1)" — word-addressed registers, presumably
 * 16-bit; confirm against the DAP spec). Returns 0 or the first i2c
 * error encountered.
 */
static int write_block(struct drxk_state *state, u32 Address,
		       const int BlockSize, const u8 pBlock[])
{
	int status = 0, BlkSize = BlockSize;
	u8 Flags = 0;

	if (state->single_master)
		Flags |= 0xC0;

	while (BlkSize > 0) {
		int Chunk = BlkSize > state->m_ChunkSize ?
		    state->m_ChunkSize : BlkSize;
		u8 *AdrBuf = &state->Chunk[0];
		u32 AdrLength = 0;

		if (DRXDAP_FASI_LONG_FORMAT(Address) || (Flags != 0)) {
			/* 4-byte long-format address preamble */
			AdrBuf[0] = (((Address << 1) & 0xFF) | 0x01);
			AdrBuf[1] = ((Address >> 16) & 0xFF);
			AdrBuf[2] = ((Address >> 24) & 0xFF);
			AdrBuf[3] = ((Address >> 7) & 0xFF);
			AdrBuf[2] |= Flags;
			AdrLength = 4;
			/* keep preamble + payload within the Chunk buffer */
			if (Chunk == state->m_ChunkSize)
				Chunk -= 2;
		} else {
			/* 2-byte short-format address preamble */
			AdrBuf[0] = ((Address << 1) & 0xFF);
			AdrBuf[1] = (((Address >> 16) & 0x0F) |
				     ((Address >> 18) & 0xF0));
			AdrLength = 2;
		}
		memcpy(&state->Chunk[AdrLength], pBlock, Chunk);
		dprintk(2, "(0x%08x, 0x%02x)\n", Address, Flags);
		if (debug > 1) {
			int i;
			if (pBlock)
				for (i = 0; i < Chunk; i++)
					printk(KERN_CONT " %02x", pBlock[i]);
			printk(KERN_CONT "\n");
		}
		status = i2c_write(state->i2c, state->demod_address,
				   &state->Chunk[0], Chunk + AdrLength);
		if (status < 0) {
			printk(KERN_ERR "drxk: %s: i2c write error at addr 0x%02x\n",
			       __func__, Address);
			break;
		}
		pBlock += Chunk;
		Address += (Chunk >> 1);
		BlkSize -= Chunk;
	}
	return status;
}
563
564#ifndef DRXK_MAX_RETRIES_POWERUP
565#define DRXK_MAX_RETRIES_POWERUP 20
566#endif
567
/*
 * Wake the demod from power-down. If the initial probe read fails the
 * device is repeatedly poked with a zero-byte write (up to
 * DRXK_MAX_RETRIES_POWERUP attempts; presumably the write wakes the I2C
 * block — TODO confirm) until a read succeeds. Then all clock domains
 * are enabled and PLL lock testing is switched on.
 * Returns 0 on success, negative errno on error.
 */
int PowerUpDevice(struct drxk_state *state)
{
	int status;
	u8 data = 0;
	u16 retryCount = 0;

	dprintk(1, "\n");

	status = i2c_read1(state->i2c, state->demod_address, &data);
	if (status < 0) {
		/* no answer: poke and re-probe until it responds */
		do {
			data = 0;
			status = i2c_write(state->i2c, state->demod_address,
					   &data, 1);
			msleep(10);
			retryCount++;
			if (status < 0)
				continue;	/* write failed; retry */
			status = i2c_read1(state->i2c, state->demod_address,
					   &data);
		} while (status < 0 &&
			 (retryCount < DRXK_MAX_RETRIES_POWERUP));
		if (status < 0 && retryCount >= DRXK_MAX_RETRIES_POWERUP)
			goto error;
	}

	/* Make sure all clk domains are active */
	status = write16(state, SIO_CC_PWD_MODE__A, SIO_CC_PWD_MODE_LEVEL_NONE);
	if (status < 0)
		goto error;
	status = write16(state, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY);
	if (status < 0)
		goto error;
	/* Enable pll lock tests */
	status = write16(state, SIO_CC_PLL_LOCK__A, 1);
	if (status < 0)
		goto error;

	state->m_currentPowerMode = DRX_POWER_UP;

error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);

	return status;
}
614
615
/*
 * Fill @state with built-in default configuration: per-standard AGC/PGA
 * parameters (VSB/ATV/DVB-T/QAM), MPEG-TS output mode, lock timeouts
 * and clocking defaults. Pure software initialization — no device
 * access. Always returns 0.
 */
static int init_state(struct drxk_state *state)
{
	/*
	 * FIXME: most (all?) of the values bellow should be moved into
	 * struct drxk_config, as they are probably board-specific
	 */
	u32 ulVSBIfAgcMode = DRXK_AGC_CTRL_AUTO;
	u32 ulVSBIfAgcOutputLevel = 0;
	u32 ulVSBIfAgcMinLevel = 0;
	u32 ulVSBIfAgcMaxLevel = 0x7FFF;
	u32 ulVSBIfAgcSpeed = 3;

	u32 ulVSBRfAgcMode = DRXK_AGC_CTRL_AUTO;
	u32 ulVSBRfAgcOutputLevel = 0;
	u32 ulVSBRfAgcMinLevel = 0;
	u32 ulVSBRfAgcMaxLevel = 0x7FFF;
	u32 ulVSBRfAgcSpeed = 3;
	u32 ulVSBRfAgcTop = 9500;
	u32 ulVSBRfAgcCutOffCurrent = 4000;

	u32 ulATVIfAgcMode = DRXK_AGC_CTRL_AUTO;
	u32 ulATVIfAgcOutputLevel = 0;
	u32 ulATVIfAgcMinLevel = 0;
	u32 ulATVIfAgcMaxLevel = 0;
	u32 ulATVIfAgcSpeed = 3;

	u32 ulATVRfAgcMode = DRXK_AGC_CTRL_OFF;
	u32 ulATVRfAgcOutputLevel = 0;
	u32 ulATVRfAgcMinLevel = 0;
	u32 ulATVRfAgcMaxLevel = 0;
	u32 ulATVRfAgcTop = 9500;
	u32 ulATVRfAgcCutOffCurrent = 4000;
	u32 ulATVRfAgcSpeed = 3;

	u32 ulQual83 = DEFAULT_MER_83;
	u32 ulQual93 = DEFAULT_MER_93;

	u32 ulDVBTStaticTSClock = 1;
	u32 ulDVBCStaticTSClock = 1;

	u32 ulMpegLockTimeOut = DEFAULT_DRXK_MPEG_LOCK_TIMEOUT;
	u32 ulDemodLockTimeOut = DEFAULT_DRXK_DEMOD_LOCK_TIMEOUT;

	/* io_pad_cfg register (8 bit reg.) MSB bit is 1 (default value) */
	/* io_pad_cfg_mode output mode is drive always */
	/* io_pad_cfg_drive is set to power 2 (23 mA) */
	u32 ulGPIOCfg = 0x0113;
	u32 ulSerialMode = 1;
	u32 ulInvertTSClock = 0;
	u32 ulTSDataStrength = DRXK_MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH;
	u32 ulTSClockkStrength = DRXK_MPEG_OUTPUT_CLK_DRIVE_STRENGTH;
	u32 ulDVBTBitrate = 50000000;
	u32 ulDVBCBitrate = DRXK_QAM_SYMBOLRATE_MAX * 8;

	u32 ulInsertRSByte = 0;

	u32 ulRfMirror = 1;
	u32 ulPowerDown = 0;

	dprintk(1, "\n");

	/* capability flags: refined later by GetDeviceCapabilities() */
	state->m_hasLNA = false;
	state->m_hasDVBT = false;
	state->m_hasDVBC = false;
	state->m_hasATV = false;
	state->m_hasOOB = false;
	state->m_hasAudio = false;

	/* max payload bytes per i2c transfer (see write_block()) */
	state->m_ChunkSize = 124;

	state->m_oscClockFreq = 0;
	state->m_smartAntInverted = false;
	state->m_bPDownOpenBridge = false;

	/* real system clock frequency in kHz */
	state->m_sysClockFreq = 151875;
	/* Timing div, 250ns/Psys */
	/* Timing div, = (delay (nano seconds) * sysclk (kHz))/ 1000 */
	state->m_HICfgTimingDiv = ((state->m_sysClockFreq / 1000) *
				   HI_I2C_DELAY) / 1000;
	/* Clipping */
	if (state->m_HICfgTimingDiv > SIO_HI_RA_RAM_PAR_2_CFG_DIV__M)
		state->m_HICfgTimingDiv = SIO_HI_RA_RAM_PAR_2_CFG_DIV__M;
	state->m_HICfgWakeUpKey = (state->demod_address << 1);
	/* port/bridge/power down ctrl */
	state->m_HICfgCtrl = SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE;

	state->m_bPowerDown = (ulPowerDown != 0);

	/* firmware/ROM revision flags, set during microcode download */
	state->m_DRXK_A1_PATCH_CODE = false;
	state->m_DRXK_A1_ROM_CODE = false;
	state->m_DRXK_A2_ROM_CODE = false;
	state->m_DRXK_A3_ROM_CODE = false;
	state->m_DRXK_A2_PATCH_CODE = false;
	state->m_DRXK_A3_PATCH_CODE = false;

	/* Init AGC and PGA parameters */
	/* VSB IF */
	state->m_vsbIfAgcCfg.ctrlMode = (ulVSBIfAgcMode);
	state->m_vsbIfAgcCfg.outputLevel = (ulVSBIfAgcOutputLevel);
	state->m_vsbIfAgcCfg.minOutputLevel = (ulVSBIfAgcMinLevel);
	state->m_vsbIfAgcCfg.maxOutputLevel = (ulVSBIfAgcMaxLevel);
	state->m_vsbIfAgcCfg.speed = (ulVSBIfAgcSpeed);
	state->m_vsbPgaCfg = 140;

	/* VSB RF */
	state->m_vsbRfAgcCfg.ctrlMode = (ulVSBRfAgcMode);
	state->m_vsbRfAgcCfg.outputLevel = (ulVSBRfAgcOutputLevel);
	state->m_vsbRfAgcCfg.minOutputLevel = (ulVSBRfAgcMinLevel);
	state->m_vsbRfAgcCfg.maxOutputLevel = (ulVSBRfAgcMaxLevel);
	state->m_vsbRfAgcCfg.speed = (ulVSBRfAgcSpeed);
	state->m_vsbRfAgcCfg.top = (ulVSBRfAgcTop);
	state->m_vsbRfAgcCfg.cutOffCurrent = (ulVSBRfAgcCutOffCurrent);
	state->m_vsbPreSawCfg.reference = 0x07;
	state->m_vsbPreSawCfg.usePreSaw = true;

	/* MER thresholds: only accept overrides that keep 83% < 93% */
	state->m_Quality83percent = DEFAULT_MER_83;
	state->m_Quality93percent = DEFAULT_MER_93;
	if (ulQual93 <= 500 && ulQual83 < ulQual93) {
		state->m_Quality83percent = ulQual83;
		state->m_Quality93percent = ulQual93;
	}

	/* ATV IF */
	state->m_atvIfAgcCfg.ctrlMode = (ulATVIfAgcMode);
	state->m_atvIfAgcCfg.outputLevel = (ulATVIfAgcOutputLevel);
	state->m_atvIfAgcCfg.minOutputLevel = (ulATVIfAgcMinLevel);
	state->m_atvIfAgcCfg.maxOutputLevel = (ulATVIfAgcMaxLevel);
	state->m_atvIfAgcCfg.speed = (ulATVIfAgcSpeed);

	/* ATV RF */
	state->m_atvRfAgcCfg.ctrlMode = (ulATVRfAgcMode);
	state->m_atvRfAgcCfg.outputLevel = (ulATVRfAgcOutputLevel);
	state->m_atvRfAgcCfg.minOutputLevel = (ulATVRfAgcMinLevel);
	state->m_atvRfAgcCfg.maxOutputLevel = (ulATVRfAgcMaxLevel);
	state->m_atvRfAgcCfg.speed = (ulATVRfAgcSpeed);
	state->m_atvRfAgcCfg.top = (ulATVRfAgcTop);
	state->m_atvRfAgcCfg.cutOffCurrent = (ulATVRfAgcCutOffCurrent);
	state->m_atvPreSawCfg.reference = 0x04;
	state->m_atvPreSawCfg.usePreSaw = true;


	/* DVBT RF */
	state->m_dvbtRfAgcCfg.ctrlMode = DRXK_AGC_CTRL_OFF;
	state->m_dvbtRfAgcCfg.outputLevel = 0;
	state->m_dvbtRfAgcCfg.minOutputLevel = 0;
	state->m_dvbtRfAgcCfg.maxOutputLevel = 0xFFFF;
	state->m_dvbtRfAgcCfg.top = 0x2100;
	state->m_dvbtRfAgcCfg.cutOffCurrent = 4000;
	state->m_dvbtRfAgcCfg.speed = 1;


	/* DVBT IF */
	state->m_dvbtIfAgcCfg.ctrlMode = DRXK_AGC_CTRL_AUTO;
	state->m_dvbtIfAgcCfg.outputLevel = 0;
	state->m_dvbtIfAgcCfg.minOutputLevel = 0;
	state->m_dvbtIfAgcCfg.maxOutputLevel = 9000;
	state->m_dvbtIfAgcCfg.top = 13424;
	state->m_dvbtIfAgcCfg.cutOffCurrent = 0;
	state->m_dvbtIfAgcCfg.speed = 3;
	state->m_dvbtIfAgcCfg.FastClipCtrlDelay = 30;
	state->m_dvbtIfAgcCfg.IngainTgtMax = 30000;
	/* state->m_dvbtPgaCfg = 140; */

	state->m_dvbtPreSawCfg.reference = 4;
	state->m_dvbtPreSawCfg.usePreSaw = false;

	/* QAM RF */
	state->m_qamRfAgcCfg.ctrlMode = DRXK_AGC_CTRL_OFF;
	state->m_qamRfAgcCfg.outputLevel = 0;
	state->m_qamRfAgcCfg.minOutputLevel = 6023;
	state->m_qamRfAgcCfg.maxOutputLevel = 27000;
	state->m_qamRfAgcCfg.top = 0x2380;
	state->m_qamRfAgcCfg.cutOffCurrent = 4000;
	state->m_qamRfAgcCfg.speed = 3;

	/* QAM IF */
	state->m_qamIfAgcCfg.ctrlMode = DRXK_AGC_CTRL_AUTO;
	state->m_qamIfAgcCfg.outputLevel = 0;
	state->m_qamIfAgcCfg.minOutputLevel = 0;
	state->m_qamIfAgcCfg.maxOutputLevel = 9000;
	state->m_qamIfAgcCfg.top = 0x0511;
	state->m_qamIfAgcCfg.cutOffCurrent = 0;
	state->m_qamIfAgcCfg.speed = 3;
	state->m_qamIfAgcCfg.IngainTgtMax = 5119;
	state->m_qamIfAgcCfg.FastClipCtrlDelay = 50;

	state->m_qamPgaCfg = 140;
	state->m_qamPreSawCfg.reference = 4;
	state->m_qamPreSawCfg.usePreSaw = false;

	state->m_OperationMode = OM_NONE;
	state->m_DrxkState = DRXK_UNINITIALIZED;

	/* MPEG output configuration */
	state->m_enableMPEGOutput = true;	/* If TRUE; enable MPEG ouput */
	state->m_insertRSByte = false;	/* If TRUE; insert RS byte */
	state->m_enableParallel = true;	/* If TRUE;
					   parallel out otherwise serial */
	state->m_invertDATA = false;	/* If TRUE; invert DATA signals */
	state->m_invertERR = false;	/* If TRUE; invert ERR signal */
	state->m_invertSTR = false;	/* If TRUE; invert STR signals */
	state->m_invertVAL = false;	/* If TRUE; invert VAL signals */
	state->m_invertCLK = (ulInvertTSClock != 0);	/* If TRUE; invert CLK signals */
	state->m_DVBTStaticCLK = (ulDVBTStaticTSClock != 0);
	state->m_DVBCStaticCLK = (ulDVBCStaticTSClock != 0);
	/* If TRUE; static MPEG clockrate will be used;
	   otherwise clockrate will adapt to the bitrate of the TS */

	state->m_DVBTBitrate = ulDVBTBitrate;
	state->m_DVBCBitrate = ulDVBCBitrate;

	state->m_TSDataStrength = (ulTSDataStrength & 0x07);
	state->m_TSClockkStrength = (ulTSClockkStrength & 0x07);

	/* Maximum bitrate in b/s in case static clockrate is selected */
	state->m_mpegTsStaticBitrate = 19392658;
	state->m_disableTEIhandling = false;

	if (ulInsertRSByte)
		state->m_insertRSByte = true;

	/* timeouts: only accept overrides below 10 s */
	state->m_MpegLockTimeOut = DEFAULT_DRXK_MPEG_LOCK_TIMEOUT;
	if (ulMpegLockTimeOut < 10000)
		state->m_MpegLockTimeOut = ulMpegLockTimeOut;
	state->m_DemodLockTimeOut = DEFAULT_DRXK_DEMOD_LOCK_TIMEOUT;
	if (ulDemodLockTimeOut < 10000)
		state->m_DemodLockTimeOut = ulDemodLockTimeOut;

	/* QAM defaults */
	state->m_Constellation = DRX_CONSTELLATION_AUTO;
	state->m_qamInterleaveMode = DRXK_QAM_I12_J17;
	state->m_fecRsPlen = 204 * 8;	/* fecRsPlen annex A */
	state->m_fecRsPrescale = 1;

	state->m_sqiSpeed = DRXK_DVBT_SQI_SPEED_MEDIUM;
	state->m_agcFastClipCtrlDelay = 0;

	state->m_GPIOCfg = (ulGPIOCfg);

	/* NOTE(review): overrides m_bPowerDown derived from ulPowerDown above */
	state->m_bPowerDown = false;
	state->m_currentPowerMode = DRX_POWER_DOWN;

	/* NOTE(review): overrides the m_enableParallel default set above */
	state->m_enableParallel = (ulSerialMode == 0);

	/* NOTE(review): ulRfMirror is 1, so m_rfmirror ends up false; the
	 * "== 0" comparison looks inverted — confirm intended polarity */
	state->m_rfmirror = (ulRfMirror == 0);
	state->m_IfAgcPol = false;
	return 0;
}
865
/*
 * Minimal device open: stop the hardware lock indicator process, then
 * read the JTAG id and board id with the COMM key temporarily set to
 * the unlock value (the previous key is saved and restored). The values
 * read into jtag/bid are discarded; only the i2c status is propagated.
 */
static int DRXX_Open(struct drxk_state *state)
{
	int status = 0;
	u32 jtag = 0;
	u16 bid = 0;
	u16 key = 0;

	dprintk(1, "\n");
	/* stop lock indicator process */
	status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
	if (status < 0)
		goto error;
	/* Check device id */
	status = read16(state, SIO_TOP_COMM_KEY__A, &key);
	if (status < 0)
		goto error;
	status = write16(state, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY);
	if (status < 0)
		goto error;
	status = read32(state, SIO_TOP_JTAGID_LO__A, &jtag);
	if (status < 0)
		goto error;
	status = read16(state, SIO_PDR_UIO_IN_HI__A, &bid);
	if (status < 0)
		goto error;
	status = write16(state, SIO_TOP_COMM_KEY__A, key);
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
897
898static int GetDeviceCapabilities(struct drxk_state *state)
899{
900 u16 sioPdrOhwCfg = 0;
901 u32 sioTopJtagidLo = 0;
902 int status;
903 const char *spin = "";
904
905 dprintk(1, "\n");
906
907 /* driver 0.9.0 */
908 /* stop lock indicator process */
909 status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
910 if (status < 0)
911 goto error;
912 status = write16(state, SIO_TOP_COMM_KEY__A, 0xFABA);
913 if (status < 0)
914 goto error;
915 status = read16(state, SIO_PDR_OHW_CFG__A, &sioPdrOhwCfg);
916 if (status < 0)
917 goto error;
918 status = write16(state, SIO_TOP_COMM_KEY__A, 0x0000);
919 if (status < 0)
920 goto error;
921
922 switch ((sioPdrOhwCfg & SIO_PDR_OHW_CFG_FREF_SEL__M)) {
923 case 0:
924 /* ignore (bypass ?) */
925 break;
926 case 1:
927 /* 27 MHz */
928 state->m_oscClockFreq = 27000;
929 break;
930 case 2:
931 /* 20.25 MHz */
932 state->m_oscClockFreq = 20250;
933 break;
934 case 3:
935 /* 4 MHz */
936 state->m_oscClockFreq = 20250;
937 break;
938 default:
939 printk(KERN_ERR "drxk: Clock Frequency is unkonwn\n");
940 return -EINVAL;
941 }
942 /*
943 Determine device capabilities
944 Based on pinning v14
945 */
946 status = read32(state, SIO_TOP_JTAGID_LO__A, &sioTopJtagidLo);
947 if (status < 0)
948 goto error;
949 /* driver 0.9.0 */
950 switch ((sioTopJtagidLo >> 29) & 0xF) {
951 case 0:
952 state->m_deviceSpin = DRXK_SPIN_A1;
953 spin = "A1";
954 break;
955 case 2:
956 state->m_deviceSpin = DRXK_SPIN_A2;
957 spin = "A2";
958 break;
959 case 3:
960 state->m_deviceSpin = DRXK_SPIN_A3;
961 spin = "A3";
962 break;
963 default:
964 state->m_deviceSpin = DRXK_SPIN_UNKNOWN;
965 status = -EINVAL;
966 printk(KERN_ERR "drxk: Spin unknown\n");
967 goto error2;
968 }
969 switch ((sioTopJtagidLo >> 12) & 0xFF) {
970 case 0x13:
971 /* typeId = DRX3913K_TYPE_ID */
972 state->m_hasLNA = false;
973 state->m_hasOOB = false;
974 state->m_hasATV = false;
975 state->m_hasAudio = false;
976 state->m_hasDVBT = true;
977 state->m_hasDVBC = true;
978 state->m_hasSAWSW = true;
979 state->m_hasGPIO2 = false;
980 state->m_hasGPIO1 = false;
981 state->m_hasIRQN = false;
982 break;
983 case 0x15:
984 /* typeId = DRX3915K_TYPE_ID */
985 state->m_hasLNA = false;
986 state->m_hasOOB = false;
987 state->m_hasATV = true;
988 state->m_hasAudio = false;
989 state->m_hasDVBT = true;
990 state->m_hasDVBC = false;
991 state->m_hasSAWSW = true;
992 state->m_hasGPIO2 = true;
993 state->m_hasGPIO1 = true;
994 state->m_hasIRQN = false;
995 break;
996 case 0x16:
997 /* typeId = DRX3916K_TYPE_ID */
998 state->m_hasLNA = false;
999 state->m_hasOOB = false;
1000 state->m_hasATV = true;
1001 state->m_hasAudio = false;
1002 state->m_hasDVBT = true;
1003 state->m_hasDVBC = false;
1004 state->m_hasSAWSW = true;
1005 state->m_hasGPIO2 = true;
1006 state->m_hasGPIO1 = true;
1007 state->m_hasIRQN = false;
1008 break;
1009 case 0x18:
1010 /* typeId = DRX3918K_TYPE_ID */
1011 state->m_hasLNA = false;
1012 state->m_hasOOB = false;
1013 state->m_hasATV = true;
1014 state->m_hasAudio = true;
1015 state->m_hasDVBT = true;
1016 state->m_hasDVBC = false;
1017 state->m_hasSAWSW = true;
1018 state->m_hasGPIO2 = true;
1019 state->m_hasGPIO1 = true;
1020 state->m_hasIRQN = false;
1021 break;
1022 case 0x21:
1023 /* typeId = DRX3921K_TYPE_ID */
1024 state->m_hasLNA = false;
1025 state->m_hasOOB = false;
1026 state->m_hasATV = true;
1027 state->m_hasAudio = true;
1028 state->m_hasDVBT = true;
1029 state->m_hasDVBC = true;
1030 state->m_hasSAWSW = true;
1031 state->m_hasGPIO2 = true;
1032 state->m_hasGPIO1 = true;
1033 state->m_hasIRQN = false;
1034 break;
1035 case 0x23:
1036 /* typeId = DRX3923K_TYPE_ID */
1037 state->m_hasLNA = false;
1038 state->m_hasOOB = false;
1039 state->m_hasATV = true;
1040 state->m_hasAudio = true;
1041 state->m_hasDVBT = true;
1042 state->m_hasDVBC = true;
1043 state->m_hasSAWSW = true;
1044 state->m_hasGPIO2 = true;
1045 state->m_hasGPIO1 = true;
1046 state->m_hasIRQN = false;
1047 break;
1048 case 0x25:
1049 /* typeId = DRX3925K_TYPE_ID */
1050 state->m_hasLNA = false;
1051 state->m_hasOOB = false;
1052 state->m_hasATV = true;
1053 state->m_hasAudio = true;
1054 state->m_hasDVBT = true;
1055 state->m_hasDVBC = true;
1056 state->m_hasSAWSW = true;
1057 state->m_hasGPIO2 = true;
1058 state->m_hasGPIO1 = true;
1059 state->m_hasIRQN = false;
1060 break;
1061 case 0x26:
1062 /* typeId = DRX3926K_TYPE_ID */
1063 state->m_hasLNA = false;
1064 state->m_hasOOB = false;
1065 state->m_hasATV = true;
1066 state->m_hasAudio = false;
1067 state->m_hasDVBT = true;
1068 state->m_hasDVBC = true;
1069 state->m_hasSAWSW = true;
1070 state->m_hasGPIO2 = true;
1071 state->m_hasGPIO1 = true;
1072 state->m_hasIRQN = false;
1073 break;
1074 default:
1075 printk(KERN_ERR "drxk: DeviceID 0x%02x not supported\n",
1076 ((sioTopJtagidLo >> 12) & 0xFF));
1077 status = -EINVAL;
1078 goto error2;
1079 }
1080
1081 printk(KERN_INFO
1082 "drxk: detected a drx-39%02xk, spin %s, xtal %d.%03d MHz\n",
1083 ((sioTopJtagidLo >> 12) & 0xFF), spin,
1084 state->m_oscClockFreq / 1000,
1085 state->m_oscClockFreq % 1000);
1086
1087error:
1088 if (status < 0)
1089 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
1090
1091error2:
1092 return status;
1093}
1094
1095static int HI_Command(struct drxk_state *state, u16 cmd, u16 *pResult)
1096{
1097 int status;
1098 bool powerdown_cmd;
1099
1100 dprintk(1, "\n");
1101
1102 /* Write command */
1103 status = write16(state, SIO_HI_RA_RAM_CMD__A, cmd);
1104 if (status < 0)
1105 goto error;
1106 if (cmd == SIO_HI_RA_RAM_CMD_RESET)
1107 msleep(1);
1108
1109 powerdown_cmd =
1110 (bool) ((cmd == SIO_HI_RA_RAM_CMD_CONFIG) &&
1111 ((state->m_HICfgCtrl) &
1112 SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__M) ==
1113 SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ);
1114 if (powerdown_cmd == false) {
1115 /* Wait until command rdy */
1116 u32 retryCount = 0;
1117 u16 waitCmd;
1118
1119 do {
1120 msleep(1);
1121 retryCount += 1;
1122 status = read16(state, SIO_HI_RA_RAM_CMD__A,
1123 &waitCmd);
1124 } while ((status < 0) && (retryCount < DRXK_MAX_RETRIES)
1125 && (waitCmd != 0));
1126 if (status < 0)
1127 goto error;
1128 status = read16(state, SIO_HI_RA_RAM_RES__A, pResult);
1129 }
1130error:
1131 if (status < 0)
1132 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
1133
1134 return status;
1135}
1136
/*
 * Push the cached host-interface configuration (timeout, control word,
 * wake-up key, bridge delay, timing divider) into the HI parameter RAM
 * and execute a CONFIG command.  Serialized with state->mutex.
 */
static int HI_CfgCommand(struct drxk_state *state)
{
	int status;

	dprintk(1, "\n");

	mutex_lock(&state->mutex);

	/* Parameters must be written before the command is issued. */
	status = write16(state, SIO_HI_RA_RAM_PAR_6__A, state->m_HICfgTimeout);
	if (status < 0)
		goto error;
	status = write16(state, SIO_HI_RA_RAM_PAR_5__A, state->m_HICfgCtrl);
	if (status < 0)
		goto error;
	status = write16(state, SIO_HI_RA_RAM_PAR_4__A, state->m_HICfgWakeUpKey);
	if (status < 0)
		goto error;
	status = write16(state, SIO_HI_RA_RAM_PAR_3__A, state->m_HICfgBridgeDelay);
	if (status < 0)
		goto error;
	status = write16(state, SIO_HI_RA_RAM_PAR_2__A, state->m_HICfgTimingDiv);
	if (status < 0)
		goto error;
	status = write16(state, SIO_HI_RA_RAM_PAR_1__A, SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY);
	if (status < 0)
		goto error;
	/* NOTE(review): passes 0 (NULL) as the result pointer; HI_Command only
	   reads the result register for non-powerdown commands - confirm
	   read16() tolerates a NULL destination in that path. */
	status = HI_Command(state, SIO_HI_RA_RAM_CMD_CONFIG, 0);
	if (status < 0)
		goto error;

	/* CONFIG accepted: clear the one-shot sleep request bits. */
	state->m_HICfgCtrl &= ~SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ;
error:
	mutex_unlock(&state->mutex);
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
1174
/*
 * Initialize the cached host-interface configuration with defaults and
 * commit it to the device via HI_CfgCommand().
 */
static int InitHI(struct drxk_state *state)
{
	dprintk(1, "\n");

	/* Wake-up key is the device's own I2C address (shifted write address) */
	state->m_HICfgWakeUpKey = (state->demod_address << 1);
	state->m_HICfgTimeout = 0x96FF;
	/* port/bridge/power down ctrl */
	state->m_HICfgCtrl = SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE;

	return HI_CfgCommand(state);
}
1186
/*
 * Configure the MPEG transport-stream output pads.
 *
 * @mpegEnable: false - set every TS pad to input mode (output disabled);
 *              true  - drive the pads, either all data lines (parallel) or
 *                      only MD0 (serial), with the configured drive strength.
 *
 * The pad registers are key-protected: writes are enabled by writing 0xFABA
 * to SIO_TOP_COMM_KEY__A and locked again by writing 0x0000 at the end.
 */
static int MPEGTSConfigurePins(struct drxk_state *state, bool mpegEnable)
{
	int status = -1;
	u16 sioPdrMclkCfg = 0;
	u16 sioPdrMdxCfg = 0;

	dprintk(1, "\n");

	/* stop lock indicator process */
	status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
	if (status < 0)
		goto error;

	/* MPEG TS pad configuration */
	status = write16(state, SIO_TOP_COMM_KEY__A, 0xFABA);
	if (status < 0)
		goto error;

	if (mpegEnable == false) {
		/* Set MPEG TS pads to inputmode */
		status = write16(state, SIO_PDR_MSTRT_CFG__A, 0x0000);
		if (status < 0)
			goto error;
		status = write16(state, SIO_PDR_MERR_CFG__A, 0x0000);
		if (status < 0)
			goto error;
		status = write16(state, SIO_PDR_MCLK_CFG__A, 0x0000);
		if (status < 0)
			goto error;
		status = write16(state, SIO_PDR_MVAL_CFG__A, 0x0000);
		if (status < 0)
			goto error;
		status = write16(state, SIO_PDR_MD0_CFG__A, 0x0000);
		if (status < 0)
			goto error;
		status = write16(state, SIO_PDR_MD1_CFG__A, 0x0000);
		if (status < 0)
			goto error;
		status = write16(state, SIO_PDR_MD2_CFG__A, 0x0000);
		if (status < 0)
			goto error;
		status = write16(state, SIO_PDR_MD3_CFG__A, 0x0000);
		if (status < 0)
			goto error;
		status = write16(state, SIO_PDR_MD4_CFG__A, 0x0000);
		if (status < 0)
			goto error;
		status = write16(state, SIO_PDR_MD5_CFG__A, 0x0000);
		if (status < 0)
			goto error;
		status = write16(state, SIO_PDR_MD6_CFG__A, 0x0000);
		if (status < 0)
			goto error;
		status = write16(state, SIO_PDR_MD7_CFG__A, 0x0000);
		if (status < 0)
			goto error;
	} else {
		/* Enable MPEG output */
		/* Pad config = drive strength in the DRIVE field, 0x0003 mode bits */
		sioPdrMdxCfg =
			((state->m_TSDataStrength <<
			SIO_PDR_MD0_CFG_DRIVE__B) | 0x0003);
		sioPdrMclkCfg = ((state->m_TSClockkStrength <<
					SIO_PDR_MCLK_CFG_DRIVE__B) |
				0x0003);

		status = write16(state, SIO_PDR_MSTRT_CFG__A, sioPdrMdxCfg);
		if (status < 0)
			goto error;
		status = write16(state, SIO_PDR_MERR_CFG__A, 0x0000);	/* Disable */
		if (status < 0)
			goto error;
		status = write16(state, SIO_PDR_MVAL_CFG__A, 0x0000);	/* Disable */
		if (status < 0)
			goto error;
		if (state->m_enableParallel == true) {
			/* paralel -> enable MD1 to MD7 */
			status = write16(state, SIO_PDR_MD1_CFG__A, sioPdrMdxCfg);
			if (status < 0)
				goto error;
			status = write16(state, SIO_PDR_MD2_CFG__A, sioPdrMdxCfg);
			if (status < 0)
				goto error;
			status = write16(state, SIO_PDR_MD3_CFG__A, sioPdrMdxCfg);
			if (status < 0)
				goto error;
			status = write16(state, SIO_PDR_MD4_CFG__A, sioPdrMdxCfg);
			if (status < 0)
				goto error;
			status = write16(state, SIO_PDR_MD5_CFG__A, sioPdrMdxCfg);
			if (status < 0)
				goto error;
			status = write16(state, SIO_PDR_MD6_CFG__A, sioPdrMdxCfg);
			if (status < 0)
				goto error;
			status = write16(state, SIO_PDR_MD7_CFG__A, sioPdrMdxCfg);
			if (status < 0)
				goto error;
		} else {
			/* NOTE(review): this recomputes the exact same value
			   assigned above - redundant but harmless. */
			sioPdrMdxCfg = ((state->m_TSDataStrength <<
						SIO_PDR_MD0_CFG_DRIVE__B)
					| 0x0003);
			/* serial -> disable MD1 to MD7 */
			status = write16(state, SIO_PDR_MD1_CFG__A, 0x0000);
			if (status < 0)
				goto error;
			status = write16(state, SIO_PDR_MD2_CFG__A, 0x0000);
			if (status < 0)
				goto error;
			status = write16(state, SIO_PDR_MD3_CFG__A, 0x0000);
			if (status < 0)
				goto error;
			status = write16(state, SIO_PDR_MD4_CFG__A, 0x0000);
			if (status < 0)
				goto error;
			status = write16(state, SIO_PDR_MD5_CFG__A, 0x0000);
			if (status < 0)
				goto error;
			status = write16(state, SIO_PDR_MD6_CFG__A, 0x0000);
			if (status < 0)
				goto error;
			status = write16(state, SIO_PDR_MD7_CFG__A, 0x0000);
			if (status < 0)
				goto error;
		}
		status = write16(state, SIO_PDR_MCLK_CFG__A, sioPdrMclkCfg);
		if (status < 0)
			goto error;
		status = write16(state, SIO_PDR_MD0_CFG__A, sioPdrMdxCfg);
		if (status < 0)
			goto error;
	}
	/* Enable MB output over MPEG pads and ctl input */
	status = write16(state, SIO_PDR_MON_CFG__A, 0x0000);
	if (status < 0)
		goto error;
	/* Write nomagic word to enable pdr reg write */
	status = write16(state, SIO_TOP_COMM_KEY__A, 0x0000);
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
1329
/* Convenience wrapper: put all MPEG TS pads into input mode (output off). */
static int MPEGTSDisable(struct drxk_state *state)
{
	dprintk(1, "\n");

	return MPEGTSConfigurePins(state, false);
}
1336
/*
 * Run a bootloader "chain" command: have the on-chip bootloader execute
 * nrOfElements entries starting at romOffset, then poll SIO_BL_STATUS__A
 * (busy == 0x1) until completion or until timeOut milliseconds elapse.
 * Serialized with state->mutex.
 */
static int BLChainCmd(struct drxk_state *state,
		      u16 romOffset, u16 nrOfElements, u32 timeOut)
{
	u16 blStatus = 0;
	int status;
	unsigned long end;

	dprintk(1, "\n");
	mutex_lock(&state->mutex);
	status = write16(state, SIO_BL_MODE__A, SIO_BL_MODE_CHAIN);
	if (status < 0)
		goto error;
	status = write16(state, SIO_BL_CHAIN_ADDR__A, romOffset);
	if (status < 0)
		goto error;
	status = write16(state, SIO_BL_CHAIN_LEN__A, nrOfElements);
	if (status < 0)
		goto error;
	/* Kick off the bootloader */
	status = write16(state, SIO_BL_ENABLE__A, SIO_BL_ENABLE_ON);
	if (status < 0)
		goto error;

	/* Poll for completion; time_is_after_jiffies(end) is true while the
	   deadline is still in the future, i.e. not yet timed out. */
	end = jiffies + msecs_to_jiffies(timeOut);
	do {
		msleep(1);
		status = read16(state, SIO_BL_STATUS__A, &blStatus);
		if (status < 0)
			goto error;
	} while ((blStatus == 0x1) &&
			((time_is_after_jiffies(end))));

	/* Still busy after the deadline -> hardware did not finish */
	if (blStatus == 0x1) {
		printk(KERN_ERR "drxk: SIO not ready\n");
		status = -EINVAL;
		goto error2;
	}
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
error2:
	mutex_unlock(&state->mutex);
	return status;
}
1380
1381
1382static int DownloadMicrocode(struct drxk_state *state,
1383 const u8 pMCImage[], u32 Length)
1384{
1385 const u8 *pSrc = pMCImage;
1386 u16 Flags;
1387 u16 Drain;
1388 u32 Address;
1389 u16 nBlocks;
1390 u16 BlockSize;
1391 u16 BlockCRC;
1392 u32 offset = 0;
1393 u32 i;
1394 int status = 0;
1395
1396 dprintk(1, "\n");
1397
1398 /* down the drain (we don care about MAGIC_WORD) */
1399 Drain = (pSrc[0] << 8) | pSrc[1];
1400 pSrc += sizeof(u16);
1401 offset += sizeof(u16);
1402 nBlocks = (pSrc[0] << 8) | pSrc[1];
1403 pSrc += sizeof(u16);
1404 offset += sizeof(u16);
1405
1406 for (i = 0; i < nBlocks; i += 1) {
1407 Address = (pSrc[0] << 24) | (pSrc[1] << 16) |
1408 (pSrc[2] << 8) | pSrc[3];
1409 pSrc += sizeof(u32);
1410 offset += sizeof(u32);
1411
1412 BlockSize = ((pSrc[0] << 8) | pSrc[1]) * sizeof(u16);
1413 pSrc += sizeof(u16);
1414 offset += sizeof(u16);
1415
1416 Flags = (pSrc[0] << 8) | pSrc[1];
1417 pSrc += sizeof(u16);
1418 offset += sizeof(u16);
1419
1420 BlockCRC = (pSrc[0] << 8) | pSrc[1];
1421 pSrc += sizeof(u16);
1422 offset += sizeof(u16);
1423
1424 if (offset + BlockSize > Length) {
1425 printk(KERN_ERR "drxk: Firmware is corrupted.\n");
1426 return -EINVAL;
1427 }
1428
1429 status = write_block(state, Address, BlockSize, pSrc);
1430 if (status < 0) {
1431 printk(KERN_ERR "drxk: Error %d while loading firmware\n", status);
1432 break;
1433 }
1434 pSrc += BlockSize;
1435 offset += BlockSize;
1436 }
1437 return status;
1438}
1439
1440static int DVBTEnableOFDMTokenRing(struct drxk_state *state, bool enable)
1441{
1442 int status;
1443 u16 data = 0;
1444 u16 desiredCtrl = SIO_OFDM_SH_OFDM_RING_ENABLE_ON;
1445 u16 desiredStatus = SIO_OFDM_SH_OFDM_RING_STATUS_ENABLED;
1446 unsigned long end;
1447
1448 dprintk(1, "\n");
1449
1450 if (enable == false) {
1451 desiredCtrl = SIO_OFDM_SH_OFDM_RING_ENABLE_OFF;
1452 desiredStatus = SIO_OFDM_SH_OFDM_RING_STATUS_DOWN;
1453 }
1454
1455 status = read16(state, SIO_OFDM_SH_OFDM_RING_STATUS__A, &data);
1456 if (status >= 0 && data == desiredStatus) {
1457 /* tokenring already has correct status */
1458 return status;
1459 }
1460 /* Disable/enable dvbt tokenring bridge */
1461 status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, desiredCtrl);
1462
1463 end = jiffies + msecs_to_jiffies(DRXK_OFDM_TR_SHUTDOWN_TIMEOUT);
1464 do {
1465 status = read16(state, SIO_OFDM_SH_OFDM_RING_STATUS__A, &data);
1466 if ((status >= 0 && data == desiredStatus) || time_is_after_jiffies(end))
1467 break;
1468 msleep(1);
1469 } while (1);
1470 if (data != desiredStatus) {
1471 printk(KERN_ERR "drxk: SIO not ready\n");
1472 return -EINVAL;
1473 }
1474 return status;
1475}
1476
1477static int MPEGTSStop(struct drxk_state *state)
1478{
1479 int status = 0;
1480 u16 fecOcSncMode = 0;
1481 u16 fecOcIprMode = 0;
1482
1483 dprintk(1, "\n");
1484
1485 /* Gracefull shutdown (byte boundaries) */
1486 status = read16(state, FEC_OC_SNC_MODE__A, &fecOcSncMode);
1487 if (status < 0)
1488 goto error;
1489 fecOcSncMode |= FEC_OC_SNC_MODE_SHUTDOWN__M;
1490 status = write16(state, FEC_OC_SNC_MODE__A, fecOcSncMode);
1491 if (status < 0)
1492 goto error;
1493
1494 /* Suppress MCLK during absence of data */
1495 status = read16(state, FEC_OC_IPR_MODE__A, &fecOcIprMode);
1496 if (status < 0)
1497 goto error;
1498 fecOcIprMode |= FEC_OC_IPR_MODE_MCLK_DIS_DAT_ABS__M;
1499 status = write16(state, FEC_OC_IPR_MODE__A, fecOcIprMode);
1500
1501error:
1502 if (status < 0)
1503 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
1504
1505 return status;
1506}
1507
1508static int scu_command(struct drxk_state *state,
1509 u16 cmd, u8 parameterLen,
1510 u16 *parameter, u8 resultLen, u16 *result)
1511{
1512#if (SCU_RAM_PARAM_0__A - SCU_RAM_PARAM_15__A) != 15
1513#error DRXK register mapping no longer compatible with this routine!
1514#endif
1515 u16 curCmd = 0;
1516 int status = -EINVAL;
1517 unsigned long end;
1518 u8 buffer[34];
1519 int cnt = 0, ii;
1520 const char *p;
1521 char errname[30];
1522
1523 dprintk(1, "\n");
1524
1525 if ((cmd == 0) || ((parameterLen > 0) && (parameter == NULL)) ||
1526 ((resultLen > 0) && (result == NULL)))
1527 goto error;
1528
1529 mutex_lock(&state->mutex);
1530
1531 /* assume that the command register is ready
1532 since it is checked afterwards */
1533 for (ii = parameterLen - 1; ii >= 0; ii -= 1) {
1534 buffer[cnt++] = (parameter[ii] & 0xFF);
1535 buffer[cnt++] = ((parameter[ii] >> 8) & 0xFF);
1536 }
1537 buffer[cnt++] = (cmd & 0xFF);
1538 buffer[cnt++] = ((cmd >> 8) & 0xFF);
1539
1540 write_block(state, SCU_RAM_PARAM_0__A -
1541 (parameterLen - 1), cnt, buffer);
1542 /* Wait until SCU has processed command */
1543 end = jiffies + msecs_to_jiffies(DRXK_MAX_WAITTIME);
1544 do {
1545 msleep(1);
1546 status = read16(state, SCU_RAM_COMMAND__A, &curCmd);
1547 if (status < 0)
1548 goto error;
1549 } while (!(curCmd == DRX_SCU_READY) && (time_is_after_jiffies(end)));
1550 if (curCmd != DRX_SCU_READY) {
1551 printk(KERN_ERR "drxk: SCU not ready\n");
1552 status = -EIO;
1553 goto error2;
1554 }
1555 /* read results */
1556 if ((resultLen > 0) && (result != NULL)) {
1557 s16 err;
1558 int ii;
1559
1560 for (ii = resultLen - 1; ii >= 0; ii -= 1) {
1561 status = read16(state, SCU_RAM_PARAM_0__A - ii, &result[ii]);
1562 if (status < 0)
1563 goto error;
1564 }
1565
1566 /* Check if an error was reported by SCU */
1567 err = (s16)result[0];
1568 if (err >= 0)
1569 goto error;
1570
1571 /* check for the known error codes */
1572 switch (err) {
1573 case SCU_RESULT_UNKCMD:
1574 p = "SCU_RESULT_UNKCMD";
1575 break;
1576 case SCU_RESULT_UNKSTD:
1577 p = "SCU_RESULT_UNKSTD";
1578 break;
1579 case SCU_RESULT_SIZE:
1580 p = "SCU_RESULT_SIZE";
1581 break;
1582 case SCU_RESULT_INVPAR:
1583 p = "SCU_RESULT_INVPAR";
1584 break;
1585 default: /* Other negative values are errors */
1586 sprintf(errname, "ERROR: %d\n", err);
1587 p = errname;
1588 }
1589 printk(KERN_ERR "drxk: %s while sending cmd 0x%04x with params:", p, cmd);
1590 print_hex_dump_bytes("drxk: ", DUMP_PREFIX_NONE, buffer, cnt);
1591 status = -EINVAL;
1592 goto error2;
1593 }
1594
1595error:
1596 if (status < 0)
1597 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
1598error2:
1599 mutex_unlock(&state->mutex);
1600 return status;
1601}
1602
1603static int SetIqmAf(struct drxk_state *state, bool active)
1604{
1605 u16 data = 0;
1606 int status;
1607
1608 dprintk(1, "\n");
1609
1610 /* Configure IQM */
1611 status = read16(state, IQM_AF_STDBY__A, &data);
1612 if (status < 0)
1613 goto error;
1614
1615 if (!active) {
1616 data |= (IQM_AF_STDBY_STDBY_ADC_STANDBY
1617 | IQM_AF_STDBY_STDBY_AMP_STANDBY
1618 | IQM_AF_STDBY_STDBY_PD_STANDBY
1619 | IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY
1620 | IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY);
1621 } else {
1622 data &= ((~IQM_AF_STDBY_STDBY_ADC_STANDBY)
1623 & (~IQM_AF_STDBY_STDBY_AMP_STANDBY)
1624 & (~IQM_AF_STDBY_STDBY_PD_STANDBY)
1625 & (~IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY)
1626 & (~IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY)
1627 );
1628 }
1629 status = write16(state, IQM_AF_STDBY__A, data);
1630
1631error:
1632 if (status < 0)
1633 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
1634 return status;
1635}
1636
/*
 * Transition the device to the requested power mode.
 *
 * @mode: requested DRXPowerMode (in); must be non-NULL.
 *
 * Any transition first returns the chip to DRX_POWER_UP, then (for the
 * power-down modes) stops the active standard, shuts the OFDM token ring,
 * programs SIO_CC_PWD_MODE and, unless only the OFDM part is powered down,
 * puts the host interface to sleep via HI_CfgCommand().
 */
static int CtrlPowerMode(struct drxk_state *state, enum DRXPowerMode *mode)
{
	int status = 0;
	u16 sioCcPwdMode = 0;

	dprintk(1, "\n");

	/* Check arguments */
	if (mode == NULL)
		return -EINVAL;

	/* Map the abstract power mode onto the SIO_CC power-down level */
	switch (*mode) {
	case DRX_POWER_UP:
		sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_NONE;
		break;
	case DRXK_POWER_DOWN_OFDM:
		sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_OFDM;
		break;
	case DRXK_POWER_DOWN_CORE:
		sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_CLOCK;
		break;
	case DRXK_POWER_DOWN_PLL:
		sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_PLL;
		break;
	case DRX_POWER_DOWN:
		sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_OSC;
		break;
	default:
		/* Unknow sleep mode */
		return -EINVAL;
	}

	/* If already in requested power mode, do nothing */
	if (state->m_currentPowerMode == *mode)
		return 0;

	/* For next steps make sure to start from DRX_POWER_UP mode */
	if (state->m_currentPowerMode != DRX_POWER_UP) {
		status = PowerUpDevice(state);
		if (status < 0)
			goto error;
		status = DVBTEnableOFDMTokenRing(state, true);
		if (status < 0)
			goto error;
	}

	if (*mode == DRX_POWER_UP) {
		/* Restore analog & pin configuartion */
	} else {
		/* Power down to requested mode */
		/* Backup some register settings */
		/* Set pins with possible pull-ups connected
		   to them in input mode */
		/* Analog power down */
		/* ADC power down */
		/* Power down device */
		/* stop all comm_exec */
		/* Stop and power down previous standard */
		switch (state->m_OperationMode) {
		case OM_DVBT:
			status = MPEGTSStop(state);
			if (status < 0)
				goto error;
			/* false: don't recurse back into CtrlPowerMode */
			status = PowerDownDVBT(state, false);
			if (status < 0)
				goto error;
			break;
		case OM_QAM_ITU_A:
		case OM_QAM_ITU_C:
			status = MPEGTSStop(state);
			if (status < 0)
				goto error;
			status = PowerDownQAM(state);
			if (status < 0)
				goto error;
			break;
		default:
			break;
		}
		status = DVBTEnableOFDMTokenRing(state, false);
		if (status < 0)
			goto error;
		status = write16(state, SIO_CC_PWD_MODE__A, sioCcPwdMode);
		if (status < 0)
			goto error;
		/* Power-mode change only takes effect after the update key */
		status = write16(state, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY);
		if (status < 0)
			goto error;

		if (*mode != DRXK_POWER_DOWN_OFDM) {
			/* Also put the host interface to sleep */
			state->m_HICfgCtrl |=
				SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ;
			status = HI_CfgCommand(state);
			if (status < 0)
				goto error;
		}
	}
	state->m_currentPowerMode = *mode;

error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);

	return status;
}
1742
/*
 * Stop and power down the DVB-T datapath.
 *
 * @setPowerMode: when true, additionally drop the whole chip to
 *                DRXK_POWER_DOWN_OFDM via CtrlPowerMode(); callers inside
 *                CtrlPowerMode() itself pass false to avoid recursion.
 */
static int PowerDownDVBT(struct drxk_state *state, bool setPowerMode)
{
	enum DRXPowerMode powerMode = DRXK_POWER_DOWN_OFDM;
	u16 cmdResult = 0;
	u16 data = 0;
	int status;

	dprintk(1, "\n");

	status = read16(state, SCU_COMM_EXEC__A, &data);
	if (status < 0)
		goto error;
	if (data == SCU_COMM_EXEC_ACTIVE) {
		/* Send OFDM stop command */
		status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_STOP, 0, NULL, 1, &cmdResult);
		if (status < 0)
			goto error;
		/* Send OFDM reset command */
		status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_RESET, 0, NULL, 1, &cmdResult);
		if (status < 0)
			goto error;
	}

	/* Reset datapath for OFDM, processors first */
	status = write16(state, OFDM_SC_COMM_EXEC__A, OFDM_SC_COMM_EXEC_STOP);
	if (status < 0)
		goto error;
	status = write16(state, OFDM_LC_COMM_EXEC__A, OFDM_LC_COMM_EXEC_STOP);
	if (status < 0)
		goto error;
	status = write16(state, IQM_COMM_EXEC__A, IQM_COMM_EXEC_B_STOP);
	if (status < 0)
		goto error;

	/* powerdown AFE */
	status = SetIqmAf(state, false);
	if (status < 0)
		goto error;

	/* powerdown to OFDM mode */
	if (setPowerMode) {
		status = CtrlPowerMode(state, &powerMode);
		if (status < 0)
			goto error;
	}
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
1793
/*
 * Switch the demodulator between operation modes (DVB-T / DVB-C flavours).
 *
 * Powers down the currently active standard first, then powers up the
 * requested one.  OM_QAM_ITU_B is not supported and yields -EINVAL.
 */
static int SetOperationMode(struct drxk_state *state,
			    enum OperationMode oMode)
{
	int status = 0;

	dprintk(1, "\n");
	/*
	   Stop and power down previous standard
	   TODO investigate total power down instead of partial
	   power down depending on "previous" standard.
	 */

	/* disable HW lock indicator */
	status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
	if (status < 0)
		goto error;

	/* Device is already at the required mode */
	if (state->m_OperationMode == oMode)
		return 0;

	switch (state->m_OperationMode) {
		/* OM_NONE was added for start up */
	case OM_NONE:
		break;
	case OM_DVBT:
		status = MPEGTSStop(state);
		if (status < 0)
			goto error;
		status = PowerDownDVBT(state, true);
		if (status < 0)
			goto error;
		state->m_OperationMode = OM_NONE;
		break;
	case OM_QAM_ITU_A:	/* fallthrough */
	case OM_QAM_ITU_C:
		status = MPEGTSStop(state);
		if (status < 0)
			goto error;
		status = PowerDownQAM(state);
		if (status < 0)
			goto error;
		state->m_OperationMode = OM_NONE;
		break;
	case OM_QAM_ITU_B:
	default:
		status = -EINVAL;
		goto error;
	}

	/*
	   Power up new standard
	 */
	switch (oMode) {
	case OM_DVBT:
		/* m_OperationMode is set before the Set*Standard call so the
		   helpers see the new mode while configuring. */
		state->m_OperationMode = oMode;
		status = SetDVBTStandard(state, oMode);
		if (status < 0)
			goto error;
		break;
	case OM_QAM_ITU_A:	/* fallthrough */
	case OM_QAM_ITU_C:
		state->m_OperationMode = oMode;
		status = SetQAMStandard(state, oMode);
		if (status < 0)
			goto error;
		break;
	case OM_QAM_ITU_B:
	default:
		status = -EINVAL;
	}
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
1870
/*
 * Start demodulation at the given IF.
 *
 * @offsetFreq:            tuner frequency offset in Hz (converted to kHz)
 * @IntermediateFrequency: IF in Hz; a negative value means the spectrum is
 *                         inverted - the mirror flag is toggled and the
 *                         absolute value used.
 *
 * Only valid from the DRXK_STOPPED or DRXK_DTV_STARTED states.  For an
 * unhandled operation mode the initial -EINVAL is returned.
 */
static int Start(struct drxk_state *state, s32 offsetFreq,
		 s32 IntermediateFrequency)
{
	int status = -EINVAL;

	u16 IFreqkHz;
	s32 OffsetkHz = offsetFreq / 1000;

	dprintk(1, "\n");
	if (state->m_DrxkState != DRXK_STOPPED &&
		state->m_DrxkState != DRXK_DTV_STARTED)
		goto error;

	state->m_bMirrorFreqSpect = (state->param.inversion == INVERSION_ON);

	if (IntermediateFrequency < 0) {
		state->m_bMirrorFreqSpect = !state->m_bMirrorFreqSpect;
		IntermediateFrequency = -IntermediateFrequency;
	}

	switch (state->m_OperationMode) {
	case OM_QAM_ITU_A:
	case OM_QAM_ITU_C:
		IFreqkHz = (IntermediateFrequency / 1000);
		status = SetQAM(state, IFreqkHz, OffsetkHz);
		if (status < 0)
			goto error;
		state->m_DrxkState = DRXK_DTV_STARTED;
		break;
	case OM_DVBT:
		IFreqkHz = (IntermediateFrequency / 1000);
		/* TS output is stopped while retuning */
		status = MPEGTSStop(state);
		if (status < 0)
			goto error;
		status = SetDVBT(state, IFreqkHz, OffsetkHz);
		if (status < 0)
			goto error;
		status = DVBTStart(state);
		if (status < 0)
			goto error;
		state->m_DrxkState = DRXK_DTV_STARTED;
		break;
	default:
		break;
	}
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
1921
/* Best-effort shutdown: stop the TS output, deliberately ignoring any
 * error from MPEGTSStop() (which logs it itself), and report success. */
static int ShutDown(struct drxk_state *state)
{
	dprintk(1, "\n");

	MPEGTSStop(state);
	return 0;
}
1929
/*
 * Query the lock status of the active standard.
 *
 * @pLockStatus: out-parameter, preset to NOT_LOCKED; filled in by the
 *               standard-specific helper.
 * @Time:        currently unused by this implementation.
 *
 * Returns -EINVAL for a NULL pointer or an unknown operation mode.
 */
static int GetLockStatus(struct drxk_state *state, u32 *pLockStatus,
			 u32 Time)
{
	int status = -EINVAL;

	dprintk(1, "\n");

	if (pLockStatus == NULL)
		goto error;

	*pLockStatus = NOT_LOCKED;

	/* define the SCU command code */
	switch (state->m_OperationMode) {
	case OM_QAM_ITU_A:
	case OM_QAM_ITU_B:
	case OM_QAM_ITU_C:
		status = GetQAMLockStatus(state, pLockStatus);
		break;
	case OM_DVBT:
		status = GetDVBTLockStatus(state, pLockStatus);
		break;
	default:
		break;
	}
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
1960
1961static int MPEGTSStart(struct drxk_state *state)
1962{
1963 int status;
1964
1965 u16 fecOcSncMode = 0;
1966
1967 /* Allow OC to sync again */
1968 status = read16(state, FEC_OC_SNC_MODE__A, &fecOcSncMode);
1969 if (status < 0)
1970 goto error;
1971 fecOcSncMode &= ~FEC_OC_SNC_MODE_SHUTDOWN__M;
1972 status = write16(state, FEC_OC_SNC_MODE__A, fecOcSncMode);
1973 if (status < 0)
1974 goto error;
1975 status = write16(state, FEC_OC_SNC_UNLOCK__A, 1);
1976error:
1977 if (status < 0)
1978 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
1979 return status;
1980}
1981
1982static int MPEGTSDtoInit(struct drxk_state *state)
1983{
1984 int status;
1985
1986 dprintk(1, "\n");
1987
1988 /* Rate integration settings */
1989 status = write16(state, FEC_OC_RCN_CTL_STEP_LO__A, 0x0000);
1990 if (status < 0)
1991 goto error;
1992 status = write16(state, FEC_OC_RCN_CTL_STEP_HI__A, 0x000C);
1993 if (status < 0)
1994 goto error;
1995 status = write16(state, FEC_OC_RCN_GAIN__A, 0x000A);
1996 if (status < 0)
1997 goto error;
1998 status = write16(state, FEC_OC_AVR_PARM_A__A, 0x0008);
1999 if (status < 0)
2000 goto error;
2001 status = write16(state, FEC_OC_AVR_PARM_B__A, 0x0006);
2002 if (status < 0)
2003 goto error;
2004 status = write16(state, FEC_OC_TMD_HI_MARGIN__A, 0x0680);
2005 if (status < 0)
2006 goto error;
2007 status = write16(state, FEC_OC_TMD_LO_MARGIN__A, 0x0080);
2008 if (status < 0)
2009 goto error;
2010 status = write16(state, FEC_OC_TMD_COUNT__A, 0x03F4);
2011 if (status < 0)
2012 goto error;
2013
2014 /* Additional configuration */
2015 status = write16(state, FEC_OC_OCR_INVERT__A, 0);
2016 if (status < 0)
2017 goto error;
2018 status = write16(state, FEC_OC_SNC_LWM__A, 2);
2019 if (status < 0)
2020 goto error;
2021 status = write16(state, FEC_OC_SNC_HWM__A, 12);
2022error:
2023 if (status < 0)
2024 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
2025
2026 return status;
2027}
2028
/*
 * Configure the FEC output DTO (digital timing oscillator) for the given
 * operation mode: parity-byte insertion, serial/parallel framing, and
 * either a static output clock (rational DTO derived from the configured
 * max bitrate) or a fully dynamic one.
 */
static int MPEGTSDtoSetup(struct drxk_state *state,
			  enum OperationMode oMode)
{
	int status;

	u16 fecOcRegMode = 0;	/* FEC_OC_MODE register value */
	u16 fecOcRegIprMode = 0;	/* FEC_OC_IPR_MODE register value */
	u16 fecOcDtoMode = 0;	/* FEC_OC_DTO_MODE register value */
	u16 fecOcFctMode = 0;	/* FEC_OC_FCT_MODE register value */
	u16 fecOcDtoPeriod = 2;	/* FEC_OC_DTO_PERIOD register value */
	u16 fecOcDtoBurstLen = 188;	/* FEC_OC_DTO_BURST_LEN: TS packet size */
	u32 fecOcRcnCtlRate = 0;	/* FEC_OC_RCN_CTL_RATE register value */
	u16 fecOcTmdMode = 0;
	u16 fecOcTmdIntUpdRate = 0;
	u32 maxBitRate = 0;
	bool staticCLK = false;

	dprintk(1, "\n");

	/* Check insertion of the Reed-Solomon parity bytes */
	status = read16(state, FEC_OC_MODE__A, &fecOcRegMode);
	if (status < 0)
		goto error;
	status = read16(state, FEC_OC_IPR_MODE__A, &fecOcRegIprMode);
	if (status < 0)
		goto error;
	fecOcRegMode &= (~FEC_OC_MODE_PARITY__M);
	fecOcRegIprMode &= (~FEC_OC_IPR_MODE_MVAL_DIS_PAR__M);
	if (state->m_insertRSByte == true) {
		/* enable parity symbol forward */
		fecOcRegMode |= FEC_OC_MODE_PARITY__M;
		/* MVAL disable during parity bytes */
		fecOcRegIprMode |= FEC_OC_IPR_MODE_MVAL_DIS_PAR__M;
		/* TS burst length to 204 */
		fecOcDtoBurstLen = 204;
	}

	/* Check serial or parrallel output */
	fecOcRegIprMode &= (~(FEC_OC_IPR_MODE_SERIAL__M));
	if (state->m_enableParallel == false) {
		/* MPEG data output is serial -> set ipr_mode[0] */
		fecOcRegIprMode |= FEC_OC_IPR_MODE_SERIAL__M;
	}

	/* Per-standard timing/rate parameters */
	switch (oMode) {
	case OM_DVBT:
		maxBitRate = state->m_DVBTBitrate;
		fecOcTmdMode = 3;
		fecOcRcnCtlRate = 0xC00000;
		staticCLK = state->m_DVBTStaticCLK;
		break;
	case OM_QAM_ITU_A:	/* fallthrough */
	case OM_QAM_ITU_C:
		fecOcTmdMode = 0x0004;
		fecOcRcnCtlRate = 0xD2B4EE;	/* good for >63 Mb/s */
		maxBitRate = state->m_DVBCBitrate;
		staticCLK = state->m_DVBCStaticCLK;
		break;
	default:
		status = -EINVAL;
	}		/* switch (standard) */
	if (status < 0)
		goto error;

	/* Configure DTO's */
	if (staticCLK) {
		u32 bitRate = 0;

		/* Rational DTO for MCLK source (static MCLK rate),
		   Dynamic DTO for optimal grouping
		   (avoid intra-packet gaps),
		   DTO offset enable to sync TS burst with MSTRT */
		fecOcDtoMode = (FEC_OC_DTO_MODE_DYNAMIC__M |
				FEC_OC_DTO_MODE_OFFSET_ENABLE__M);
		fecOcFctMode = (FEC_OC_FCT_MODE_RAT_ENA__M |
				FEC_OC_FCT_MODE_VIRT_ENA__M);

		/* Check user defined bitrate */
		bitRate = maxBitRate;
		if (bitRate > 75900000UL) {	/* max is 75.9 Mb/s */
			bitRate = 75900000UL;
		}
		/* Rational DTO period:
		   dto_period = (Fsys / bitrate) - 2

		   Result should be floored,
		   to make sure >= requested bitrate
		 */
		fecOcDtoPeriod = (u16) (((state->m_sysClockFreq)
					 * 1000) / bitRate);
		if (fecOcDtoPeriod <= 2)
			fecOcDtoPeriod = 0;
		else
			fecOcDtoPeriod -= 2;
		fecOcTmdIntUpdRate = 8;
	} else {
		/* (commonAttr->staticCLK == false) => dynamic mode */
		fecOcDtoMode = FEC_OC_DTO_MODE_DYNAMIC__M;
		fecOcFctMode = FEC_OC_FCT_MODE__PRE;
		fecOcTmdIntUpdRate = 5;
	}

	/* Write appropriate registers with requested configuration */
	status = write16(state, FEC_OC_DTO_BURST_LEN__A, fecOcDtoBurstLen);
	if (status < 0)
		goto error;
	status = write16(state, FEC_OC_DTO_PERIOD__A, fecOcDtoPeriod);
	if (status < 0)
		goto error;
	status = write16(state, FEC_OC_DTO_MODE__A, fecOcDtoMode);
	if (status < 0)
		goto error;
	status = write16(state, FEC_OC_FCT_MODE__A, fecOcFctMode);
	if (status < 0)
		goto error;
	status = write16(state, FEC_OC_MODE__A, fecOcRegMode);
	if (status < 0)
		goto error;
	status = write16(state, FEC_OC_IPR_MODE__A, fecOcRegIprMode);
	if (status < 0)
		goto error;

	/* Rate integration settings */
	status = write32(state, FEC_OC_RCN_CTL_RATE_LO__A, fecOcRcnCtlRate);
	if (status < 0)
		goto error;
	status = write16(state, FEC_OC_TMD_INT_UPD_RATE__A, fecOcTmdIntUpdRate);
	if (status < 0)
		goto error;
	status = write16(state, FEC_OC_TMD_MODE__A, fecOcTmdMode);
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
2164
2165static int MPEGTSConfigurePolarity(struct drxk_state *state)
2166{
2167 u16 fecOcRegIprInvert = 0;
2168
2169 /* Data mask for the output data byte */
2170 u16 InvertDataMask =
2171 FEC_OC_IPR_INVERT_MD7__M | FEC_OC_IPR_INVERT_MD6__M |
2172 FEC_OC_IPR_INVERT_MD5__M | FEC_OC_IPR_INVERT_MD4__M |
2173 FEC_OC_IPR_INVERT_MD3__M | FEC_OC_IPR_INVERT_MD2__M |
2174 FEC_OC_IPR_INVERT_MD1__M | FEC_OC_IPR_INVERT_MD0__M;
2175
2176 dprintk(1, "\n");
2177
2178 /* Control selective inversion of output bits */
2179 fecOcRegIprInvert &= (~(InvertDataMask));
2180 if (state->m_invertDATA == true)
2181 fecOcRegIprInvert |= InvertDataMask;
2182 fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MERR__M));
2183 if (state->m_invertERR == true)
2184 fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MERR__M;
2185 fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MSTRT__M));
2186 if (state->m_invertSTR == true)
2187 fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MSTRT__M;
2188 fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MVAL__M));
2189 if (state->m_invertVAL == true)
2190 fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MVAL__M;
2191 fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MCLK__M));
2192 if (state->m_invertCLK == true)
2193 fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MCLK__M;
2194
2195 return write16(state, FEC_OC_IPR_INVERT__A, fecOcRegIprInvert);
2196}
2197
2198#define SCU_RAM_AGC_KI_INV_RF_POL__M 0x4000
2199
/*
 * SetAgcRf() - configure the RF AGC according to pAgcCfg->ctrlMode.
 *
 * AUTO: RF AGC DAC enabled, SCU RF AGC loop enabled; polarity, loop
 *       speed, TOP (only when the matching IF-AGC is also AUTO),
 *       cut-off current and max output level are programmed.
 * USER: DAC enabled, SCU loop disabled; pAgcCfg->outputLevel is
 *       written directly to the output pin.
 * OFF:  DAC put in standby, SCU loop disabled.
 *
 * @isDTV is accepted for interface symmetry but not used in this body.
 * Returns 0 on success or a negative error code (logged at "error:").
 */
static int SetAgcRf(struct drxk_state *state,
		    struct SCfgAgc *pAgcCfg, bool isDTV)
{
	int status = -EINVAL;
	u16 data = 0;
	struct SCfgAgc *pIfAgcSettings;

	dprintk(1, "\n");

	if (pAgcCfg == NULL)
		goto error;

	switch (pAgcCfg->ctrlMode) {
	case DRXK_AGC_CTRL_AUTO:
		/* Enable RF AGC DAC (clear its standby bit) */
		status = read16(state, IQM_AF_STDBY__A, &data);
		if (status < 0)
			goto error;
		data &= ~IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY;
		status = write16(state, IQM_AF_STDBY__A, data);
		if (status < 0)
			goto error;
		status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
		if (status < 0)
			goto error;

		/* Enable SCU RF AGC loop */
		data &= ~SCU_RAM_AGC_CONFIG_DISABLE_RF_AGC__M;

		/* Polarity */
		if (state->m_RfAgcPol)
			data |= SCU_RAM_AGC_CONFIG_INV_RF_POL__M;
		else
			data &= ~SCU_RAM_AGC_CONFIG_INV_RF_POL__M;
		status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
		if (status < 0)
			goto error;

		/* Set speed (using complementary reduction value):
		   the register holds the bitwise complement of the
		   requested speed, masked to the RAGC_RED field. */
		status = read16(state, SCU_RAM_AGC_KI_RED__A, &data);
		if (status < 0)
			goto error;

		data &= ~SCU_RAM_AGC_KI_RED_RAGC_RED__M;
		data |= (~(pAgcCfg->speed <<
				SCU_RAM_AGC_KI_RED_RAGC_RED__B)
				& SCU_RAM_AGC_KI_RED_RAGC_RED__M);

		status = write16(state, SCU_RAM_AGC_KI_RED__A, data);
		if (status < 0)
			goto error;

		/* Pick the IF-AGC config matching the current standard */
		if (IsDVBT(state))
			pIfAgcSettings = &state->m_dvbtIfAgcCfg;
		else if (IsQAM(state))
			pIfAgcSettings = &state->m_qamIfAgcCfg;
		else
			pIfAgcSettings = &state->m_atvIfAgcCfg;
		/* NOTE(review): this check can never trigger — all three
		   candidates above are addresses of members embedded in
		   *state; kept for defensive symmetry. */
		if (pIfAgcSettings == NULL) {
			status = -EINVAL;
			goto error;
		}

		/* Set TOP, only if IF-AGC is in AUTO mode */
		if (pIfAgcSettings->ctrlMode == DRXK_AGC_CTRL_AUTO)
			status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, pAgcCfg->top);
		if (status < 0)
			goto error;

		/* Cut-Off current */
		status = write16(state, SCU_RAM_AGC_RF_IACCU_HI_CO__A, pAgcCfg->cutOffCurrent);
		if (status < 0)
			goto error;

		/* Max. output level */
		status = write16(state, SCU_RAM_AGC_RF_MAX__A, pAgcCfg->maxOutputLevel);
		if (status < 0)
			goto error;

		break;

	case DRXK_AGC_CTRL_USER:
		/* Enable RF AGC DAC */
		status = read16(state, IQM_AF_STDBY__A, &data);
		if (status < 0)
			goto error;
		data &= ~IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY;
		status = write16(state, IQM_AF_STDBY__A, data);
		if (status < 0)
			goto error;

		/* Disable SCU RF AGC loop */
		status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
		if (status < 0)
			goto error;
		data |= SCU_RAM_AGC_CONFIG_DISABLE_RF_AGC__M;
		if (state->m_RfAgcPol)
			data |= SCU_RAM_AGC_CONFIG_INV_RF_POL__M;
		else
			data &= ~SCU_RAM_AGC_CONFIG_INV_RF_POL__M;
		status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
		if (status < 0)
			goto error;

		/* SCU c.o.c. to 0, enabling full control range */
		status = write16(state, SCU_RAM_AGC_RF_IACCU_HI_CO__A, 0);
		if (status < 0)
			goto error;

		/* Write value to output pin */
		status = write16(state, SCU_RAM_AGC_RF_IACCU_HI__A, pAgcCfg->outputLevel);
		if (status < 0)
			goto error;
		break;

	case DRXK_AGC_CTRL_OFF:
		/* Disable RF AGC DAC (set its standby bit) */
		status = read16(state, IQM_AF_STDBY__A, &data);
		if (status < 0)
			goto error;
		data |= IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY;
		status = write16(state, IQM_AF_STDBY__A, data);
		if (status < 0)
			goto error;

		/* Disable SCU RF AGC loop */
		status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
		if (status < 0)
			goto error;
		data |= SCU_RAM_AGC_CONFIG_DISABLE_RF_AGC__M;
		status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
		if (status < 0)
			goto error;
		break;

	default:
		/* Unknown control mode */
		status = -EINVAL;

	}
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
2344
2345#define SCU_RAM_AGC_KI_INV_IF_POL__M 0x2000
2346
2347static int SetAgcIf(struct drxk_state *state,
2348 struct SCfgAgc *pAgcCfg, bool isDTV)
2349{
2350 u16 data = 0;
2351 int status = 0;
2352 struct SCfgAgc *pRfAgcSettings;
2353
2354 dprintk(1, "\n");
2355
2356 switch (pAgcCfg->ctrlMode) {
2357 case DRXK_AGC_CTRL_AUTO:
2358
2359 /* Enable IF AGC DAC */
2360 status = read16(state, IQM_AF_STDBY__A, &data);
2361 if (status < 0)
2362 goto error;
2363 data &= ~IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY;
2364 status = write16(state, IQM_AF_STDBY__A, data);
2365 if (status < 0)
2366 goto error;
2367
2368 status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
2369 if (status < 0)
2370 goto error;
2371
2372 /* Enable SCU IF AGC loop */
2373 data &= ~SCU_RAM_AGC_CONFIG_DISABLE_IF_AGC__M;
2374
2375 /* Polarity */
2376 if (state->m_IfAgcPol)
2377 data |= SCU_RAM_AGC_CONFIG_INV_IF_POL__M;
2378 else
2379 data &= ~SCU_RAM_AGC_CONFIG_INV_IF_POL__M;
2380 status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
2381 if (status < 0)
2382 goto error;
2383
2384 /* Set speed (using complementary reduction value) */
2385 status = read16(state, SCU_RAM_AGC_KI_RED__A, &data);
2386 if (status < 0)
2387 goto error;
2388 data &= ~SCU_RAM_AGC_KI_RED_IAGC_RED__M;
2389 data |= (~(pAgcCfg->speed <<
2390 SCU_RAM_AGC_KI_RED_IAGC_RED__B)
2391 & SCU_RAM_AGC_KI_RED_IAGC_RED__M);
2392
2393 status = write16(state, SCU_RAM_AGC_KI_RED__A, data);
2394 if (status < 0)
2395 goto error;
2396
2397 if (IsQAM(state))
2398 pRfAgcSettings = &state->m_qamRfAgcCfg;
2399 else
2400 pRfAgcSettings = &state->m_atvRfAgcCfg;
2401 if (pRfAgcSettings == NULL)
2402 return -1;
2403 /* Restore TOP */
2404 status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, pRfAgcSettings->top);
2405 if (status < 0)
2406 goto error;
2407 break;
2408
2409 case DRXK_AGC_CTRL_USER:
2410
2411 /* Enable IF AGC DAC */
2412 status = read16(state, IQM_AF_STDBY__A, &data);
2413 if (status < 0)
2414 goto error;
2415 data &= ~IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY;
2416 status = write16(state, IQM_AF_STDBY__A, data);
2417 if (status < 0)
2418 goto error;
2419
2420 status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
2421 if (status < 0)
2422 goto error;
2423
2424 /* Disable SCU IF AGC loop */
2425 data |= SCU_RAM_AGC_CONFIG_DISABLE_IF_AGC__M;
2426
2427 /* Polarity */
2428 if (state->m_IfAgcPol)
2429 data |= SCU_RAM_AGC_CONFIG_INV_IF_POL__M;
2430 else
2431 data &= ~SCU_RAM_AGC_CONFIG_INV_IF_POL__M;
2432 status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
2433 if (status < 0)
2434 goto error;
2435
2436 /* Write value to output pin */
2437 status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, pAgcCfg->outputLevel);
2438 if (status < 0)
2439 goto error;
2440 break;
2441
2442 case DRXK_AGC_CTRL_OFF:
2443
2444 /* Disable If AGC DAC */
2445 status = read16(state, IQM_AF_STDBY__A, &data);
2446 if (status < 0)
2447 goto error;
2448 data |= IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY;
2449 status = write16(state, IQM_AF_STDBY__A, data);
2450 if (status < 0)
2451 goto error;
2452
2453 /* Disable SCU IF AGC loop */
2454 status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
2455 if (status < 0)
2456 goto error;
2457 data |= SCU_RAM_AGC_CONFIG_DISABLE_IF_AGC__M;
2458 status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
2459 if (status < 0)
2460 goto error;
2461 break;
2462 } /* switch (agcSettingsIf->ctrlMode) */
2463
2464 /* always set the top to support
2465 configurations without if-loop */
2466 status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MIN__A, pAgcCfg->top);
2467error:
2468 if (status < 0)
2469 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
2470 return status;
2471}
2472
2473static int ReadIFAgc(struct drxk_state *state, u32 *pValue)
2474{
2475 u16 agcDacLvl;
2476 int status;
2477 u16 Level = 0;
2478
2479 dprintk(1, "\n");
2480
2481 status = read16(state, IQM_AF_AGC_IF__A, &agcDacLvl);
2482 if (status < 0) {
2483 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
2484 return status;
2485 }
2486
2487 *pValue = 0;
2488
2489 if (agcDacLvl > DRXK_AGC_DAC_OFFSET)
2490 Level = agcDacLvl - DRXK_AGC_DAC_OFFSET;
2491 if (Level < 14000)
2492 *pValue = (14000 - Level) / 4;
2493 else
2494 *pValue = 0;
2495
2496 return status;
2497}
2498
/*
 * GetQAMSignalToNoise() - compute the QAM MER from the slicer error
 * power register and a constellation-dependent signal power constant.
 *
 * *pSignalToNoise = Log10Times100(sig) - Log10Times100(err), i.e.
 * 100*log10 of the signal/error ratio (presumably 0.1 dB units given
 * the Log10Times100 helper — confirm against its definition).
 * If the error power reads as 0 the result is left at 0.
 *
 * Returns 0 on success, -EINVAL if the register read fails.
 */
static int GetQAMSignalToNoise(struct drxk_state *state,
			       s32 *pSignalToNoise)
{
	int status = 0;
	u16 qamSlErrPower = 0;	/* accum. error between
				   raw and sliced symbols */
	u32 qamSlSigPower = 0;	/* used for MER, depends of
				   QAM constellation */
	u32 qamSlMer = 0;	/* QAM MER */

	dprintk(1, "\n");

	/* MER calculation */

	/* get the register value needed for MER */
	status = read16(state, QAM_SL_ERR_POWER__A, &qamSlErrPower);
	if (status < 0) {
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
		return -EINVAL;
	}

	/* Constellation-specific nominal signal power, scaled by 4 */
	switch (state->param.u.qam.modulation) {
	case QAM_16:
		qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM16 << 2;
		break;
	case QAM_32:
		qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM32 << 2;
		break;
	case QAM_64:
		qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM64 << 2;
		break;
	case QAM_128:
		qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM128 << 2;
		break;
	default:		/* unknown modulations treated as QAM-256 */
	case QAM_256:
		qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM256 << 2;
		break;
	}

	if (qamSlErrPower > 0) {
		qamSlMer = Log10Times100(qamSlSigPower) -
			Log10Times100((u32) qamSlErrPower);
	}
	*pSignalToNoise = qamSlMer;

	return status;
}
2547
/*
 * GetDVBTSignalToNoise() - compute the DVB-T MER ("iMER") from the
 * equalizer's squared-error accumulators and the TPS power offset.
 *
 * The MER is derived without floating point using the identity
 * documented in the in-body comment (a + b - c with Log10Times100),
 * clipped to zero. On any register-read failure the function bails
 * out and returns the negative status (logged at "error:");
 * *pSignalToNoise is only written on the success path.
 */
static int GetDVBTSignalToNoise(struct drxk_state *state,
				s32 *pSignalToNoise)
{
	int status;
	u16 regData = 0;
	u32 EqRegTdSqrErrI = 0;
	u32 EqRegTdSqrErrQ = 0;
	u16 EqRegTdSqrErrExp = 0;
	u16 EqRegTdTpsPwrOfs = 0;
	u16 EqRegTdReqSmbCnt = 0;
	u32 tpsCnt = 0;
	u32 SqrErrIQ = 0;
	u32 a = 0;
	u32 b = 0;
	u32 c = 0;
	u32 iMER = 0;
	u16 transmissionParams = 0;

	dprintk(1, "\n");

	status = read16(state, OFDM_EQ_TOP_TD_TPS_PWR_OFS__A, &EqRegTdTpsPwrOfs);
	if (status < 0)
		goto error;
	status = read16(state, OFDM_EQ_TOP_TD_REQ_SMB_CNT__A, &EqRegTdReqSmbCnt);
	if (status < 0)
		goto error;
	status = read16(state, OFDM_EQ_TOP_TD_SQR_ERR_EXP__A, &EqRegTdSqrErrExp);
	if (status < 0)
		goto error;
	status = read16(state, OFDM_EQ_TOP_TD_SQR_ERR_I__A, &regData);
	if (status < 0)
		goto error;
	/* Extend SQR_ERR_I operational range: with a large exponent a
	   small raw value is taken to have wrapped past 16 bits */
	EqRegTdSqrErrI = (u32) regData;
	if ((EqRegTdSqrErrExp > 11) &&
		(EqRegTdSqrErrI < 0x00000FFFUL)) {
		EqRegTdSqrErrI += 0x00010000UL;
	}
	status = read16(state, OFDM_EQ_TOP_TD_SQR_ERR_Q__A, &regData);
	if (status < 0)
		goto error;
	/* Extend SQR_ERR_Q operational range (same scheme as for I) */
	EqRegTdSqrErrQ = (u32) regData;
	if ((EqRegTdSqrErrExp > 11) &&
		(EqRegTdSqrErrQ < 0x00000FFFUL))
		EqRegTdSqrErrQ += 0x00010000UL;

	status = read16(state, OFDM_SC_RA_RAM_OP_PARAM__A, &transmissionParams);
	if (status < 0)
		goto error;

	/* Check input data for MER */

	/* MER calculation (in 0.1 dB) without math.h */
	if ((EqRegTdTpsPwrOfs == 0) || (EqRegTdReqSmbCnt == 0))
		iMER = 0;
	else if ((EqRegTdSqrErrI + EqRegTdSqrErrQ) == 0) {
		/* No error at all, this must be the HW reset value
		 * Apparently no first measurement yet
		 * Set MER to 0.0 */
		iMER = 0;
	} else {
		SqrErrIQ = (EqRegTdSqrErrI + EqRegTdSqrErrQ) <<
			EqRegTdSqrErrExp;
		/* TPS carriers per symbol: 17 in 2K mode, 68 otherwise */
		if ((transmissionParams &
			OFDM_SC_RA_RAM_OP_PARAM_MODE__M)
			== OFDM_SC_RA_RAM_OP_PARAM_MODE_2K)
			tpsCnt = 17;
		else
			tpsCnt = 68;

		/* IMER = 100 * log10 (x)
		   where x = (EqRegTdTpsPwrOfs^2 *
		   EqRegTdReqSmbCnt * tpsCnt)/SqrErrIQ

		   => IMER = a + b -c
		   where a = 100 * log10 (EqRegTdTpsPwrOfs^2)
		   b = 100 * log10 (EqRegTdReqSmbCnt * tpsCnt)
		   c = 100 * log10 (SqrErrIQ)
		 */

		/* log(x) x = 9bits * 9bits->18 bits  */
		a = Log10Times100(EqRegTdTpsPwrOfs *
					EqRegTdTpsPwrOfs);
		/* log(x) x = 16bits * 7bits->23 bits  */
		b = Log10Times100(EqRegTdReqSmbCnt * tpsCnt);
		/* log(x) x = (16bits + 16bits) << 15 ->32 bits  */
		c = Log10Times100(SqrErrIQ);

		iMER = a + b;
		/* No negative MER, clip to zero */
		if (iMER > c)
			iMER -= c;
		else
			iMER = 0;
	}
	*pSignalToNoise = iMER;

error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
2651
2652static int GetSignalToNoise(struct drxk_state *state, s32 *pSignalToNoise)
2653{
2654 dprintk(1, "\n");
2655
2656 *pSignalToNoise = 0;
2657 switch (state->m_OperationMode) {
2658 case OM_DVBT:
2659 return GetDVBTSignalToNoise(state, pSignalToNoise);
2660 case OM_QAM_ITU_A:
2661 case OM_QAM_ITU_C:
2662 return GetQAMSignalToNoise(state, pSignalToNoise);
2663 default:
2664 break;
2665 }
2666 return 0;
2667}
2668
2669#if 0
2670static int GetDVBTQuality(struct drxk_state *state, s32 *pQuality)
2671{
2672 /* SNR Values for quasi errorfree reception rom Nordig 2.2 */
2673 int status = 0;
2674
2675 dprintk(1, "\n");
2676
2677 static s32 QE_SN[] = {
2678 51, /* QPSK 1/2 */
2679 69, /* QPSK 2/3 */
2680 79, /* QPSK 3/4 */
2681 89, /* QPSK 5/6 */
2682 97, /* QPSK 7/8 */
2683 108, /* 16-QAM 1/2 */
2684 131, /* 16-QAM 2/3 */
2685 146, /* 16-QAM 3/4 */
2686 156, /* 16-QAM 5/6 */
2687 160, /* 16-QAM 7/8 */
2688 165, /* 64-QAM 1/2 */
2689 187, /* 64-QAM 2/3 */
2690 202, /* 64-QAM 3/4 */
2691 216, /* 64-QAM 5/6 */
2692 225, /* 64-QAM 7/8 */
2693 };
2694
2695 *pQuality = 0;
2696
2697 do {
2698 s32 SignalToNoise = 0;
2699 u16 Constellation = 0;
2700 u16 CodeRate = 0;
2701 u32 SignalToNoiseRel;
2702 u32 BERQuality;
2703
2704 status = GetDVBTSignalToNoise(state, &SignalToNoise);
2705 if (status < 0)
2706 break;
2707 status = read16(state, OFDM_EQ_TOP_TD_TPS_CONST__A, &Constellation);
2708 if (status < 0)
2709 break;
2710 Constellation &= OFDM_EQ_TOP_TD_TPS_CONST__M;
2711
2712 status = read16(state, OFDM_EQ_TOP_TD_TPS_CODE_HP__A, &CodeRate);
2713 if (status < 0)
2714 break;
2715 CodeRate &= OFDM_EQ_TOP_TD_TPS_CODE_HP__M;
2716
2717 if (Constellation > OFDM_EQ_TOP_TD_TPS_CONST_64QAM ||
2718 CodeRate > OFDM_EQ_TOP_TD_TPS_CODE_LP_7_8)
2719 break;
2720 SignalToNoiseRel = SignalToNoise -
2721 QE_SN[Constellation * 5 + CodeRate];
2722 BERQuality = 100;
2723
2724 if (SignalToNoiseRel < -70)
2725 *pQuality = 0;
2726 else if (SignalToNoiseRel < 30)
2727 *pQuality = ((SignalToNoiseRel + 70) *
2728 BERQuality) / 100;
2729 else
2730 *pQuality = BERQuality;
2731 } while (0);
2732 return 0;
2733};
2734
2735static int GetDVBCQuality(struct drxk_state *state, s32 *pQuality)
2736{
2737 int status = 0;
2738 *pQuality = 0;
2739
2740 dprintk(1, "\n");
2741
2742 do {
2743 u32 SignalToNoise = 0;
2744 u32 BERQuality = 100;
2745 u32 SignalToNoiseRel = 0;
2746
2747 status = GetQAMSignalToNoise(state, &SignalToNoise);
2748 if (status < 0)
2749 break;
2750
2751 switch (state->param.u.qam.modulation) {
2752 case QAM_16:
2753 SignalToNoiseRel = SignalToNoise - 200;
2754 break;
2755 case QAM_32:
2756 SignalToNoiseRel = SignalToNoise - 230;
2757 break; /* Not in NorDig */
2758 case QAM_64:
2759 SignalToNoiseRel = SignalToNoise - 260;
2760 break;
2761 case QAM_128:
2762 SignalToNoiseRel = SignalToNoise - 290;
2763 break;
2764 default:
2765 case QAM_256:
2766 SignalToNoiseRel = SignalToNoise - 320;
2767 break;
2768 }
2769
2770 if (SignalToNoiseRel < -70)
2771 *pQuality = 0;
2772 else if (SignalToNoiseRel < 30)
2773 *pQuality = ((SignalToNoiseRel + 70) *
2774 BERQuality) / 100;
2775 else
2776 *pQuality = BERQuality;
2777 } while (0);
2778
2779 return status;
2780}
2781
2782static int GetQuality(struct drxk_state *state, s32 *pQuality)
2783{
2784 dprintk(1, "\n");
2785
2786 switch (state->m_OperationMode) {
2787 case OM_DVBT:
2788 return GetDVBTQuality(state, pQuality);
2789 case OM_QAM_ITU_A:
2790 return GetDVBCQuality(state, pQuality);
2791 default:
2792 break;
2793 }
2794
2795 return 0;
2796}
2797#endif
2798
2799/* Free data ram in SIO HI */
2800#define SIO_HI_RA_RAM_USR_BEGIN__A 0x420040
2801#define SIO_HI_RA_RAM_USR_END__A 0x420060
2802
2803#define DRXK_HI_ATOMIC_BUF_START (SIO_HI_RA_RAM_USR_BEGIN__A)
2804#define DRXK_HI_ATOMIC_BUF_END (SIO_HI_RA_RAM_USR_BEGIN__A + 7)
2805#define DRXK_HI_ATOMIC_READ SIO_HI_RA_RAM_PAR_3_ACP_RW_READ
2806#define DRXK_HI_ATOMIC_WRITE SIO_HI_RA_RAM_PAR_3_ACP_RW_WRITE
2807
2808#define DRXDAP_FASI_ADDR2BLOCK(addr) (((addr) >> 22) & 0x3F)
2809#define DRXDAP_FASI_ADDR2BANK(addr) (((addr) >> 16) & 0x3F)
2810#define DRXDAP_FASI_ADDR2OFFSET(addr) ((addr) & 0x7FFF)
2811
2812static int ConfigureI2CBridge(struct drxk_state *state, bool bEnableBridge)
2813{
2814 int status = -EINVAL;
2815
2816 dprintk(1, "\n");
2817
2818 if (state->m_DrxkState == DRXK_UNINITIALIZED)
2819 goto error;
2820 if (state->m_DrxkState == DRXK_POWERED_DOWN)
2821 goto error;
2822
2823 if (state->no_i2c_bridge)
2824 return 0;
2825
2826 status = write16(state, SIO_HI_RA_RAM_PAR_1__A, SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY);
2827 if (status < 0)
2828 goto error;
2829 if (bEnableBridge) {
2830 status = write16(state, SIO_HI_RA_RAM_PAR_2__A, SIO_HI_RA_RAM_PAR_2_BRD_CFG_CLOSED);
2831 if (status < 0)
2832 goto error;
2833 } else {
2834 status = write16(state, SIO_HI_RA_RAM_PAR_2__A, SIO_HI_RA_RAM_PAR_2_BRD_CFG_OPEN);
2835 if (status < 0)
2836 goto error;
2837 }
2838
2839 status = HI_Command(state, SIO_HI_RA_RAM_CMD_BRDCTRL, 0);
2840
2841error:
2842 if (status < 0)
2843 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
2844 return status;
2845}
2846
2847static int SetPreSaw(struct drxk_state *state,
2848 struct SCfgPreSaw *pPreSawCfg)
2849{
2850 int status = -EINVAL;
2851
2852 dprintk(1, "\n");
2853
2854 if ((pPreSawCfg == NULL)
2855 || (pPreSawCfg->reference > IQM_AF_PDREF__M))
2856 goto error;
2857
2858 status = write16(state, IQM_AF_PDREF__A, pPreSawCfg->reference);
2859error:
2860 if (status < 0)
2861 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
2862 return status;
2863}
2864
/*
 * BLDirectCmd() - issue a bootloader "direct" block transfer: copy
 * @nrOfElements from @romOffset to @targetAddr (split into a
 * block/bank header and a 16-bit offset), then poll SIO_BL_STATUS
 * until the bootloader reports done or @timeOut ms elapse.
 *
 * The whole sequence runs under state->mutex, which is released on
 * every exit path. A timeout prints its own message and returns
 * -EINVAL via the "error2" label (skipping the generic error log).
 */
static int BLDirectCmd(struct drxk_state *state, u32 targetAddr,
		       u16 romOffset, u16 nrOfElements, u32 timeOut)
{
	u16 blStatus = 0;
	/* Split the 32-bit target address: low 16 bits are the offset,
	   the next 12 bits the block/bank header */
	u16 offset = (u16) ((targetAddr >> 0) & 0x00FFFF);
	u16 blockbank = (u16) ((targetAddr >> 16) & 0x000FFF);
	int status;
	unsigned long end;

	dprintk(1, "\n");

	mutex_lock(&state->mutex);
	status = write16(state, SIO_BL_MODE__A, SIO_BL_MODE_DIRECT);
	if (status < 0)
		goto error;
	status = write16(state, SIO_BL_TGT_HDR__A, blockbank);
	if (status < 0)
		goto error;
	status = write16(state, SIO_BL_TGT_ADDR__A, offset);
	if (status < 0)
		goto error;
	status = write16(state, SIO_BL_SRC_ADDR__A, romOffset);
	if (status < 0)
		goto error;
	status = write16(state, SIO_BL_SRC_LEN__A, nrOfElements);
	if (status < 0)
		goto error;
	/* Kick off the transfer, then poll for completion */
	status = write16(state, SIO_BL_ENABLE__A, SIO_BL_ENABLE_ON);
	if (status < 0)
		goto error;

	end = jiffies + msecs_to_jiffies(timeOut);
	do {
		status = read16(state, SIO_BL_STATUS__A, &blStatus);
		if (status < 0)
			goto error;
	} while ((blStatus == 0x1) && time_is_after_jiffies(end));
	if (blStatus == 0x1) {
		/* Still busy after the deadline: give up */
		printk(KERN_ERR "drxk: SIO not ready\n");
		status = -EINVAL;
		goto error2;
	}
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
error2:
	mutex_unlock(&state->mutex);
	return status;

}
2915
2916static int ADCSyncMeasurement(struct drxk_state *state, u16 *count)
2917{
2918 u16 data = 0;
2919 int status;
2920
2921 dprintk(1, "\n");
2922
2923 /* Start measurement */
2924 status = write16(state, IQM_AF_COMM_EXEC__A, IQM_AF_COMM_EXEC_ACTIVE);
2925 if (status < 0)
2926 goto error;
2927 status = write16(state, IQM_AF_START_LOCK__A, 1);
2928 if (status < 0)
2929 goto error;
2930
2931 *count = 0;
2932 status = read16(state, IQM_AF_PHASE0__A, &data);
2933 if (status < 0)
2934 goto error;
2935 if (data == 127)
2936 *count = *count + 1;
2937 status = read16(state, IQM_AF_PHASE1__A, &data);
2938 if (status < 0)
2939 goto error;
2940 if (data == 127)
2941 *count = *count + 1;
2942 status = read16(state, IQM_AF_PHASE2__A, &data);
2943 if (status < 0)
2944 goto error;
2945 if (data == 127)
2946 *count = *count + 1;
2947
2948error:
2949 if (status < 0)
2950 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
2951 return status;
2952}
2953
/*
 * ADCSynchronization() - verify ADC sampling alignment and, if only
 * one of the three phase detectors locks, retry once on the opposite
 * clock edge (by toggling IQM_AF_CLKNEG). Fails with -EINVAL when
 * fewer than two detectors lock after the (optional) retry.
 */
static int ADCSynchronization(struct drxk_state *state)
{
	u16 count = 0;
	int status;

	dprintk(1, "\n");

	status = ADCSyncMeasurement(state, &count);
	if (status < 0)
		goto error;

	if (count == 1) {
		/* Try sampling on a diffrent edge */
		u16 clkNeg = 0;

		status = read16(state, IQM_AF_CLKNEG__A, &clkNeg);
		if (status < 0)
			goto error;
		/* NOTE(review): OR-ing with the field mask before the
		 * compare looks like it was meant to be a masked test
		 * (i.e. '&' rather than '|'); with '|' the POS branch is
		 * only taken for specific surrounding bit patterns —
		 * confirm against the DRX-K register spec before
		 * changing. Behavior kept as-is here. */
		if ((clkNeg | IQM_AF_CLKNEG_CLKNEGDATA__M) ==
			IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_POS) {
			clkNeg &= (~(IQM_AF_CLKNEG_CLKNEGDATA__M));
			clkNeg |=
				IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_NEG;
		} else {
			clkNeg &= (~(IQM_AF_CLKNEG_CLKNEGDATA__M));
			clkNeg |=
				IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_POS;
		}
		status = write16(state, IQM_AF_CLKNEG__A, clkNeg);
		if (status < 0)
			goto error;
		/* Re-measure on the new edge */
		status = ADCSyncMeasurement(state, &count);
		if (status < 0)
			goto error;
	}

	/* Need at least two locked phase detectors to call it synced */
	if (count < 2)
		status = -EINVAL;
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
2997
/*
 * SetFrequencyShifter() - compute and program the IQM frequency
 * shifter (IQM_FS_RATE_OFS) for the given IF frequency and tuner
 * offset, accounting for tuner, ADC and RF spectrum mirroring.
 *
 * The shift is expressed as a fraction of the sampling clock
 * (m_sysClockFreq / 3) via Frac28a(), negated (two's complement)
 * when the net image selection requires the negative image.
 */
static int SetFrequencyShifter(struct drxk_state *state,
			       u16 intermediateFreqkHz,
			       s32 tunerFreqOffset, bool isDTV)
{
	bool selectPosImage = false;
	/* NOTE(review): s32 offset stored into u32 — negative offsets
	   wrap modulo 2^32; presumably intended, as the later adds and
	   subtracts are modular too — confirm. */
	u32 rfFreqResidual = tunerFreqOffset;
	u32 fmFrequencyShift = 0;	/* always 0 in this body */
	bool tunerMirror = !state->m_bMirrorFreqSpect;
	u32 adcFreq;
	bool adcFlip;
	int status;
	u32 ifFreqActual;
	u32 samplingFrequency = (u32) (state->m_sysClockFreq / 3);
	u32 frequencyShift;
	bool imageToSelect;

	dprintk(1, "\n");

	/*
	   Program frequency shifter
	   No need to account for mirroring on RF
	 */
	if (isDTV) {
		if ((state->m_OperationMode == OM_QAM_ITU_A) ||
			(state->m_OperationMode == OM_QAM_ITU_C) ||
			(state->m_OperationMode == OM_DVBT))
			selectPosImage = true;
		else
			selectPosImage = false;
	}
	if (tunerMirror)
		/* tuner doesn't mirror */
		ifFreqActual = intermediateFreqkHz +
		    rfFreqResidual + fmFrequencyShift;
	else
		/* tuner mirrors */
		ifFreqActual = intermediateFreqkHz -
		    rfFreqResidual - fmFrequencyShift;
	if (ifFreqActual > samplingFrequency / 2) {
		/* adc mirrors */
		adcFreq = samplingFrequency - ifFreqActual;
		adcFlip = true;
	} else {
		/* adc doesn't mirror */
		adcFreq = ifFreqActual;
		adcFlip = false;
	}

	frequencyShift = adcFreq;
	/* Net image: XOR of every mirroring stage plus the desired one */
	imageToSelect = state->m_rfmirror ^ tunerMirror ^
	    adcFlip ^ selectPosImage;
	state->m_IqmFsRateOfs =
	    Frac28a((frequencyShift), samplingFrequency);

	/* Two's-complement negate to select the negative image */
	if (imageToSelect)
		state->m_IqmFsRateOfs = ~state->m_IqmFsRateOfs + 1;

	/* Program frequency shifter with tuner offset compensation */
	/* frequencyShift += tunerFreqOffset; TODO */
	status = write32(state, IQM_FS_RATE_OFS_LO__A,
			state->m_IqmFsRateOfs);
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
3063
/*
 * InitAGC() - one-time initialization of the SCU AGC machinery for
 * DVB-C (QAM): clip/sense loop limits, accumulator resets, TOP and
 * target levels, and the inner-loop KI gain factors.
 *
 * Only valid in a QAM operation mode; returns -EINVAL otherwise.
 * @isDTV is accepted for interface symmetry but not used in this body.
 * Returns 0 on success or a negative error code (logged at "error:").
 */
static int InitAGC(struct drxk_state *state, bool isDTV)
{
	u16 ingainTgt = 0;
	u16 ingainTgtMin = 0;
	u16 ingainTgtMax = 0;
	u16 clpCyclen = 0;
	u16 clpSumMin = 0;
	u16 clpDirTo = 0;
	u16 snsSumMin = 0;
	u16 snsSumMax = 0;
	u16 clpSumMax = 0;
	u16 snsDirTo = 0;
	u16 kiInnergainMin = 0;
	u16 ifIaccuHiTgt = 0;
	u16 ifIaccuHiTgtMin = 0;
	u16 ifIaccuHiTgtMax = 0;
	u16 data = 0;
	u16 fastClpCtrlDelay = 0;
	u16 clpCtrlMode = 0;
	int status = 0;

	dprintk(1, "\n");

	/* Common settings */
	snsSumMax = 1023;
	ifIaccuHiTgtMin = 2047;
	clpCyclen = 500;
	clpSumMax = 1023;

	/* AGCInit() not available for DVBT; init done in microcode */
	if (!IsQAM(state)) {
		printk(KERN_ERR "drxk: %s: mode %d is not DVB-C\n", __func__, state->m_OperationMode);
		return -EINVAL;
	}

	/* FIXME: Analog TV AGC require different settings */

	/* Standard specific settings */
	/* (u16) casts of negative values store the two's-complement
	   bit pattern the hardware expects */
	clpSumMin = 8;
	clpDirTo = (u16) -9;
	clpCtrlMode = 0;
	snsSumMin = 8;
	snsDirTo = (u16) -9;
	kiInnergainMin = (u16) -1030;
	ifIaccuHiTgtMax = 0x2380;
	ifIaccuHiTgt = 0x2380;
	ingainTgtMin = 0x0511;
	ingainTgt = 0x0511;
	ingainTgtMax = 5119;
	fastClpCtrlDelay = state->m_qamIfAgcCfg.FastClipCtrlDelay;

	status = write16(state, SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A, fastClpCtrlDelay);
	if (status < 0)
		goto error;

	status = write16(state, SCU_RAM_AGC_CLP_CTRL_MODE__A, clpCtrlMode);
	if (status < 0)
		goto error;
	/* Input-gain target and its allowed window */
	status = write16(state, SCU_RAM_AGC_INGAIN_TGT__A, ingainTgt);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MIN__A, ingainTgtMin);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MAX__A, ingainTgtMax);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MIN__A, ifIaccuHiTgtMin);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, ifIaccuHiTgtMax);
	if (status < 0)
		goto error;
	/* Clear the IF/RF integration accumulators */
	status = write16(state, SCU_RAM_AGC_IF_IACCU_HI__A, 0);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_IF_IACCU_LO__A, 0);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_RF_IACCU_HI__A, 0);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_RF_IACCU_LO__A, 0);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_CLP_SUM_MAX__A, clpSumMax);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_SNS_SUM_MAX__A, snsSumMax);
	if (status < 0)
		goto error;

	status = write16(state, SCU_RAM_AGC_KI_INNERGAIN_MIN__A, kiInnergainMin);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT__A, ifIaccuHiTgt);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_CLP_CYCLEN__A, clpCyclen);
	if (status < 0)
		goto error;

	/* RF sense deviation window and control delay */
	status = write16(state, SCU_RAM_AGC_RF_SNS_DEV_MAX__A, 1023);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_RF_SNS_DEV_MIN__A, (u16) -1023);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_FAST_SNS_CTRL_DELAY__A, 50);
	if (status < 0)
		goto error;

	status = write16(state, SCU_RAM_AGC_KI_MAXMINGAIN_TH__A, 20);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_CLP_SUM_MIN__A, clpSumMin);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_SNS_SUM_MIN__A, snsSumMin);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_CLP_DIR_TO__A, clpDirTo);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_SNS_DIR_TO__A, snsDirTo);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_KI_MINGAIN__A, 0x7fff);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_KI_MAXGAIN__A, 0x0);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_KI_MIN__A, 0x0117);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_KI_MAX__A, 0x0657);
	if (status < 0)
		goto error;
	/* Reset the clip and sense loop counters/watchdogs */
	status = write16(state, SCU_RAM_AGC_CLP_SUM__A, 0);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_CLP_CYCCNT__A, 0);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_CLP_DIR_WD__A, 0);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_CLP_DIR_STP__A, 1);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_SNS_SUM__A, 0);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_SNS_CYCCNT__A, 0);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_SNS_DIR_WD__A, 0);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_SNS_DIR_STP__A, 1);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_SNS_CYCLEN__A, 500);
	if (status < 0)
		goto error;
	status = write16(state, SCU_RAM_AGC_KI_CYCLEN__A, 500);
	if (status < 0)
		goto error;

	/* Initialize inner-loop KI gain factors */
	status = read16(state, SCU_RAM_AGC_KI__A, &data);
	if (status < 0)
		goto error;

	/* NOTE(review): the value just read is discarded by the
	   assignment below — the read only matters for its bus access
	   (if at all); presumably a leftover — confirm before removing. */
	data = 0x0657;
	data &= ~SCU_RAM_AGC_KI_RF__M;
	data |= (DRXK_KI_RAGC_QAM << SCU_RAM_AGC_KI_RF__B);
	data &= ~SCU_RAM_AGC_KI_IF__M;
	data |= (DRXK_KI_IAGC_QAM << SCU_RAM_AGC_KI_IF__B);

	status = write16(state, SCU_RAM_AGC_KI__A, data);
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
3251
3252static int DVBTQAMGetAccPktErr(struct drxk_state *state, u16 *packetErr)
3253{
3254 int status;
3255
3256 dprintk(1, "\n");
3257 if (packetErr == NULL)
3258 status = write16(state, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, 0);
3259 else
3260 status = read16(state, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, packetErr);
3261 if (status < 0)
3262 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
3263 return status;
3264}
3265
/**
* \brief Send a command (plus optional sub-command and parameters) to the
*        OFDM system controller (SC) and wait for it to be processed.
* \param state demodulator state.
* \param cmd SC command code (OFDM_SC_RA_RAM_CMD_*); selects how many
*        parameters are written/read below.
* \param subcmd sub-command, written only for commands that take one.
* \param param0..param4 command parameters.
*        NOTE(review): param2, param3 and param4 are never written to the
*        device by this implementation -- confirm against the vendor spec.
* \return 0 on success, negative error code otherwise.
*/
static int DVBTScCommand(struct drxk_state *state,
			 u16 cmd, u16 subcmd,
			 u16 param0, u16 param1, u16 param2,
			 u16 param3, u16 param4)
{
	u16 curCmd = 0;
	u16 errCode = 0;
	u16 retryCnt = 0;
	u16 scExec = 0;
	int status;

	dprintk(1, "\n");
	/* The SC only accepts commands while its processor is executing */
	status = read16(state, OFDM_SC_COMM_EXEC__A, &scExec);
	if (scExec != 1) {
		/* SC is not running */
		status = -EINVAL;
	}
	if (status < 0)
		goto error;

	/* Wait until sc is ready to receive command */
	retryCnt = 0;
	do {
		msleep(1);
		status = read16(state, OFDM_SC_RA_RAM_CMD__A, &curCmd);
		retryCnt++;
	} while ((curCmd != 0) && (retryCnt < DRXK_MAX_RETRIES));
	/* NOTE(review): if the retries are exhausted but the last read
	   succeeded (curCmd still non-zero, status == 0) the command is
	   issued anyway -- confirm a timeout should not be an error here */
	if (retryCnt >= DRXK_MAX_RETRIES && (status < 0))
		goto error;

	/* Write sub-command */
	switch (cmd) {
		/* All commands using sub-cmd */
	case OFDM_SC_RA_RAM_CMD_PROC_START:
	case OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM:
	case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM:
		status = write16(state, OFDM_SC_RA_RAM_CMD_ADDR__A, subcmd);
		if (status < 0)
			goto error;
		break;
	default:
		/* Do nothing */
		break;
	}

	/* Write needed parameters and the command.  The cases below
	   intentionally fall through: each group writes its own parameter,
	   then continues into the groups with fewer parameters, ending with
	   the command write itself. */
	switch (cmd) {
		/* All commands using 5 parameters */
		/* All commands using 4 parameters */
		/* All commands using 3 parameters */
		/* All commands using 2 parameters */
	case OFDM_SC_RA_RAM_CMD_PROC_START:
	case OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM:
	case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM:
		status = write16(state, OFDM_SC_RA_RAM_PARAM1__A, param1);
		/* fall through */
		/* All commands using 1 parameters */
	case OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING:
	case OFDM_SC_RA_RAM_CMD_USER_IO:
		status = write16(state, OFDM_SC_RA_RAM_PARAM0__A, param0);
		/* fall through */
		/* All commands using 0 parameters */
	case OFDM_SC_RA_RAM_CMD_GET_OP_PARAM:
	case OFDM_SC_RA_RAM_CMD_NULL:
		/* Write command */
		status = write16(state, OFDM_SC_RA_RAM_CMD__A, cmd);
		break;
	default:
		/* Unknown command */
		status = -EINVAL;
	}
	if (status < 0)
		goto error;

	/* Wait until sc is ready processing command */
	retryCnt = 0;
	do {
		msleep(1);
		status = read16(state, OFDM_SC_RA_RAM_CMD__A, &curCmd);
		retryCnt++;
	} while ((curCmd != 0) && (retryCnt < DRXK_MAX_RETRIES));
	/* NOTE(review): same timeout caveat as the wait loop above */
	if (retryCnt >= DRXK_MAX_RETRIES && (status < 0))
		goto error;

	/* Check for illegal cmd */
	status = read16(state, OFDM_SC_RA_RAM_CMD_ADDR__A, &errCode);
	if (errCode == 0xFFFF) {
		/* illegal command */
		status = -EINVAL;
	}
	if (status < 0)
		goto error;

	/* Retrieve results parameters from SC.
	   NOTE(review): param0 is a by-value parameter, so the result read
	   below is discarded -- callers cannot observe it. */
	switch (cmd) {
		/* All commands yielding 5 results */
		/* All commands yielding 4 results */
		/* All commands yielding 3 results */
		/* All commands yielding 2 results */
		/* All commands yielding 1 result */
	case OFDM_SC_RA_RAM_CMD_USER_IO:
	case OFDM_SC_RA_RAM_CMD_GET_OP_PARAM:
		status = read16(state, OFDM_SC_RA_RAM_PARAM0__A, &(param0));
		/* fall through */
		/* All commands yielding 0 results */
	case OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING:
	case OFDM_SC_RA_RAM_CMD_SET_TIMER:
	case OFDM_SC_RA_RAM_CMD_PROC_START:
	case OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM:
	case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM:
	case OFDM_SC_RA_RAM_CMD_NULL:
		break;
	default:
		/* Unknown command */
		status = -EINVAL;
		break;
	}			/* switch (cmd->cmd) */
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
3385
3386static int PowerUpDVBT(struct drxk_state *state)
3387{
3388 enum DRXPowerMode powerMode = DRX_POWER_UP;
3389 int status;
3390
3391 dprintk(1, "\n");
3392 status = CtrlPowerMode(state, &powerMode);
3393 if (status < 0)
3394 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
3395 return status;
3396}
3397
3398static int DVBTCtrlSetIncEnable(struct drxk_state *state, bool *enabled)
3399{
3400 int status;
3401
3402 dprintk(1, "\n");
3403 if (*enabled == true)
3404 status = write16(state, IQM_CF_BYPASSDET__A, 0);
3405 else
3406 status = write16(state, IQM_CF_BYPASSDET__A, 1);
3407 if (status < 0)
3408 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
3409 return status;
3410}
3411
3412#define DEFAULT_FR_THRES_8K 4000
3413static int DVBTCtrlSetFrEnable(struct drxk_state *state, bool *enabled)
3414{
3415
3416 int status;
3417
3418 dprintk(1, "\n");
3419 if (*enabled == true) {
3420 /* write mask to 1 */
3421 status = write16(state, OFDM_SC_RA_RAM_FR_THRES_8K__A,
3422 DEFAULT_FR_THRES_8K);
3423 } else {
3424 /* write mask to 0 */
3425 status = write16(state, OFDM_SC_RA_RAM_FR_THRES_8K__A, 0);
3426 }
3427 if (status < 0)
3428 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
3429
3430 return status;
3431}
3432
3433static int DVBTCtrlSetEchoThreshold(struct drxk_state *state,
3434 struct DRXKCfgDvbtEchoThres_t *echoThres)
3435{
3436 u16 data = 0;
3437 int status;
3438
3439 dprintk(1, "\n");
3440 status = read16(state, OFDM_SC_RA_RAM_ECHO_THRES__A, &data);
3441 if (status < 0)
3442 goto error;
3443
3444 switch (echoThres->fftMode) {
3445 case DRX_FFTMODE_2K:
3446 data &= ~OFDM_SC_RA_RAM_ECHO_THRES_2K__M;
3447 data |= ((echoThres->threshold <<
3448 OFDM_SC_RA_RAM_ECHO_THRES_2K__B)
3449 & (OFDM_SC_RA_RAM_ECHO_THRES_2K__M));
3450 break;
3451 case DRX_FFTMODE_8K:
3452 data &= ~OFDM_SC_RA_RAM_ECHO_THRES_8K__M;
3453 data |= ((echoThres->threshold <<
3454 OFDM_SC_RA_RAM_ECHO_THRES_8K__B)
3455 & (OFDM_SC_RA_RAM_ECHO_THRES_8K__M));
3456 break;
3457 default:
3458 return -EINVAL;
3459 }
3460
3461 status = write16(state, OFDM_SC_RA_RAM_ECHO_THRES__A, data);
3462error:
3463 if (status < 0)
3464 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
3465 return status;
3466}
3467
3468static int DVBTCtrlSetSqiSpeed(struct drxk_state *state,
3469 enum DRXKCfgDvbtSqiSpeed *speed)
3470{
3471 int status = -EINVAL;
3472
3473 dprintk(1, "\n");
3474
3475 switch (*speed) {
3476 case DRXK_DVBT_SQI_SPEED_FAST:
3477 case DRXK_DVBT_SQI_SPEED_MEDIUM:
3478 case DRXK_DVBT_SQI_SPEED_SLOW:
3479 break;
3480 default:
3481 goto error;
3482 }
3483 status = write16(state, SCU_RAM_FEC_PRE_RS_BER_FILTER_SH__A,
3484 (u16) *speed);
3485error:
3486 if (status < 0)
3487 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
3488 return status;
3489}
3490
3491/*============================================================================*/
3492
3493/**
3494* \brief Activate DVBT specific presets
3495* \param demod instance of demodulator.
3496* \return DRXStatus_t.
3497*
3498* Called in DVBTSetStandard
3499*
3500*/
3501static int DVBTActivatePresets(struct drxk_state *state)
3502{
3503 int status;
3504 bool setincenable = false;
3505 bool setfrenable = true;
3506
3507 struct DRXKCfgDvbtEchoThres_t echoThres2k = { 0, DRX_FFTMODE_2K };
3508 struct DRXKCfgDvbtEchoThres_t echoThres8k = { 0, DRX_FFTMODE_8K };
3509
3510 dprintk(1, "\n");
3511 status = DVBTCtrlSetIncEnable(state, &setincenable);
3512 if (status < 0)
3513 goto error;
3514 status = DVBTCtrlSetFrEnable(state, &setfrenable);
3515 if (status < 0)
3516 goto error;
3517 status = DVBTCtrlSetEchoThreshold(state, &echoThres2k);
3518 if (status < 0)
3519 goto error;
3520 status = DVBTCtrlSetEchoThreshold(state, &echoThres8k);
3521 if (status < 0)
3522 goto error;
3523 status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MAX__A, state->m_dvbtIfAgcCfg.IngainTgtMax);
3524error:
3525 if (status < 0)
3526 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
3527 return status;
3528}
3529
3530/*============================================================================*/
3531
3532/**
3533* \brief Initialize channelswitch-independent settings for DVBT.
3534* \param demod instance of demodulator.
3535* \return DRXStatus_t.
3536*
3537* For ROM code channel filter taps are loaded from the bootloader. For microcode
3538* the DVB-T taps from the drxk_filters.h are used.
3539*/
static int SetDVBTStandard(struct drxk_state *state,
			   enum OperationMode oMode)
{
	/* NOTE(review): oMode is currently unused in this function */
	u16 cmdResult = 0;
	u16 data = 0;
	int status;

	dprintk(1, "\n");

	/* NOTE(review): the return values of PowerUpDVBT() and
	   SwitchAntennaToDVBT() are ignored; a power-up failure would only
	   surface indirectly through the SCU commands below -- confirm
	   whether this is intentional */
	PowerUpDVBT(state);
	/* added antenna switch */
	SwitchAntennaToDVBT(state);
	/* send OFDM reset command */
	status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_RESET, 0, NULL, 1, &cmdResult);
	if (status < 0)
		goto error;

	/* send OFDM setenv command */
	status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV, 0, NULL, 1, &cmdResult);
	if (status < 0)
		goto error;

	/* reset datapath for OFDM, processors first */
	status = write16(state, OFDM_SC_COMM_EXEC__A, OFDM_SC_COMM_EXEC_STOP);
	if (status < 0)
		goto error;
	status = write16(state, OFDM_LC_COMM_EXEC__A, OFDM_LC_COMM_EXEC_STOP);
	if (status < 0)
		goto error;
	status = write16(state, IQM_COMM_EXEC__A, IQM_COMM_EXEC_B_STOP);
	if (status < 0)
		goto error;

	/* IQM setup */
	/* synchronize on ofdstate->m_festart */
	status = write16(state, IQM_AF_UPD_SEL__A, 1);
	if (status < 0)
		goto error;
	/* window size for clipping ADC detection */
	status = write16(state, IQM_AF_CLP_LEN__A, 0);
	if (status < 0)
		goto error;
	/* window size for sense pre-SAW detection */
	status = write16(state, IQM_AF_SNS_LEN__A, 0);
	if (status < 0)
		goto error;
	/* sense threshold for sense pre-SAW detection */
	status = write16(state, IQM_AF_AMUX__A, IQM_AF_AMUX_SIGNAL2ADC);
	if (status < 0)
		goto error;
	status = SetIqmAf(state, true);
	if (status < 0)
		goto error;

	status = write16(state, IQM_AF_AGC_RF__A, 0);
	if (status < 0)
		goto error;

	/* Impulse noise cruncher setup */
	status = write16(state, IQM_AF_INC_LCT__A, 0);	/* crunch in IQM_CF */
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_DET_LCT__A, 0);	/* detect in IQM_CF */
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_WND_LEN__A, 3);	/* peak detector window length */
	if (status < 0)
		goto error;

	status = write16(state, IQM_RC_STRETCH__A, 16);
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_OUT_ENA__A, 0x4);	/* enable output 2 */
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_DS_ENA__A, 0x4);	/* decimate output 2 */
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_SCALE__A, 1600);
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_SCALE_SH__A, 0);
	if (status < 0)
		goto error;

	/* virtual clipping threshold for clipping ADC detection */
	status = write16(state, IQM_AF_CLP_TH__A, 448);
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_DATATH__A, 495);	/* crunching threshold */
	if (status < 0)
		goto error;

	/* Load ROM channel filter taps via the bootloader chain command */
	status = BLChainCmd(state, DRXK_BL_ROM_OFFSET_TAPS_DVBT, DRXK_BLCC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT);
	if (status < 0)
		goto error;

	status = write16(state, IQM_CF_PKDTH__A, 2);	/* peak detector threshold */
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_POW_MEAS_LEN__A, 2);
	if (status < 0)
		goto error;
	/* enable power measurement interrupt */
	status = write16(state, IQM_CF_COMM_INT_MSK__A, 1);
	if (status < 0)
		goto error;
	status = write16(state, IQM_COMM_EXEC__A, IQM_COMM_EXEC_B_ACTIVE);
	if (status < 0)
		goto error;

	/* IQM will not be reset from here, sync ADC and update/init AGC */
	status = ADCSynchronization(state);
	if (status < 0)
		goto error;
	status = SetPreSaw(state, &state->m_dvbtPreSawCfg);
	if (status < 0)
		goto error;

	/* Halt SCU to enable safe non-atomic accesses */
	status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_HOLD);
	if (status < 0)
		goto error;

	status = SetAgcRf(state, &state->m_dvbtRfAgcCfg, true);
	if (status < 0)
		goto error;
	status = SetAgcIf(state, &state->m_dvbtIfAgcCfg, true);
	if (status < 0)
		goto error;

	/* Set Noise Estimation notch width and enable DC fix */
	status = read16(state, OFDM_SC_RA_RAM_CONFIG__A, &data);
	if (status < 0)
		goto error;
	data |= OFDM_SC_RA_RAM_CONFIG_NE_FIX_ENABLE__M;
	status = write16(state, OFDM_SC_RA_RAM_CONFIG__A, data);
	if (status < 0)
		goto error;

	/* Activate SCU to enable SCU commands */
	status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE);
	if (status < 0)
		goto error;

	if (!state->m_DRXK_A3_ROM_CODE) {
		/* AGCInit() is not done for DVBT, so set agcFastClipCtrlDelay */
		status = write16(state, SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A, state->m_dvbtIfAgcCfg.FastClipCtrlDelay);
		if (status < 0)
			goto error;
	}

	/* OFDM_SC setup */
#ifdef COMPILE_FOR_NONRT
	status = write16(state, OFDM_SC_RA_RAM_BE_OPT_DELAY__A, 1);
	if (status < 0)
		goto error;
	status = write16(state, OFDM_SC_RA_RAM_BE_OPT_INIT_DELAY__A, 2);
	if (status < 0)
		goto error;
#endif

	/* FEC setup */
	status = write16(state, FEC_DI_INPUT_CTL__A, 1);	/* OFDM input */
	if (status < 0)
		goto error;


#ifdef COMPILE_FOR_NONRT
	status = write16(state, FEC_RS_MEASUREMENT_PERIOD__A, 0x400);
	if (status < 0)
		goto error;
#else
	status = write16(state, FEC_RS_MEASUREMENT_PERIOD__A, 0x1000);
	if (status < 0)
		goto error;
#endif
	status = write16(state, FEC_RS_MEASUREMENT_PRESCALE__A, 0x0001);
	if (status < 0)
		goto error;

	/* Setup MPEG bus */
	status = MPEGTSDtoSetup(state, OM_DVBT);
	if (status < 0)
		goto error;
	/* Set DVBT Presets */
	status = DVBTActivatePresets(state);
	if (status < 0)
		goto error;

error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
3735
3736/*============================================================================*/
3737/**
3738* \brief Start dvbt demodulating for channel.
3739* \param demod instance of demodulator.
3740* \return DRXStatus_t.
3741*/
3742static int DVBTStart(struct drxk_state *state)
3743{
3744 u16 param1;
3745 int status;
3746 /* DRXKOfdmScCmd_t scCmd; */
3747
3748 dprintk(1, "\n");
3749 /* Start correct processes to get in lock */
3750 /* DRXK: OFDM_SC_RA_RAM_PROC_LOCKTRACK is no longer in mapfile! */
3751 param1 = OFDM_SC_RA_RAM_LOCKTRACK_MIN;
3752 status = DVBTScCommand(state, OFDM_SC_RA_RAM_CMD_PROC_START, 0, OFDM_SC_RA_RAM_SW_EVENT_RUN_NMASK__M, param1, 0, 0, 0);
3753 if (status < 0)
3754 goto error;
3755 /* Start FEC OC */
3756 status = MPEGTSStart(state);
3757 if (status < 0)
3758 goto error;
3759 status = write16(state, FEC_COMM_EXEC__A, FEC_COMM_EXEC_ACTIVE);
3760 if (status < 0)
3761 goto error;
3762error:
3763 if (status < 0)
3764 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
3765 return status;
3766}
3767
3768
3769/*============================================================================*/
3770
3771/**
3772* \brief Set up dvbt demodulator for channel.
3773* \param demod instance of demodulator.
3774* \return DRXStatus_t.
3775* // original DVBTSetChannel()
3776*/
static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz,
		   s32 tunerFreqOffset)
{
	u16 cmdResult = 0;
	u16 transmissionParams = 0;
	u16 operationMode = 0;
	u32 iqmRcRateOfs = 0;
	u32 bandwidth = 0;
	u16 param1;
	int status;

	dprintk(1, "IF =%d, TFO = %d\n", IntermediateFreqkHz, tunerFreqOffset);

	status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_STOP, 0, NULL, 1, &cmdResult);
	if (status < 0)
		goto error;

	/* Halt SCU to enable safe non-atomic accesses */
	status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_HOLD);
	if (status < 0)
		goto error;

	/* Stop processors */
	status = write16(state, OFDM_SC_COMM_EXEC__A, OFDM_SC_COMM_EXEC_STOP);
	if (status < 0)
		goto error;
	status = write16(state, OFDM_LC_COMM_EXEC__A, OFDM_LC_COMM_EXEC_STOP);
	if (status < 0)
		goto error;

	/* Mandatory fix, always stop CP, required to set spl offset back to
	   hardware default (is set to 0 by ucode during pilot detection */
	status = write16(state, OFDM_CP_COMM_EXEC__A, OFDM_CP_COMM_EXEC_STOP);
	if (status < 0)
		goto error;

	/*== Write channel settings to device =====================================*/

	/* mode */
	switch (state->param.u.ofdm.transmission_mode) {
	case TRANSMISSION_MODE_AUTO:
	default:
		operationMode |= OFDM_SC_RA_RAM_OP_AUTO_MODE__M;
		/* fall through , try first guess DRX_FFTMODE_8K */
	case TRANSMISSION_MODE_8K:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_MODE_8K;
		break;
	case TRANSMISSION_MODE_2K:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_MODE_2K;
		break;
	}

	/* guard */
	switch (state->param.u.ofdm.guard_interval) {
	default:
	case GUARD_INTERVAL_AUTO:
		operationMode |= OFDM_SC_RA_RAM_OP_AUTO_GUARD__M;
		/* fall through , try first guess DRX_GUARD_1DIV4 */
	case GUARD_INTERVAL_1_4:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_4;
		break;
	case GUARD_INTERVAL_1_32:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_32;
		break;
	case GUARD_INTERVAL_1_16:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_16;
		break;
	case GUARD_INTERVAL_1_8:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_8;
		break;
	}

	/* hierarchy */
	switch (state->param.u.ofdm.hierarchy_information) {
	case HIERARCHY_AUTO:
	case HIERARCHY_NONE:
	default:
		operationMode |= OFDM_SC_RA_RAM_OP_AUTO_HIER__M;
		/* fall through , try first guess SC_RA_RAM_OP_PARAM_HIER_NO */
		/* transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_NO; */
		/* break; */
	case HIERARCHY_1:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A1;
		break;
	case HIERARCHY_2:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A2;
		break;
	case HIERARCHY_4:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A4;
		break;
	}


	/* constellation */
	switch (state->param.u.ofdm.constellation) {
	case QAM_AUTO:
	default:
		operationMode |= OFDM_SC_RA_RAM_OP_AUTO_CONST__M;
		/* fall through , try first guess DRX_CONSTELLATION_QAM64 */
	case QAM_64:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64;
		break;
	case QPSK:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QPSK;
		break;
	case QAM_16:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM16;
		break;
	}
#if 0
	/* No hierarchical channels support in BDA */
	/* Priority (only for hierarchical channels) */
	switch (channel->priority) {
	case DRX_PRIORITY_LOW:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_LO;
		WR16(devAddr, OFDM_EC_SB_PRIOR__A,
			OFDM_EC_SB_PRIOR_LO);
		break;
	case DRX_PRIORITY_HIGH:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI;
		WR16(devAddr, OFDM_EC_SB_PRIOR__A,
			OFDM_EC_SB_PRIOR_HI));
		break;
	case DRX_PRIORITY_UNKNOWN:	/* fall through */
	default:
		status = -EINVAL;
		goto error;
	}
#else
	/* Set priority high */
	transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI;
	status = write16(state, OFDM_EC_SB_PRIOR__A, OFDM_EC_SB_PRIOR_HI);
	if (status < 0)
		goto error;
#endif

	/* coderate */
	switch (state->param.u.ofdm.code_rate_HP) {
	case FEC_AUTO:
	default:
		operationMode |= OFDM_SC_RA_RAM_OP_AUTO_RATE__M;
		/* fall through , try first guess DRX_CODERATE_2DIV3 */
	case FEC_2_3:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3;
		break;
	case FEC_1_2:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_1_2;
		break;
	case FEC_3_4:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_3_4;
		break;
	case FEC_5_6:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_5_6;
		break;
	case FEC_7_8:
		transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_7_8;
		break;
	}

	/* SAW filter selection: normally not necessary, but if wanted
	   the application can select a SAW filter via the driver by using UIOs */
	/* First determine real bandwidth (Hz) */
	/* Also set delay for impulse noise cruncher */
	/* Also set parameters for EC_OC fix, note EC_OC_REG_TMD_HIL_MAR is changed
	   by SC for fix for some 8K,1/8 guard but is restored by InitEC and ResetEC
	   functions */
	switch (state->param.u.ofdm.bandwidth) {
	case BANDWIDTH_AUTO:
	case BANDWIDTH_8_MHZ:
		bandwidth = DRXK_BANDWIDTH_8MHZ_IN_HZ;
		status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, 3052);
		if (status < 0)
			goto error;
		/* cochannel protection for PAL 8 MHz */
		status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A, 7);
		if (status < 0)
			goto error;
		status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A, 7);
		if (status < 0)
			goto error;
		status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A, 7);
		if (status < 0)
			goto error;
		status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A, 1);
		if (status < 0)
			goto error;
		break;
	case BANDWIDTH_7_MHZ:
		bandwidth = DRXK_BANDWIDTH_7MHZ_IN_HZ;
		status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, 3491);
		if (status < 0)
			goto error;
		/* cochannel protection for PAL 7 MHz */
		status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A, 8);
		if (status < 0)
			goto error;
		status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A, 8);
		if (status < 0)
			goto error;
		status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A, 4);
		if (status < 0)
			goto error;
		status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A, 1);
		if (status < 0)
			goto error;
		break;
	case BANDWIDTH_6_MHZ:
		bandwidth = DRXK_BANDWIDTH_6MHZ_IN_HZ;
		status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, 4073);
		if (status < 0)
			goto error;
		/* cochannel protection for NTSC 6 MHz */
		status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A, 19);
		if (status < 0)
			goto error;
		status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A, 19);
		if (status < 0)
			goto error;
		status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A, 14);
		if (status < 0)
			goto error;
		status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A, 1);
		if (status < 0)
			goto error;
		break;
	default:
		status = -EINVAL;
		goto error;
	}

	/* NOTE(review): iqmRcRateOfs is initialized to 0 above and never
	   written in between, so this condition is always true -- looks
	   like a leftover hook for a configurable override; confirm */
	if (iqmRcRateOfs == 0) {
		/* Now compute IQM_RC_RATE_OFS
		   (((SysFreq/BandWidth)/2)/2) -1) * 2^23)
		   =>
		   ((SysFreq / BandWidth) * (2^21)) - (2^23)
		 */
		/* (SysFreq / BandWidth) * (2^28) */
		/* assert (MAX(sysClk)/MIN(bandwidth) < 16)
		   => assert(MAX(sysClk) < 16*MIN(bandwidth))
		   => assert(109714272 > 48000000) = true so Frac 28 can be used */
		iqmRcRateOfs = Frac28a((u32)
				       ((state->m_sysClockFreq *
					 1000) / 3), bandwidth);
		/* (SysFreq / BandWidth) * (2^21), rounding before truncating */
		if ((iqmRcRateOfs & 0x7fL) >= 0x40)
			iqmRcRateOfs += 0x80L;
		iqmRcRateOfs = iqmRcRateOfs >> 7;
		/* ((SysFreq / BandWidth) * (2^21)) - (2^23) */
		iqmRcRateOfs = iqmRcRateOfs - (1 << 23);
	}

	/* Mask down to the bits the combined HI/LO register pair can hold */
	iqmRcRateOfs &=
		((((u32) IQM_RC_RATE_OFS_HI__M) <<
		  IQM_RC_RATE_OFS_LO__W) | IQM_RC_RATE_OFS_LO__M);
	status = write32(state, IQM_RC_RATE_OFS_LO__A, iqmRcRateOfs);
	if (status < 0)
		goto error;

	/* Bandwidth setting done */

#if 0
	status = DVBTSetFrequencyShift(demod, channel, tunerOffset);
	if (status < 0)
		goto error;
#endif
	status = SetFrequencyShifter(state, IntermediateFreqkHz, tunerFreqOffset, true);
	if (status < 0)
		goto error;

	/*== Start SC, write channel settings to SC ===============================*/

	/* Activate SCU to enable SCU commands */
	status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE);
	if (status < 0)
		goto error;

	/* Enable SC after setting all other parameters */
	status = write16(state, OFDM_SC_COMM_STATE__A, 0);
	if (status < 0)
		goto error;
	status = write16(state, OFDM_SC_COMM_EXEC__A, 1);
	if (status < 0)
		goto error;


	status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_START, 0, NULL, 1, &cmdResult);
	if (status < 0)
		goto error;

	/* Write SC parameter registers, set all AUTO flags in operation mode */
	param1 = (OFDM_SC_RA_RAM_OP_AUTO_MODE__M |
		  OFDM_SC_RA_RAM_OP_AUTO_GUARD__M |
		  OFDM_SC_RA_RAM_OP_AUTO_CONST__M |
		  OFDM_SC_RA_RAM_OP_AUTO_HIER__M |
		  OFDM_SC_RA_RAM_OP_AUTO_RATE__M);
	status = DVBTScCommand(state, OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM,
			       0, transmissionParams, param1, 0, 0, 0);
	if (status < 0)
		goto error;

	if (!state->m_DRXK_A3_ROM_CODE)
		status = DVBTCtrlSetSqiSpeed(state, &state->m_sqiSpeed);
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);

	return status;
}
4085
4086
4087/*============================================================================*/
4088
4089/**
* \brief Retrieve lock status.
4091* \param demod Pointer to demodulator instance.
4092* \param lockStat Pointer to lock status structure.
4093* \return DRXStatus_t.
4094*
4095*/
4096static int GetDVBTLockStatus(struct drxk_state *state, u32 *pLockStatus)
4097{
4098 int status;
4099 const u16 mpeg_lock_mask = (OFDM_SC_RA_RAM_LOCK_MPEG__M |
4100 OFDM_SC_RA_RAM_LOCK_FEC__M);
4101 const u16 fec_lock_mask = (OFDM_SC_RA_RAM_LOCK_FEC__M);
4102 const u16 demod_lock_mask = OFDM_SC_RA_RAM_LOCK_DEMOD__M;
4103
4104 u16 ScRaRamLock = 0;
4105 u16 ScCommExec = 0;
4106
4107 dprintk(1, "\n");
4108
4109 *pLockStatus = NOT_LOCKED;
4110 /* driver 0.9.0 */
4111 /* Check if SC is running */
4112 status = read16(state, OFDM_SC_COMM_EXEC__A, &ScCommExec);
4113 if (status < 0)
4114 goto end;
4115 if (ScCommExec == OFDM_SC_COMM_EXEC_STOP)
4116 goto end;
4117
4118 status = read16(state, OFDM_SC_RA_RAM_LOCK__A, &ScRaRamLock);
4119 if (status < 0)
4120 goto end;
4121
4122 if ((ScRaRamLock & mpeg_lock_mask) == mpeg_lock_mask)
4123 *pLockStatus = MPEG_LOCK;
4124 else if ((ScRaRamLock & fec_lock_mask) == fec_lock_mask)
4125 *pLockStatus = FEC_LOCK;
4126 else if ((ScRaRamLock & demod_lock_mask) == demod_lock_mask)
4127 *pLockStatus = DEMOD_LOCK;
4128 else if (ScRaRamLock & OFDM_SC_RA_RAM_LOCK_NODVBT__M)
4129 *pLockStatus = NEVER_LOCK;
4130end:
4131 if (status < 0)
4132 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
4133
4134 return status;
4135}
4136
4137static int PowerUpQAM(struct drxk_state *state)
4138{
4139 enum DRXPowerMode powerMode = DRXK_POWER_DOWN_OFDM;
4140 int status;
4141
4142 dprintk(1, "\n");
4143 status = CtrlPowerMode(state, &powerMode);
4144 if (status < 0)
4145 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
4146
4147 return status;
4148}
4149
4150
4151/** Power Down QAM */
4152static int PowerDownQAM(struct drxk_state *state)
4153{
4154 u16 data = 0;
4155 u16 cmdResult;
4156 int status = 0;
4157
4158 dprintk(1, "\n");
4159 status = read16(state, SCU_COMM_EXEC__A, &data);
4160 if (status < 0)
4161 goto error;
4162 if (data == SCU_COMM_EXEC_ACTIVE) {
4163 /*
4164 STOP demodulator
4165 QAM and HW blocks
4166 */
4167 /* stop all comstate->m_exec */
4168 status = write16(state, QAM_COMM_EXEC__A, QAM_COMM_EXEC_STOP);
4169 if (status < 0)
4170 goto error;
4171 status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_STOP, 0, NULL, 1, &cmdResult);
4172 if (status < 0)
4173 goto error;
4174 }
4175 /* powerdown AFE */
4176 status = SetIqmAf(state, false);
4177
4178error:
4179 if (status < 0)
4180 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
4181
4182 return status;
4183}
4184
4185/*============================================================================*/
4186
4187/**
4188* \brief Setup of the QAM Measurement intervals for signal quality
4189* \param demod instance of demod.
4190* \param constellation current constellation.
4191* \return DRXStatus_t.
4192*
4193* NOTE:
4194* Take into account that for certain settings the errorcounters can overflow.
4195* The implementation does not check this.
4196*
4197*/
4198static int SetQAMMeasurement(struct drxk_state *state,
4199 enum EDrxkConstellation constellation,
4200 u32 symbolRate)
4201{
4202 u32 fecBitsDesired = 0; /* BER accounting period */
4203 u32 fecRsPeriodTotal = 0; /* Total period */
4204 u16 fecRsPrescale = 0; /* ReedSolomon Measurement Prescale */
4205 u16 fecRsPeriod = 0; /* Value for corresponding I2C register */
4206 int status = 0;
4207
4208 dprintk(1, "\n");
4209
4210 fecRsPrescale = 1;
4211 /* fecBitsDesired = symbolRate [kHz] *
4212 FrameLenght [ms] *
4213 (constellation + 1) *
4214 SyncLoss (== 1) *
4215 ViterbiLoss (==1)
4216 */
4217 switch (constellation) {
4218 case DRX_CONSTELLATION_QAM16:
4219 fecBitsDesired = 4 * symbolRate;
4220 break;
4221 case DRX_CONSTELLATION_QAM32:
4222 fecBitsDesired = 5 * symbolRate;
4223 break;
4224 case DRX_CONSTELLATION_QAM64:
4225 fecBitsDesired = 6 * symbolRate;
4226 break;
4227 case DRX_CONSTELLATION_QAM128:
4228 fecBitsDesired = 7 * symbolRate;
4229 break;
4230 case DRX_CONSTELLATION_QAM256:
4231 fecBitsDesired = 8 * symbolRate;
4232 break;
4233 default:
4234 status = -EINVAL;
4235 }
4236 if (status < 0)
4237 goto error;
4238
4239 fecBitsDesired /= 1000; /* symbolRate [Hz] -> symbolRate [kHz] */
4240 fecBitsDesired *= 500; /* meas. period [ms] */
4241
4242 /* Annex A/C: bits/RsPeriod = 204 * 8 = 1632 */
4243 /* fecRsPeriodTotal = fecBitsDesired / 1632 */
4244 fecRsPeriodTotal = (fecBitsDesired / 1632UL) + 1; /* roughly ceil */
4245
4246 /* fecRsPeriodTotal = fecRsPrescale * fecRsPeriod */
4247 fecRsPrescale = 1 + (u16) (fecRsPeriodTotal >> 16);
4248 if (fecRsPrescale == 0) {
4249 /* Divide by zero (though impossible) */
4250 status = -EINVAL;
4251 if (status < 0)
4252 goto error;
4253 }
4254 fecRsPeriod =
4255 ((u16) fecRsPeriodTotal +
4256 (fecRsPrescale >> 1)) / fecRsPrescale;
4257
4258 /* write corresponding registers */
4259 status = write16(state, FEC_RS_MEASUREMENT_PERIOD__A, fecRsPeriod);
4260 if (status < 0)
4261 goto error;
4262 status = write16(state, FEC_RS_MEASUREMENT_PRESCALE__A, fecRsPrescale);
4263 if (status < 0)
4264 goto error;
4265 status = write16(state, FEC_OC_SNC_FAIL_PERIOD__A, fecRsPeriod);
4266error:
4267 if (status < 0)
4268 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
4269 return status;
4270}
4271
4272static int SetQAM16(struct drxk_state *state)
4273{
4274 int status = 0;
4275
4276 dprintk(1, "\n");
4277 /* QAM Equalizer Setup */
4278 /* Equalizer */
4279 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD0__A, 13517);
4280 if (status < 0)
4281 goto error;
4282 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD1__A, 13517);
4283 if (status < 0)
4284 goto error;
4285 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD2__A, 13517);
4286 if (status < 0)
4287 goto error;
4288 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD3__A, 13517);
4289 if (status < 0)
4290 goto error;
4291 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD4__A, 13517);
4292 if (status < 0)
4293 goto error;
4294 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD5__A, 13517);
4295 if (status < 0)
4296 goto error;
4297 /* Decision Feedback Equalizer */
4298 status = write16(state, QAM_DQ_QUAL_FUN0__A, 2);
4299 if (status < 0)
4300 goto error;
4301 status = write16(state, QAM_DQ_QUAL_FUN1__A, 2);
4302 if (status < 0)
4303 goto error;
4304 status = write16(state, QAM_DQ_QUAL_FUN2__A, 2);
4305 if (status < 0)
4306 goto error;
4307 status = write16(state, QAM_DQ_QUAL_FUN3__A, 2);
4308 if (status < 0)
4309 goto error;
4310 status = write16(state, QAM_DQ_QUAL_FUN4__A, 2);
4311 if (status < 0)
4312 goto error;
4313 status = write16(state, QAM_DQ_QUAL_FUN5__A, 0);
4314 if (status < 0)
4315 goto error;
4316
4317 status = write16(state, QAM_SY_SYNC_HWM__A, 5);
4318 if (status < 0)
4319 goto error;
4320 status = write16(state, QAM_SY_SYNC_AWM__A, 4);
4321 if (status < 0)
4322 goto error;
4323 status = write16(state, QAM_SY_SYNC_LWM__A, 3);
4324 if (status < 0)
4325 goto error;
4326
4327 /* QAM Slicer Settings */
4328 status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM16);
4329 if (status < 0)
4330 goto error;
4331
4332 /* QAM Loop Controller Coeficients */
4333 status = write16(state, SCU_RAM_QAM_LC_CA_FINE__A, 15);
4334 if (status < 0)
4335 goto error;
4336 status = write16(state, SCU_RAM_QAM_LC_CA_COARSE__A, 40);
4337 if (status < 0)
4338 goto error;
4339 status = write16(state, SCU_RAM_QAM_LC_EP_FINE__A, 12);
4340 if (status < 0)
4341 goto error;
4342 status = write16(state, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24);
4343 if (status < 0)
4344 goto error;
4345 status = write16(state, SCU_RAM_QAM_LC_EP_COARSE__A, 24);
4346 if (status < 0)
4347 goto error;
4348 status = write16(state, SCU_RAM_QAM_LC_EI_FINE__A, 12);
4349 if (status < 0)
4350 goto error;
4351 status = write16(state, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16);
4352 if (status < 0)
4353 goto error;
4354 status = write16(state, SCU_RAM_QAM_LC_EI_COARSE__A, 16);
4355 if (status < 0)
4356 goto error;
4357
4358 status = write16(state, SCU_RAM_QAM_LC_CP_FINE__A, 5);
4359 if (status < 0)
4360 goto error;
4361 status = write16(state, SCU_RAM_QAM_LC_CP_MEDIUM__A, 20);
4362 if (status < 0)
4363 goto error;
4364 status = write16(state, SCU_RAM_QAM_LC_CP_COARSE__A, 80);
4365 if (status < 0)
4366 goto error;
4367 status = write16(state, SCU_RAM_QAM_LC_CI_FINE__A, 5);
4368 if (status < 0)
4369 goto error;
4370 status = write16(state, SCU_RAM_QAM_LC_CI_MEDIUM__A, 20);
4371 if (status < 0)
4372 goto error;
4373 status = write16(state, SCU_RAM_QAM_LC_CI_COARSE__A, 50);
4374 if (status < 0)
4375 goto error;
4376 status = write16(state, SCU_RAM_QAM_LC_CF_FINE__A, 16);
4377 if (status < 0)
4378 goto error;
4379 status = write16(state, SCU_RAM_QAM_LC_CF_MEDIUM__A, 16);
4380 if (status < 0)
4381 goto error;
4382 status = write16(state, SCU_RAM_QAM_LC_CF_COARSE__A, 32);
4383 if (status < 0)
4384 goto error;
4385 status = write16(state, SCU_RAM_QAM_LC_CF1_FINE__A, 5);
4386 if (status < 0)
4387 goto error;
4388 status = write16(state, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 10);
4389 if (status < 0)
4390 goto error;
4391 status = write16(state, SCU_RAM_QAM_LC_CF1_COARSE__A, 10);
4392 if (status < 0)
4393 goto error;
4394
4395
4396 /* QAM State Machine (FSM) Thresholds */
4397
4398 status = write16(state, SCU_RAM_QAM_FSM_RTH__A, 140);
4399 if (status < 0)
4400 goto error;
4401 status = write16(state, SCU_RAM_QAM_FSM_FTH__A, 50);
4402 if (status < 0)
4403 goto error;
4404 status = write16(state, SCU_RAM_QAM_FSM_CTH__A, 95);
4405 if (status < 0)
4406 goto error;
4407 status = write16(state, SCU_RAM_QAM_FSM_PTH__A, 120);
4408 if (status < 0)
4409 goto error;
4410 status = write16(state, SCU_RAM_QAM_FSM_QTH__A, 230);
4411 if (status < 0)
4412 goto error;
4413 status = write16(state, SCU_RAM_QAM_FSM_MTH__A, 105);
4414 if (status < 0)
4415 goto error;
4416
4417 status = write16(state, SCU_RAM_QAM_FSM_RATE_LIM__A, 40);
4418 if (status < 0)
4419 goto error;
4420 status = write16(state, SCU_RAM_QAM_FSM_COUNT_LIM__A, 4);
4421 if (status < 0)
4422 goto error;
4423 status = write16(state, SCU_RAM_QAM_FSM_FREQ_LIM__A, 24);
4424 if (status < 0)
4425 goto error;
4426
4427
4428 /* QAM FSM Tracking Parameters */
4429
4430 status = write16(state, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, (u16) 16);
4431 if (status < 0)
4432 goto error;
4433 status = write16(state, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, (u16) 220);
4434 if (status < 0)
4435 goto error;
4436 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16) 25);
4437 if (status < 0)
4438 goto error;
4439 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16) 6);
4440 if (status < 0)
4441 goto error;
4442 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16) -24);
4443 if (status < 0)
4444 goto error;
4445 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16) -65);
4446 if (status < 0)
4447 goto error;
4448 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -127);
4449 if (status < 0)
4450 goto error;
4451
4452error:
4453 if (status < 0)
4454 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
4455 return status;
4456}
4457
4458/*============================================================================*/
4459
4460/**
4461* \brief QAM32 specific setup
4462* \param demod instance of demod.
4463* \return DRXStatus_t.
4464*/
4465static int SetQAM32(struct drxk_state *state)
4466{
4467 int status = 0;
4468
4469 dprintk(1, "\n");
4470
4471 /* QAM Equalizer Setup */
4472 /* Equalizer */
4473 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD0__A, 6707);
4474 if (status < 0)
4475 goto error;
4476 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD1__A, 6707);
4477 if (status < 0)
4478 goto error;
4479 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD2__A, 6707);
4480 if (status < 0)
4481 goto error;
4482 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD3__A, 6707);
4483 if (status < 0)
4484 goto error;
4485 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD4__A, 6707);
4486 if (status < 0)
4487 goto error;
4488 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD5__A, 6707);
4489 if (status < 0)
4490 goto error;
4491
4492 /* Decision Feedback Equalizer */
4493 status = write16(state, QAM_DQ_QUAL_FUN0__A, 3);
4494 if (status < 0)
4495 goto error;
4496 status = write16(state, QAM_DQ_QUAL_FUN1__A, 3);
4497 if (status < 0)
4498 goto error;
4499 status = write16(state, QAM_DQ_QUAL_FUN2__A, 3);
4500 if (status < 0)
4501 goto error;
4502 status = write16(state, QAM_DQ_QUAL_FUN3__A, 3);
4503 if (status < 0)
4504 goto error;
4505 status = write16(state, QAM_DQ_QUAL_FUN4__A, 3);
4506 if (status < 0)
4507 goto error;
4508 status = write16(state, QAM_DQ_QUAL_FUN5__A, 0);
4509 if (status < 0)
4510 goto error;
4511
4512 status = write16(state, QAM_SY_SYNC_HWM__A, 6);
4513 if (status < 0)
4514 goto error;
4515 status = write16(state, QAM_SY_SYNC_AWM__A, 5);
4516 if (status < 0)
4517 goto error;
4518 status = write16(state, QAM_SY_SYNC_LWM__A, 3);
4519 if (status < 0)
4520 goto error;
4521
4522 /* QAM Slicer Settings */
4523
4524 status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM32);
4525 if (status < 0)
4526 goto error;
4527
4528
4529 /* QAM Loop Controller Coeficients */
4530
4531 status = write16(state, SCU_RAM_QAM_LC_CA_FINE__A, 15);
4532 if (status < 0)
4533 goto error;
4534 status = write16(state, SCU_RAM_QAM_LC_CA_COARSE__A, 40);
4535 if (status < 0)
4536 goto error;
4537 status = write16(state, SCU_RAM_QAM_LC_EP_FINE__A, 12);
4538 if (status < 0)
4539 goto error;
4540 status = write16(state, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24);
4541 if (status < 0)
4542 goto error;
4543 status = write16(state, SCU_RAM_QAM_LC_EP_COARSE__A, 24);
4544 if (status < 0)
4545 goto error;
4546 status = write16(state, SCU_RAM_QAM_LC_EI_FINE__A, 12);
4547 if (status < 0)
4548 goto error;
4549 status = write16(state, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16);
4550 if (status < 0)
4551 goto error;
4552 status = write16(state, SCU_RAM_QAM_LC_EI_COARSE__A, 16);
4553 if (status < 0)
4554 goto error;
4555
4556 status = write16(state, SCU_RAM_QAM_LC_CP_FINE__A, 5);
4557 if (status < 0)
4558 goto error;
4559 status = write16(state, SCU_RAM_QAM_LC_CP_MEDIUM__A, 20);
4560 if (status < 0)
4561 goto error;
4562 status = write16(state, SCU_RAM_QAM_LC_CP_COARSE__A, 80);
4563 if (status < 0)
4564 goto error;
4565 status = write16(state, SCU_RAM_QAM_LC_CI_FINE__A, 5);
4566 if (status < 0)
4567 goto error;
4568 status = write16(state, SCU_RAM_QAM_LC_CI_MEDIUM__A, 20);
4569 if (status < 0)
4570 goto error;
4571 status = write16(state, SCU_RAM_QAM_LC_CI_COARSE__A, 50);
4572 if (status < 0)
4573 goto error;
4574 status = write16(state, SCU_RAM_QAM_LC_CF_FINE__A, 16);
4575 if (status < 0)
4576 goto error;
4577 status = write16(state, SCU_RAM_QAM_LC_CF_MEDIUM__A, 16);
4578 if (status < 0)
4579 goto error;
4580 status = write16(state, SCU_RAM_QAM_LC_CF_COARSE__A, 16);
4581 if (status < 0)
4582 goto error;
4583 status = write16(state, SCU_RAM_QAM_LC_CF1_FINE__A, 5);
4584 if (status < 0)
4585 goto error;
4586 status = write16(state, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 10);
4587 if (status < 0)
4588 goto error;
4589 status = write16(state, SCU_RAM_QAM_LC_CF1_COARSE__A, 0);
4590 if (status < 0)
4591 goto error;
4592
4593
4594 /* QAM State Machine (FSM) Thresholds */
4595
4596 status = write16(state, SCU_RAM_QAM_FSM_RTH__A, 90);
4597 if (status < 0)
4598 goto error;
4599 status = write16(state, SCU_RAM_QAM_FSM_FTH__A, 50);
4600 if (status < 0)
4601 goto error;
4602 status = write16(state, SCU_RAM_QAM_FSM_CTH__A, 80);
4603 if (status < 0)
4604 goto error;
4605 status = write16(state, SCU_RAM_QAM_FSM_PTH__A, 100);
4606 if (status < 0)
4607 goto error;
4608 status = write16(state, SCU_RAM_QAM_FSM_QTH__A, 170);
4609 if (status < 0)
4610 goto error;
4611 status = write16(state, SCU_RAM_QAM_FSM_MTH__A, 100);
4612 if (status < 0)
4613 goto error;
4614
4615 status = write16(state, SCU_RAM_QAM_FSM_RATE_LIM__A, 40);
4616 if (status < 0)
4617 goto error;
4618 status = write16(state, SCU_RAM_QAM_FSM_COUNT_LIM__A, 4);
4619 if (status < 0)
4620 goto error;
4621 status = write16(state, SCU_RAM_QAM_FSM_FREQ_LIM__A, 10);
4622 if (status < 0)
4623 goto error;
4624
4625
4626 /* QAM FSM Tracking Parameters */
4627
4628 status = write16(state, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, (u16) 12);
4629 if (status < 0)
4630 goto error;
4631 status = write16(state, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, (u16) 140);
4632 if (status < 0)
4633 goto error;
4634 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16) -8);
4635 if (status < 0)
4636 goto error;
4637 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16) -16);
4638 if (status < 0)
4639 goto error;
4640 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16) -26);
4641 if (status < 0)
4642 goto error;
4643 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16) -56);
4644 if (status < 0)
4645 goto error;
4646 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -86);
4647error:
4648 if (status < 0)
4649 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
4650 return status;
4651}
4652
4653/*============================================================================*/
4654
4655/**
4656* \brief QAM64 specific setup
4657* \param demod instance of demod.
4658* \return DRXStatus_t.
4659*/
4660static int SetQAM64(struct drxk_state *state)
4661{
4662 int status = 0;
4663
4664 dprintk(1, "\n");
4665 /* QAM Equalizer Setup */
4666 /* Equalizer */
4667 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD0__A, 13336);
4668 if (status < 0)
4669 goto error;
4670 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD1__A, 12618);
4671 if (status < 0)
4672 goto error;
4673 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD2__A, 11988);
4674 if (status < 0)
4675 goto error;
4676 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD3__A, 13809);
4677 if (status < 0)
4678 goto error;
4679 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD4__A, 13809);
4680 if (status < 0)
4681 goto error;
4682 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD5__A, 15609);
4683 if (status < 0)
4684 goto error;
4685
4686 /* Decision Feedback Equalizer */
4687 status = write16(state, QAM_DQ_QUAL_FUN0__A, 4);
4688 if (status < 0)
4689 goto error;
4690 status = write16(state, QAM_DQ_QUAL_FUN1__A, 4);
4691 if (status < 0)
4692 goto error;
4693 status = write16(state, QAM_DQ_QUAL_FUN2__A, 4);
4694 if (status < 0)
4695 goto error;
4696 status = write16(state, QAM_DQ_QUAL_FUN3__A, 4);
4697 if (status < 0)
4698 goto error;
4699 status = write16(state, QAM_DQ_QUAL_FUN4__A, 3);
4700 if (status < 0)
4701 goto error;
4702 status = write16(state, QAM_DQ_QUAL_FUN5__A, 0);
4703 if (status < 0)
4704 goto error;
4705
4706 status = write16(state, QAM_SY_SYNC_HWM__A, 5);
4707 if (status < 0)
4708 goto error;
4709 status = write16(state, QAM_SY_SYNC_AWM__A, 4);
4710 if (status < 0)
4711 goto error;
4712 status = write16(state, QAM_SY_SYNC_LWM__A, 3);
4713 if (status < 0)
4714 goto error;
4715
4716 /* QAM Slicer Settings */
4717 status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM64);
4718 if (status < 0)
4719 goto error;
4720
4721
4722 /* QAM Loop Controller Coeficients */
4723
4724 status = write16(state, SCU_RAM_QAM_LC_CA_FINE__A, 15);
4725 if (status < 0)
4726 goto error;
4727 status = write16(state, SCU_RAM_QAM_LC_CA_COARSE__A, 40);
4728 if (status < 0)
4729 goto error;
4730 status = write16(state, SCU_RAM_QAM_LC_EP_FINE__A, 12);
4731 if (status < 0)
4732 goto error;
4733 status = write16(state, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24);
4734 if (status < 0)
4735 goto error;
4736 status = write16(state, SCU_RAM_QAM_LC_EP_COARSE__A, 24);
4737 if (status < 0)
4738 goto error;
4739 status = write16(state, SCU_RAM_QAM_LC_EI_FINE__A, 12);
4740 if (status < 0)
4741 goto error;
4742 status = write16(state, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16);
4743 if (status < 0)
4744 goto error;
4745 status = write16(state, SCU_RAM_QAM_LC_EI_COARSE__A, 16);
4746 if (status < 0)
4747 goto error;
4748
4749 status = write16(state, SCU_RAM_QAM_LC_CP_FINE__A, 5);
4750 if (status < 0)
4751 goto error;
4752 status = write16(state, SCU_RAM_QAM_LC_CP_MEDIUM__A, 30);
4753 if (status < 0)
4754 goto error;
4755 status = write16(state, SCU_RAM_QAM_LC_CP_COARSE__A, 100);
4756 if (status < 0)
4757 goto error;
4758 status = write16(state, SCU_RAM_QAM_LC_CI_FINE__A, 5);
4759 if (status < 0)
4760 goto error;
4761 status = write16(state, SCU_RAM_QAM_LC_CI_MEDIUM__A, 30);
4762 if (status < 0)
4763 goto error;
4764 status = write16(state, SCU_RAM_QAM_LC_CI_COARSE__A, 50);
4765 if (status < 0)
4766 goto error;
4767 status = write16(state, SCU_RAM_QAM_LC_CF_FINE__A, 16);
4768 if (status < 0)
4769 goto error;
4770 status = write16(state, SCU_RAM_QAM_LC_CF_MEDIUM__A, 25);
4771 if (status < 0)
4772 goto error;
4773 status = write16(state, SCU_RAM_QAM_LC_CF_COARSE__A, 48);
4774 if (status < 0)
4775 goto error;
4776 status = write16(state, SCU_RAM_QAM_LC_CF1_FINE__A, 5);
4777 if (status < 0)
4778 goto error;
4779 status = write16(state, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 10);
4780 if (status < 0)
4781 goto error;
4782 status = write16(state, SCU_RAM_QAM_LC_CF1_COARSE__A, 10);
4783 if (status < 0)
4784 goto error;
4785
4786
4787 /* QAM State Machine (FSM) Thresholds */
4788
4789 status = write16(state, SCU_RAM_QAM_FSM_RTH__A, 100);
4790 if (status < 0)
4791 goto error;
4792 status = write16(state, SCU_RAM_QAM_FSM_FTH__A, 60);
4793 if (status < 0)
4794 goto error;
4795 status = write16(state, SCU_RAM_QAM_FSM_CTH__A, 80);
4796 if (status < 0)
4797 goto error;
4798 status = write16(state, SCU_RAM_QAM_FSM_PTH__A, 110);
4799 if (status < 0)
4800 goto error;
4801 status = write16(state, SCU_RAM_QAM_FSM_QTH__A, 200);
4802 if (status < 0)
4803 goto error;
4804 status = write16(state, SCU_RAM_QAM_FSM_MTH__A, 95);
4805 if (status < 0)
4806 goto error;
4807
4808 status = write16(state, SCU_RAM_QAM_FSM_RATE_LIM__A, 40);
4809 if (status < 0)
4810 goto error;
4811 status = write16(state, SCU_RAM_QAM_FSM_COUNT_LIM__A, 4);
4812 if (status < 0)
4813 goto error;
4814 status = write16(state, SCU_RAM_QAM_FSM_FREQ_LIM__A, 15);
4815 if (status < 0)
4816 goto error;
4817
4818
4819 /* QAM FSM Tracking Parameters */
4820
4821 status = write16(state, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, (u16) 12);
4822 if (status < 0)
4823 goto error;
4824 status = write16(state, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, (u16) 141);
4825 if (status < 0)
4826 goto error;
4827 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16) 7);
4828 if (status < 0)
4829 goto error;
4830 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16) 0);
4831 if (status < 0)
4832 goto error;
4833 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16) -15);
4834 if (status < 0)
4835 goto error;
4836 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16) -45);
4837 if (status < 0)
4838 goto error;
4839 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -80);
4840error:
4841 if (status < 0)
4842 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
4843
4844 return status;
4845}
4846
4847/*============================================================================*/
4848
4849/**
4850* \brief QAM128 specific setup
4851* \param demod: instance of demod.
4852* \return DRXStatus_t.
4853*/
4854static int SetQAM128(struct drxk_state *state)
4855{
4856 int status = 0;
4857
4858 dprintk(1, "\n");
4859 /* QAM Equalizer Setup */
4860 /* Equalizer */
4861 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD0__A, 6564);
4862 if (status < 0)
4863 goto error;
4864 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD1__A, 6598);
4865 if (status < 0)
4866 goto error;
4867 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD2__A, 6394);
4868 if (status < 0)
4869 goto error;
4870 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD3__A, 6409);
4871 if (status < 0)
4872 goto error;
4873 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD4__A, 6656);
4874 if (status < 0)
4875 goto error;
4876 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD5__A, 7238);
4877 if (status < 0)
4878 goto error;
4879
4880 /* Decision Feedback Equalizer */
4881 status = write16(state, QAM_DQ_QUAL_FUN0__A, 6);
4882 if (status < 0)
4883 goto error;
4884 status = write16(state, QAM_DQ_QUAL_FUN1__A, 6);
4885 if (status < 0)
4886 goto error;
4887 status = write16(state, QAM_DQ_QUAL_FUN2__A, 6);
4888 if (status < 0)
4889 goto error;
4890 status = write16(state, QAM_DQ_QUAL_FUN3__A, 6);
4891 if (status < 0)
4892 goto error;
4893 status = write16(state, QAM_DQ_QUAL_FUN4__A, 5);
4894 if (status < 0)
4895 goto error;
4896 status = write16(state, QAM_DQ_QUAL_FUN5__A, 0);
4897 if (status < 0)
4898 goto error;
4899
4900 status = write16(state, QAM_SY_SYNC_HWM__A, 6);
4901 if (status < 0)
4902 goto error;
4903 status = write16(state, QAM_SY_SYNC_AWM__A, 5);
4904 if (status < 0)
4905 goto error;
4906 status = write16(state, QAM_SY_SYNC_LWM__A, 3);
4907 if (status < 0)
4908 goto error;
4909
4910
4911 /* QAM Slicer Settings */
4912
4913 status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM128);
4914 if (status < 0)
4915 goto error;
4916
4917
4918 /* QAM Loop Controller Coeficients */
4919
4920 status = write16(state, SCU_RAM_QAM_LC_CA_FINE__A, 15);
4921 if (status < 0)
4922 goto error;
4923 status = write16(state, SCU_RAM_QAM_LC_CA_COARSE__A, 40);
4924 if (status < 0)
4925 goto error;
4926 status = write16(state, SCU_RAM_QAM_LC_EP_FINE__A, 12);
4927 if (status < 0)
4928 goto error;
4929 status = write16(state, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24);
4930 if (status < 0)
4931 goto error;
4932 status = write16(state, SCU_RAM_QAM_LC_EP_COARSE__A, 24);
4933 if (status < 0)
4934 goto error;
4935 status = write16(state, SCU_RAM_QAM_LC_EI_FINE__A, 12);
4936 if (status < 0)
4937 goto error;
4938 status = write16(state, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16);
4939 if (status < 0)
4940 goto error;
4941 status = write16(state, SCU_RAM_QAM_LC_EI_COARSE__A, 16);
4942 if (status < 0)
4943 goto error;
4944
4945 status = write16(state, SCU_RAM_QAM_LC_CP_FINE__A, 5);
4946 if (status < 0)
4947 goto error;
4948 status = write16(state, SCU_RAM_QAM_LC_CP_MEDIUM__A, 40);
4949 if (status < 0)
4950 goto error;
4951 status = write16(state, SCU_RAM_QAM_LC_CP_COARSE__A, 120);
4952 if (status < 0)
4953 goto error;
4954 status = write16(state, SCU_RAM_QAM_LC_CI_FINE__A, 5);
4955 if (status < 0)
4956 goto error;
4957 status = write16(state, SCU_RAM_QAM_LC_CI_MEDIUM__A, 40);
4958 if (status < 0)
4959 goto error;
4960 status = write16(state, SCU_RAM_QAM_LC_CI_COARSE__A, 60);
4961 if (status < 0)
4962 goto error;
4963 status = write16(state, SCU_RAM_QAM_LC_CF_FINE__A, 16);
4964 if (status < 0)
4965 goto error;
4966 status = write16(state, SCU_RAM_QAM_LC_CF_MEDIUM__A, 25);
4967 if (status < 0)
4968 goto error;
4969 status = write16(state, SCU_RAM_QAM_LC_CF_COARSE__A, 64);
4970 if (status < 0)
4971 goto error;
4972 status = write16(state, SCU_RAM_QAM_LC_CF1_FINE__A, 5);
4973 if (status < 0)
4974 goto error;
4975 status = write16(state, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 10);
4976 if (status < 0)
4977 goto error;
4978 status = write16(state, SCU_RAM_QAM_LC_CF1_COARSE__A, 0);
4979 if (status < 0)
4980 goto error;
4981
4982
4983 /* QAM State Machine (FSM) Thresholds */
4984
4985 status = write16(state, SCU_RAM_QAM_FSM_RTH__A, 50);
4986 if (status < 0)
4987 goto error;
4988 status = write16(state, SCU_RAM_QAM_FSM_FTH__A, 60);
4989 if (status < 0)
4990 goto error;
4991 status = write16(state, SCU_RAM_QAM_FSM_CTH__A, 80);
4992 if (status < 0)
4993 goto error;
4994 status = write16(state, SCU_RAM_QAM_FSM_PTH__A, 100);
4995 if (status < 0)
4996 goto error;
4997 status = write16(state, SCU_RAM_QAM_FSM_QTH__A, 140);
4998 if (status < 0)
4999 goto error;
5000 status = write16(state, SCU_RAM_QAM_FSM_MTH__A, 100);
5001 if (status < 0)
5002 goto error;
5003
5004 status = write16(state, SCU_RAM_QAM_FSM_RATE_LIM__A, 40);
5005 if (status < 0)
5006 goto error;
5007 status = write16(state, SCU_RAM_QAM_FSM_COUNT_LIM__A, 5);
5008 if (status < 0)
5009 goto error;
5010
5011 status = write16(state, SCU_RAM_QAM_FSM_FREQ_LIM__A, 12);
5012 if (status < 0)
5013 goto error;
5014
5015 /* QAM FSM Tracking Parameters */
5016
5017 status = write16(state, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, (u16) 8);
5018 if (status < 0)
5019 goto error;
5020 status = write16(state, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, (u16) 65);
5021 if (status < 0)
5022 goto error;
5023 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16) 5);
5024 if (status < 0)
5025 goto error;
5026 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16) 3);
5027 if (status < 0)
5028 goto error;
5029 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16) -1);
5030 if (status < 0)
5031 goto error;
5032 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16) -12);
5033 if (status < 0)
5034 goto error;
5035 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -23);
5036error:
5037 if (status < 0)
5038 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
5039
5040 return status;
5041}
5042
5043/*============================================================================*/
5044
5045/**
5046* \brief QAM256 specific setup
5047* \param demod: instance of demod.
5048* \return DRXStatus_t.
5049*/
5050static int SetQAM256(struct drxk_state *state)
5051{
5052 int status = 0;
5053
5054 dprintk(1, "\n");
5055 /* QAM Equalizer Setup */
5056 /* Equalizer */
5057 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD0__A, 11502);
5058 if (status < 0)
5059 goto error;
5060 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD1__A, 12084);
5061 if (status < 0)
5062 goto error;
5063 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD2__A, 12543);
5064 if (status < 0)
5065 goto error;
5066 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD3__A, 12931);
5067 if (status < 0)
5068 goto error;
5069 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD4__A, 13629);
5070 if (status < 0)
5071 goto error;
5072 status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD5__A, 15385);
5073 if (status < 0)
5074 goto error;
5075
5076 /* Decision Feedback Equalizer */
5077 status = write16(state, QAM_DQ_QUAL_FUN0__A, 8);
5078 if (status < 0)
5079 goto error;
5080 status = write16(state, QAM_DQ_QUAL_FUN1__A, 8);
5081 if (status < 0)
5082 goto error;
5083 status = write16(state, QAM_DQ_QUAL_FUN2__A, 8);
5084 if (status < 0)
5085 goto error;
5086 status = write16(state, QAM_DQ_QUAL_FUN3__A, 8);
5087 if (status < 0)
5088 goto error;
5089 status = write16(state, QAM_DQ_QUAL_FUN4__A, 6);
5090 if (status < 0)
5091 goto error;
5092 status = write16(state, QAM_DQ_QUAL_FUN5__A, 0);
5093 if (status < 0)
5094 goto error;
5095
5096 status = write16(state, QAM_SY_SYNC_HWM__A, 5);
5097 if (status < 0)
5098 goto error;
5099 status = write16(state, QAM_SY_SYNC_AWM__A, 4);
5100 if (status < 0)
5101 goto error;
5102 status = write16(state, QAM_SY_SYNC_LWM__A, 3);
5103 if (status < 0)
5104 goto error;
5105
5106 /* QAM Slicer Settings */
5107
5108 status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM256);
5109 if (status < 0)
5110 goto error;
5111
5112
5113 /* QAM Loop Controller Coeficients */
5114
5115 status = write16(state, SCU_RAM_QAM_LC_CA_FINE__A, 15);
5116 if (status < 0)
5117 goto error;
5118 status = write16(state, SCU_RAM_QAM_LC_CA_COARSE__A, 40);
5119 if (status < 0)
5120 goto error;
5121 status = write16(state, SCU_RAM_QAM_LC_EP_FINE__A, 12);
5122 if (status < 0)
5123 goto error;
5124 status = write16(state, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24);
5125 if (status < 0)
5126 goto error;
5127 status = write16(state, SCU_RAM_QAM_LC_EP_COARSE__A, 24);
5128 if (status < 0)
5129 goto error;
5130 status = write16(state, SCU_RAM_QAM_LC_EI_FINE__A, 12);
5131 if (status < 0)
5132 goto error;
5133 status = write16(state, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16);
5134 if (status < 0)
5135 goto error;
5136 status = write16(state, SCU_RAM_QAM_LC_EI_COARSE__A, 16);
5137 if (status < 0)
5138 goto error;
5139
5140 status = write16(state, SCU_RAM_QAM_LC_CP_FINE__A, 5);
5141 if (status < 0)
5142 goto error;
5143 status = write16(state, SCU_RAM_QAM_LC_CP_MEDIUM__A, 50);
5144 if (status < 0)
5145 goto error;
5146 status = write16(state, SCU_RAM_QAM_LC_CP_COARSE__A, 250);
5147 if (status < 0)
5148 goto error;
5149 status = write16(state, SCU_RAM_QAM_LC_CI_FINE__A, 5);
5150 if (status < 0)
5151 goto error;
5152 status = write16(state, SCU_RAM_QAM_LC_CI_MEDIUM__A, 50);
5153 if (status < 0)
5154 goto error;
5155 status = write16(state, SCU_RAM_QAM_LC_CI_COARSE__A, 125);
5156 if (status < 0)
5157 goto error;
5158 status = write16(state, SCU_RAM_QAM_LC_CF_FINE__A, 16);
5159 if (status < 0)
5160 goto error;
5161 status = write16(state, SCU_RAM_QAM_LC_CF_MEDIUM__A, 25);
5162 if (status < 0)
5163 goto error;
5164 status = write16(state, SCU_RAM_QAM_LC_CF_COARSE__A, 48);
5165 if (status < 0)
5166 goto error;
5167 status = write16(state, SCU_RAM_QAM_LC_CF1_FINE__A, 5);
5168 if (status < 0)
5169 goto error;
5170 status = write16(state, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 10);
5171 if (status < 0)
5172 goto error;
5173 status = write16(state, SCU_RAM_QAM_LC_CF1_COARSE__A, 10);
5174 if (status < 0)
5175 goto error;
5176
5177
5178 /* QAM State Machine (FSM) Thresholds */
5179
5180 status = write16(state, SCU_RAM_QAM_FSM_RTH__A, 50);
5181 if (status < 0)
5182 goto error;
5183 status = write16(state, SCU_RAM_QAM_FSM_FTH__A, 60);
5184 if (status < 0)
5185 goto error;
5186 status = write16(state, SCU_RAM_QAM_FSM_CTH__A, 80);
5187 if (status < 0)
5188 goto error;
5189 status = write16(state, SCU_RAM_QAM_FSM_PTH__A, 100);
5190 if (status < 0)
5191 goto error;
5192 status = write16(state, SCU_RAM_QAM_FSM_QTH__A, 150);
5193 if (status < 0)
5194 goto error;
5195 status = write16(state, SCU_RAM_QAM_FSM_MTH__A, 110);
5196 if (status < 0)
5197 goto error;
5198
5199 status = write16(state, SCU_RAM_QAM_FSM_RATE_LIM__A, 40);
5200 if (status < 0)
5201 goto error;
5202 status = write16(state, SCU_RAM_QAM_FSM_COUNT_LIM__A, 4);
5203 if (status < 0)
5204 goto error;
5205 status = write16(state, SCU_RAM_QAM_FSM_FREQ_LIM__A, 12);
5206 if (status < 0)
5207 goto error;
5208
5209
5210 /* QAM FSM Tracking Parameters */
5211
5212 status = write16(state, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, (u16) 8);
5213 if (status < 0)
5214 goto error;
5215 status = write16(state, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, (u16) 74);
5216 if (status < 0)
5217 goto error;
5218 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16) 18);
5219 if (status < 0)
5220 goto error;
5221 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16) 13);
5222 if (status < 0)
5223 goto error;
5224 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16) 7);
5225 if (status < 0)
5226 goto error;
5227 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16) 0);
5228 if (status < 0)
5229 goto error;
5230 status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -8);
5231error:
5232 if (status < 0)
5233 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
5234 return status;
5235}
5236
5237
5238/*============================================================================*/
5239/**
5240* \brief Reset QAM block.
5241* \param demod: instance of demod.
5242* \param channel: pointer to channel data.
5243* \return DRXStatus_t.
5244*/
5245static int QAMResetQAM(struct drxk_state *state)
5246{
5247 int status;
5248 u16 cmdResult;
5249
5250 dprintk(1, "\n");
5251 /* Stop QAM comstate->m_exec */
5252 status = write16(state, QAM_COMM_EXEC__A, QAM_COMM_EXEC_STOP);
5253 if (status < 0)
5254 goto error;
5255
5256 status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_RESET, 0, NULL, 1, &cmdResult);
5257error:
5258 if (status < 0)
5259 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
5260 return status;
5261}
5262
5263/*============================================================================*/
5264
/**
* \brief Set QAM symbolrate.
* \param state instance of demod; the requested symbol rate is read from
*        state->param.u.qam.symbol_rate and the system clock from
*        state->m_sysClockFreq.
* \return DRXStatus_t (0 on success, negative error code otherwise).
*
* Programs the IQM rate converter (IQM_RC_RATE_OFS_LO) and the QAM loop
* controller symbol frequency (QAM_LC_SYMBOL_FREQ) for the requested
* symbol rate, using fixed-point arithmetic throughout.
*/
static int QAMSetSymbolrate(struct drxk_state *state)
{
	u32 adcFrequency = 0;
	u32 symbFreq = 0;
	u32 iqmRcRate = 0;
	u16 ratesel = 0;
	u32 lcSymbRate = 0;
	int status;

	dprintk(1, "\n");
	/* Select & calculate correct IQM rate */
	/* NOTE(review): the ADC clock is apparently sysClockFreq/3 (with
	 * m_sysClockFreq presumably in kHz, given the *1000) — confirm
	 * against the datasheet. */
	adcFrequency = (state->m_sysClockFreq * 1000) / 3;
	ratesel = 0;
	/* printk(KERN_DEBUG "drxk: SR %d\n", state->param.u.qam.symbol_rate); */
	/* Each threshold is double the previous one; lower symbol rates get
	 * a larger ratesel (used below as an extra factor of 1 << ratesel). */
	if (state->param.u.qam.symbol_rate <= 1188750)
		ratesel = 3;
	else if (state->param.u.qam.symbol_rate <= 2377500)
		ratesel = 2;
	else if (state->param.u.qam.symbol_rate <= 4755000)
		ratesel = 1;
	status = write16(state, IQM_FD_RATESEL__A, ratesel);
	if (status < 0)
		goto error;

	/*
	  IqmRcRate = ((Fadc / (symbolrate * (4<<ratesel))) - 1) * (1<<23)
	*/
	symbFreq = state->param.u.qam.symbol_rate * (1 << ratesel);
	if (symbFreq == 0) {
		/* Divide by zero */
		status = -EINVAL;
		goto error;
	}
	/* Fixed point: integer quotient scaled by 2^21, plus the fractional
	 * part (Frac28a apparently returns a 2^28-scaled fraction; >> 7
	 * aligns it to the 2^21 scale — TODO confirm), minus the "- 1" term
	 * of the formula above (2^23 == 4 * 2^21, matching the factor 4 in
	 * the denominator). */
	iqmRcRate = (adcFrequency / symbFreq) * (1 << 21) +
		(Frac28a((adcFrequency % symbFreq), symbFreq) >> 7) -
		(1 << 23);
	status = write32(state, IQM_RC_RATE_OFS_LO__A, iqmRcRate);
	if (status < 0)
		goto error;
	/* Remember the programmed value in the state. */
	state->m_iqmRcRate = iqmRcRate;
	/*
	  LcSymbFreq = round (.125 * symbolrate / adcFreq * (1<<15))
	*/
	symbFreq = state->param.u.qam.symbol_rate;
	if (adcFrequency == 0) {
		/* Divide by zero */
		status = -EINVAL;
		goto error;
	}
	/* 0.125 * 2^15 == 2^12, hence the 1 << 12 scale; the Frac28a
	 * result >> 16 contributes the fractional part at the same scale. */
	lcSymbRate = (symbFreq / adcFrequency) * (1 << 12) +
		(Frac28a((symbFreq % adcFrequency), adcFrequency) >>
		16);
	/* Clamp to 511 (9 bits). */
	if (lcSymbRate > 511)
		lcSymbRate = 511;
	status = write16(state, QAM_LC_SYMBOL_FREQ__A, (u16) lcSymbRate);

error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
5332
5333/*============================================================================*/
5334
5335/**
5336* \brief Get QAM lock status.
5337* \param demod: instance of demod.
5338* \param channel: pointer to channel data.
5339* \return DRXStatus_t.
5340*/
5341
5342static int GetQAMLockStatus(struct drxk_state *state, u32 *pLockStatus)
5343{
5344 int status;
5345 u16 Result[2] = { 0, 0 };
5346
5347 dprintk(1, "\n");
5348 *pLockStatus = NOT_LOCKED;
5349 status = scu_command(state,
5350 SCU_RAM_COMMAND_STANDARD_QAM |
5351 SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK, 0, NULL, 2,
5352 Result);
5353 if (status < 0)
5354 printk(KERN_ERR "drxk: %s status = %08x\n", __func__, status);
5355
5356 if (Result[1] < SCU_RAM_QAM_LOCKED_LOCKED_DEMOD_LOCKED) {
5357 /* 0x0000 NOT LOCKED */
5358 } else if (Result[1] < SCU_RAM_QAM_LOCKED_LOCKED_LOCKED) {
5359 /* 0x4000 DEMOD LOCKED */
5360 *pLockStatus = DEMOD_LOCK;
5361 } else if (Result[1] < SCU_RAM_QAM_LOCKED_LOCKED_NEVER_LOCK) {
5362 /* 0x8000 DEMOD + FEC LOCKED (system lock) */
5363 *pLockStatus = MPEG_LOCK;
5364 } else {
5365 /* 0xC000 NEVER LOCKED */
5366 /* (system will never be able to lock to the signal) */
5367 /* TODO: check this, intermediate & standard specific lock states are not
5368 taken into account here */
5369 *pLockStatus = NEVER_LOCK;
5370 }
5371 return status;
5372}
5373
5374#define QAM_MIRROR__M 0x03
5375#define QAM_MIRROR_NORMAL 0x00
5376#define QAM_MIRRORED 0x01
5377#define QAM_MIRROR_AUTO_ON 0x02
5378#define QAM_LOCKRANGE__M 0x10
5379#define QAM_LOCKRANGE_NORMAL 0x10
5380
5381static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz,
5382 s32 tunerFreqOffset)
5383{
5384 int status;
5385 u16 setParamParameters[4] = { 0, 0, 0, 0 };
5386 u16 cmdResult;
5387
5388 dprintk(1, "\n");
5389 /*
5390 * STEP 1: reset demodulator
5391 * resets FEC DI and FEC RS
5392 * resets QAM block
5393 * resets SCU variables
5394 */
5395 status = write16(state, FEC_DI_COMM_EXEC__A, FEC_DI_COMM_EXEC_STOP);
5396 if (status < 0)
5397 goto error;
5398 status = write16(state, FEC_RS_COMM_EXEC__A, FEC_RS_COMM_EXEC_STOP);
5399 if (status < 0)
5400 goto error;
5401 status = QAMResetQAM(state);
5402 if (status < 0)
5403 goto error;
5404
5405 /*
5406 * STEP 2: configure demodulator
5407 * -set params; resets IQM,QAM,FEC HW; initializes some
5408 * SCU variables
5409 */
5410 status = QAMSetSymbolrate(state);
5411 if (status < 0)
5412 goto error;
5413
5414 /* Set params */
5415 switch (state->param.u.qam.modulation) {
5416 case QAM_256:
5417 state->m_Constellation = DRX_CONSTELLATION_QAM256;
5418 break;
5419 case QAM_AUTO:
5420 case QAM_64:
5421 state->m_Constellation = DRX_CONSTELLATION_QAM64;
5422 break;
5423 case QAM_16:
5424 state->m_Constellation = DRX_CONSTELLATION_QAM16;
5425 break;
5426 case QAM_32:
5427 state->m_Constellation = DRX_CONSTELLATION_QAM32;
5428 break;
5429 case QAM_128:
5430 state->m_Constellation = DRX_CONSTELLATION_QAM128;
5431 break;
5432 default:
5433 status = -EINVAL;
5434 break;
5435 }
5436 if (status < 0)
5437 goto error;
5438 setParamParameters[0] = state->m_Constellation; /* constellation */
5439 setParamParameters[1] = DRXK_QAM_I12_J17; /* interleave mode */
5440 if (state->m_OperationMode == OM_QAM_ITU_C)
5441 setParamParameters[2] = QAM_TOP_ANNEX_C;
5442 else
5443 setParamParameters[2] = QAM_TOP_ANNEX_A;
5444 setParamParameters[3] |= (QAM_MIRROR_AUTO_ON);
5445 /* Env parameters */
5446 /* check for LOCKRANGE Extented */
5447 /* setParamParameters[3] |= QAM_LOCKRANGE_NORMAL; */
5448
5449 status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM, 4, setParamParameters, 1, &cmdResult);
5450 if (status < 0) {
5451 /* Fall-back to the simpler call */
5452 if (state->m_OperationMode == OM_QAM_ITU_C)
5453 setParamParameters[0] = QAM_TOP_ANNEX_C;
5454 else
5455 setParamParameters[0] = QAM_TOP_ANNEX_A;
5456 status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV, 1, setParamParameters, 1, &cmdResult);
5457 if (status < 0)
5458 goto error;
5459
5460 setParamParameters[0] = state->m_Constellation; /* constellation */
5461 setParamParameters[1] = DRXK_QAM_I12_J17; /* interleave mode */
5462 status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM, 2, setParamParameters, 1, &cmdResult);
5463 }
5464 if (status < 0)
5465 goto error;
5466
5467 /*
5468 * STEP 3: enable the system in a mode where the ADC provides valid
5469 * signal setup constellation independent registers
5470 */
5471#if 0
5472 status = SetFrequency(channel, tunerFreqOffset));
5473 if (status < 0)
5474 goto error;
5475#endif
5476 status = SetFrequencyShifter(state, IntermediateFreqkHz, tunerFreqOffset, true);
5477 if (status < 0)
5478 goto error;
5479
5480 /* Setup BER measurement */
5481 status = SetQAMMeasurement(state, state->m_Constellation, state->param.u. qam.symbol_rate);
5482 if (status < 0)
5483 goto error;
5484
5485 /* Reset default values */
5486 status = write16(state, IQM_CF_SCALE_SH__A, IQM_CF_SCALE_SH__PRE);
5487 if (status < 0)
5488 goto error;
5489 status = write16(state, QAM_SY_TIMEOUT__A, QAM_SY_TIMEOUT__PRE);
5490 if (status < 0)
5491 goto error;
5492
5493 /* Reset default LC values */
5494 status = write16(state, QAM_LC_RATE_LIMIT__A, 3);
5495 if (status < 0)
5496 goto error;
5497 status = write16(state, QAM_LC_LPF_FACTORP__A, 4);
5498 if (status < 0)
5499 goto error;
5500 status = write16(state, QAM_LC_LPF_FACTORI__A, 4);
5501 if (status < 0)
5502 goto error;
5503 status = write16(state, QAM_LC_MODE__A, 7);
5504 if (status < 0)
5505 goto error;
5506
5507 status = write16(state, QAM_LC_QUAL_TAB0__A, 1);
5508 if (status < 0)
5509 goto error;
5510 status = write16(state, QAM_LC_QUAL_TAB1__A, 1);
5511 if (status < 0)
5512 goto error;
5513 status = write16(state, QAM_LC_QUAL_TAB2__A, 1);
5514 if (status < 0)
5515 goto error;
5516 status = write16(state, QAM_LC_QUAL_TAB3__A, 1);
5517 if (status < 0)
5518 goto error;
5519 status = write16(state, QAM_LC_QUAL_TAB4__A, 2);
5520 if (status < 0)
5521 goto error;
5522 status = write16(state, QAM_LC_QUAL_TAB5__A, 2);
5523 if (status < 0)
5524 goto error;
5525 status = write16(state, QAM_LC_QUAL_TAB6__A, 2);
5526 if (status < 0)
5527 goto error;
5528 status = write16(state, QAM_LC_QUAL_TAB8__A, 2);
5529 if (status < 0)
5530 goto error;
5531 status = write16(state, QAM_LC_QUAL_TAB9__A, 2);
5532 if (status < 0)
5533 goto error;
5534 status = write16(state, QAM_LC_QUAL_TAB10__A, 2);
5535 if (status < 0)
5536 goto error;
5537 status = write16(state, QAM_LC_QUAL_TAB12__A, 2);
5538 if (status < 0)
5539 goto error;
5540 status = write16(state, QAM_LC_QUAL_TAB15__A, 3);
5541 if (status < 0)
5542 goto error;
5543 status = write16(state, QAM_LC_QUAL_TAB16__A, 3);
5544 if (status < 0)
5545 goto error;
5546 status = write16(state, QAM_LC_QUAL_TAB20__A, 4);
5547 if (status < 0)
5548 goto error;
5549 status = write16(state, QAM_LC_QUAL_TAB25__A, 4);
5550 if (status < 0)
5551 goto error;
5552
5553 /* Mirroring, QAM-block starting point not inverted */
5554 status = write16(state, QAM_SY_SP_INV__A, QAM_SY_SP_INV_SPECTRUM_INV_DIS);
5555 if (status < 0)
5556 goto error;
5557
5558 /* Halt SCU to enable safe non-atomic accesses */
5559 status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_HOLD);
5560 if (status < 0)
5561 goto error;
5562
5563 /* STEP 4: constellation specific setup */
5564 switch (state->param.u.qam.modulation) {
5565 case QAM_16:
5566 status = SetQAM16(state);
5567 break;
5568 case QAM_32:
5569 status = SetQAM32(state);
5570 break;
5571 case QAM_AUTO:
5572 case QAM_64:
5573 status = SetQAM64(state);
5574 break;
5575 case QAM_128:
5576 status = SetQAM128(state);
5577 break;
5578 case QAM_256:
5579 status = SetQAM256(state);
5580 break;
5581 default:
5582 status = -EINVAL;
5583 break;
5584 }
5585 if (status < 0)
5586 goto error;
5587
5588 /* Activate SCU to enable SCU commands */
5589 status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE);
5590 if (status < 0)
5591 goto error;
5592
5593 /* Re-configure MPEG output, requires knowledge of channel bitrate */
5594 /* extAttr->currentChannel.constellation = channel->constellation; */
5595 /* extAttr->currentChannel.symbolrate = channel->symbolrate; */
5596 status = MPEGTSDtoSetup(state, state->m_OperationMode);
5597 if (status < 0)
5598 goto error;
5599
5600 /* Start processes */
5601 status = MPEGTSStart(state);
5602 if (status < 0)
5603 goto error;
5604 status = write16(state, FEC_COMM_EXEC__A, FEC_COMM_EXEC_ACTIVE);
5605 if (status < 0)
5606 goto error;
5607 status = write16(state, QAM_COMM_EXEC__A, QAM_COMM_EXEC_ACTIVE);
5608 if (status < 0)
5609 goto error;
5610 status = write16(state, IQM_COMM_EXEC__A, IQM_COMM_EXEC_B_ACTIVE);
5611 if (status < 0)
5612 goto error;
5613
5614 /* STEP 5: start QAM demodulator (starts FEC, QAM and IQM HW) */
5615 status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_START, 0, NULL, 1, &cmdResult);
5616 if (status < 0)
5617 goto error;
5618
5619 /* update global DRXK data container */
5620/*? extAttr->qamInterleaveMode = DRXK_QAM_I12_J17; */
5621
5622error:
5623 if (status < 0)
5624 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
5625 return status;
5626}
5627
/**
 * SetQAMStandard() - one-time IQM/AGC bring-up for DVB-C reception
 * @state: demod instance.
 * @oMode: OM_QAM_ITU_A or OM_QAM_ITU_C; any other mode yields -EINVAL.
 *
 * Powers up the QAM path, loads the annex-specific IQM channel filter
 * taps through the boot loader, programs the IQM front end and the
 * impulse-noise processing unit, synchronizes the ADC and configures
 * the pre-SAW and RF/IF AGCs.  Returns 0 on success or a negative
 * error code (also logged).
 */
static int SetQAMStandard(struct drxk_state *state,
			  enum OperationMode oMode)
{
	int status;
#ifdef DRXK_QAM_TAPS
#define DRXK_QAMA_TAPS_SELECT
#include "drxk_filters.h"
#undef DRXK_QAMA_TAPS_SELECT
#endif

	dprintk(1, "\n");

	/* added antenna switch */
	/* NOTE(review): the return value is ignored, so a failed GPIO
	 * write is treated as non-fatal here - confirm this is intended. */
	SwitchAntennaToQAM(state);

	/* Ensure correct power-up mode */
	status = PowerUpQAM(state);
	if (status < 0)
		goto error;
	/* Reset QAM block */
	status = QAMResetQAM(state);
	if (status < 0)
		goto error;

	/* Setup IQM */

	status = write16(state, IQM_COMM_EXEC__A, IQM_COMM_EXEC_B_STOP);
	if (status < 0)
		goto error;
	status = write16(state, IQM_AF_AMUX__A, IQM_AF_AMUX_SIGNAL2ADC);
	if (status < 0)
		goto error;

	/* Upload IQM Channel Filter settings by
	   boot loader from ROM table */
	switch (oMode) {
	case OM_QAM_ITU_A:
		status = BLChainCmd(state, DRXK_BL_ROM_OFFSET_TAPS_ITU_A, DRXK_BLCC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT);
		break;
	case OM_QAM_ITU_C:
		/* Annex C loads the same tap set into both the real and
		 * imaginary tap banks via direct boot-loader commands. */
		status = BLDirectCmd(state, IQM_CF_TAP_RE0__A, DRXK_BL_ROM_OFFSET_TAPS_ITU_C, DRXK_BLDC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT);
		if (status < 0)
			goto error;
		status = BLDirectCmd(state, IQM_CF_TAP_IM0__A, DRXK_BL_ROM_OFFSET_TAPS_ITU_C, DRXK_BLDC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT);
		break;
	default:
		status = -EINVAL;
	}
	if (status < 0)
		goto error;

	/* Channel filter output routing and symmetry */
	status = write16(state, IQM_CF_OUT_ENA__A, (1 << IQM_CF_OUT_ENA_QAM__B));
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_SYMMETRIC__A, 0);
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_MIDTAP__A, ((1 << IQM_CF_MIDTAP_RE__B) | (1 << IQM_CF_MIDTAP_IM__B)));
	if (status < 0)
		goto error;

	/* Front-end clipping/sensing and power-measurement defaults */
	status = write16(state, IQM_RC_STRETCH__A, 21);
	if (status < 0)
		goto error;
	status = write16(state, IQM_AF_CLP_LEN__A, 0);
	if (status < 0)
		goto error;
	status = write16(state, IQM_AF_CLP_TH__A, 448);
	if (status < 0)
		goto error;
	status = write16(state, IQM_AF_SNS_LEN__A, 0);
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_POW_MEAS_LEN__A, 0);
	if (status < 0)
		goto error;

	/* Select the QAM adjustment paths for FS/RC/CF */
	status = write16(state, IQM_FS_ADJ_SEL__A, 1);
	if (status < 0)
		goto error;
	status = write16(state, IQM_RC_ADJ_SEL__A, 1);
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_ADJ_SEL__A, 1);
	if (status < 0)
		goto error;
	status = write16(state, IQM_AF_UPD_SEL__A, 0);
	if (status < 0)
		goto error;

	/* IQM Impulse Noise Processing Unit */
	status = write16(state, IQM_CF_CLP_VAL__A, 500);
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_DATATH__A, 1000);
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_BYPASSDET__A, 1);
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_DET_LCT__A, 0);
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_WND_LEN__A, 1);
	if (status < 0)
		goto error;
	status = write16(state, IQM_CF_PKDTH__A, 1);
	if (status < 0)
		goto error;
	status = write16(state, IQM_AF_INC_BYPASS__A, 1);
	if (status < 0)
		goto error;

	/* turn on IQMAF. Must be done before setAgc**() */
	status = SetIqmAf(state, true);
	if (status < 0)
		goto error;
	status = write16(state, IQM_AF_START_LOCK__A, 0x01);
	if (status < 0)
		goto error;

	/* IQM will not be reset from here, sync ADC and update/init AGC */
	status = ADCSynchronization(state);
	if (status < 0)
		goto error;

	/* Set the FSM step period */
	status = write16(state, SCU_RAM_QAM_FSM_STEP_PERIOD__A, 2000);
	if (status < 0)
		goto error;

	/* Halt SCU to enable safe non-atomic accesses */
	status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_HOLD);
	if (status < 0)
		goto error;

	/* No more resets of the IQM, current standard correctly set =>
	   now AGCs can be configured. */

	status = InitAGC(state, true);
	if (status < 0)
		goto error;
	status = SetPreSaw(state, &(state->m_qamPreSawCfg));
	if (status < 0)
		goto error;

	/* Configure AGC's */
	status = SetAgcRf(state, &(state->m_qamRfAgcCfg), true);
	if (status < 0)
		goto error;
	status = SetAgcIf(state, &(state->m_qamIfAgcCfg), true);
	if (status < 0)
		goto error;

	/* Activate SCU to enable SCU commands */
	status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE);
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
	return status;
}
5789
5790static int WriteGPIO(struct drxk_state *state)
5791{
5792 int status;
5793 u16 value = 0;
5794
5795 dprintk(1, "\n");
5796 /* stop lock indicator process */
5797 status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
5798 if (status < 0)
5799 goto error;
5800
5801 /* Write magic word to enable pdr reg write */
5802 status = write16(state, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY);
5803 if (status < 0)
5804 goto error;
5805
5806 if (state->m_hasSAWSW) {
5807 if (state->UIO_mask & 0x0001) { /* UIO-1 */
5808 /* write to io pad configuration register - output mode */
5809 status = write16(state, SIO_PDR_SMA_TX_CFG__A, state->m_GPIOCfg);
5810 if (status < 0)
5811 goto error;
5812
5813 /* use corresponding bit in io data output registar */
5814 status = read16(state, SIO_PDR_UIO_OUT_LO__A, &value);
5815 if (status < 0)
5816 goto error;
5817 if ((state->m_GPIO & 0x0001) == 0)
5818 value &= 0x7FFF; /* write zero to 15th bit - 1st UIO */
5819 else
5820 value |= 0x8000; /* write one to 15th bit - 1st UIO */
5821 /* write back to io data output register */
5822 status = write16(state, SIO_PDR_UIO_OUT_LO__A, value);
5823 if (status < 0)
5824 goto error;
5825 }
5826 if (state->UIO_mask & 0x0002) { /* UIO-2 */
5827 /* write to io pad configuration register - output mode */
5828 status = write16(state, SIO_PDR_SMA_TX_CFG__A, state->m_GPIOCfg);
5829 if (status < 0)
5830 goto error;
5831
5832 /* use corresponding bit in io data output registar */
5833 status = read16(state, SIO_PDR_UIO_OUT_LO__A, &value);
5834 if (status < 0)
5835 goto error;
5836 if ((state->m_GPIO & 0x0002) == 0)
5837 value &= 0xBFFF; /* write zero to 14th bit - 2st UIO */
5838 else
5839 value |= 0x4000; /* write one to 14th bit - 2st UIO */
5840 /* write back to io data output register */
5841 status = write16(state, SIO_PDR_UIO_OUT_LO__A, value);
5842 if (status < 0)
5843 goto error;
5844 }
5845 if (state->UIO_mask & 0x0004) { /* UIO-3 */
5846 /* write to io pad configuration register - output mode */
5847 status = write16(state, SIO_PDR_SMA_TX_CFG__A, state->m_GPIOCfg);
5848 if (status < 0)
5849 goto error;
5850
5851 /* use corresponding bit in io data output registar */
5852 status = read16(state, SIO_PDR_UIO_OUT_LO__A, &value);
5853 if (status < 0)
5854 goto error;
5855 if ((state->m_GPIO & 0x0004) == 0)
5856 value &= 0xFFFB; /* write zero to 2nd bit - 3rd UIO */
5857 else
5858 value |= 0x0004; /* write one to 2nd bit - 3rd UIO */
5859 /* write back to io data output register */
5860 status = write16(state, SIO_PDR_UIO_OUT_LO__A, value);
5861 if (status < 0)
5862 goto error;
5863 }
5864 }
5865 /* Write magic word to disable pdr reg write */
5866 status = write16(state, SIO_TOP_COMM_KEY__A, 0x0000);
5867error:
5868 if (status < 0)
5869 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
5870 return status;
5871}
5872
5873static int SwitchAntennaToQAM(struct drxk_state *state)
5874{
5875 int status = 0;
5876 bool gpio_state;
5877
5878 dprintk(1, "\n");
5879
5880 if (!state->antenna_gpio)
5881 return 0;
5882
5883 gpio_state = state->m_GPIO & state->antenna_gpio;
5884
5885 if (state->antenna_dvbt ^ gpio_state) {
5886 /* Antenna is on DVB-T mode. Switch */
5887 if (state->antenna_dvbt)
5888 state->m_GPIO &= ~state->antenna_gpio;
5889 else
5890 state->m_GPIO |= state->antenna_gpio;
5891 status = WriteGPIO(state);
5892 }
5893 if (status < 0)
5894 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
5895 return status;
5896}
5897
5898static int SwitchAntennaToDVBT(struct drxk_state *state)
5899{
5900 int status = 0;
5901 bool gpio_state;
5902
5903 dprintk(1, "\n");
5904
5905 if (!state->antenna_gpio)
5906 return 0;
5907
5908 gpio_state = state->m_GPIO & state->antenna_gpio;
5909
5910 if (!(state->antenna_dvbt ^ gpio_state)) {
5911 /* Antenna is on DVB-C mode. Switch */
5912 if (state->antenna_dvbt)
5913 state->m_GPIO |= state->antenna_gpio;
5914 else
5915 state->m_GPIO &= ~state->antenna_gpio;
5916 status = WriteGPIO(state);
5917 }
5918 if (status < 0)
5919 printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
5920 return status;
5921}
5922
5923
/**
 * PowerDownDevice() - put the DRX-K into its sleep power state
 * @state: demod instance.
 *
 * Optionally opens the I2C bridge first (m_bPDownOpenBridge), stops the
 * OFDM token ring, gates the clocks via the SIO clock-control registers
 * and finally sends the host interface to sleep through HI_CfgCommand().
 * Returns 0 or a negative error code (also logged).
 */
static int PowerDownDevice(struct drxk_state *state)
{
	/* Power down to requested mode */
	/* Backup some register settings */
	/* Set pins with possible pull-ups connected to them in input mode */
	/* Analog power down */
	/* ADC power down */
	/* Power down device */
	int status;

	dprintk(1, "\n");
	if (state->m_bPDownOpenBridge) {
		/* Open I2C bridge before power down of DRXK */
		status = ConfigureI2CBridge(state, true);
		if (status < 0)
			goto error;
	}
	/* driver 0.9.0 */
	status = DVBTEnableOFDMTokenRing(state, false);
	if (status < 0)
		goto error;

	/* Gate the clocks; SIO_CC_UPDATE with the key commits the change */
	status = write16(state, SIO_CC_PWD_MODE__A, SIO_CC_PWD_MODE_LEVEL_CLOCK);
	if (status < 0)
		goto error;
	status = write16(state, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY);
	if (status < 0)
		goto error;
	/* Request host-interface sleep mode */
	state->m_HICfgCtrl |= SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ;
	status = HI_CfgCommand(state);
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);

	return status;
}
5960
5961static int load_microcode(struct drxk_state *state, const char *mc_name)
5962{
5963 const struct firmware *fw = NULL;
5964 int err = 0;
5965
5966 dprintk(1, "\n");
5967
5968 err = request_firmware(&fw, mc_name, state->i2c->dev.parent);
5969 if (err < 0) {
5970 printk(KERN_ERR
5971 "drxk: Could not load firmware file %s.\n", mc_name);
5972 printk(KERN_INFO
5973 "drxk: Copy %s to your hotplug directory!\n", mc_name);
5974 return err;
5975 }
5976 err = DownloadMicrocode(state, fw->data, fw->size);
5977 release_firmware(fw);
5978 return err;
5979}
5980
/**
 * init_drxk() - bring the DRX-K from power-on to the STOPPED state
 * @state: demod instance.
 *
 * One-time cold-start sequence: power-up, clock-domain soft reset,
 * host-interface setup, firmware download, driver-version stamping into
 * SCU RAM and MPEG-TS output configuration.  A no-op when the device is
 * not in DRXK_UNINITIALIZED state.  Returns 0 on success or a negative
 * error code (also logged).
 */
static int init_drxk(struct drxk_state *state)
{
	int status = 0;
	enum DRXPowerMode powerMode = DRXK_POWER_DOWN_OFDM;
	u16 driverVersion;

	dprintk(1, "\n");
	if ((state->m_DrxkState == DRXK_UNINITIALIZED)) {
		status = PowerUpDevice(state);
		if (status < 0)
			goto error;
		status = DRXX_Open(state);
		if (status < 0)
			goto error;
		/* Soft reset of OFDM-, sys- and osc-clockdomain */
		status = write16(state, SIO_CC_SOFT_RST__A, SIO_CC_SOFT_RST_OFDM__M | SIO_CC_SOFT_RST_SYS__M | SIO_CC_SOFT_RST_OSC__M);
		if (status < 0)
			goto error;
		status = write16(state, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY);
		if (status < 0)
			goto error;
		/* TODO is this needed, if yes how much delay in worst case scenario */
		msleep(1);
		state->m_DRXK_A3_PATCH_CODE = true;
		status = GetDeviceCapabilities(state);
		if (status < 0)
			goto error;

		/* Bridge delay, uses oscilator clock */
		/* Delay = (delay (nano seconds) * oscclk (kHz))/ 1000 */
		/* SDA brdige delay */
		state->m_HICfgBridgeDelay =
			(u16) ((state->m_oscClockFreq / 1000) *
				HI_I2C_BRIDGE_DELAY) / 1000;
		/* Clipping */
		if (state->m_HICfgBridgeDelay >
			SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M) {
			state->m_HICfgBridgeDelay =
				SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M;
		}
		/* SCL bridge delay, same as SDA for now */
		state->m_HICfgBridgeDelay +=
			state->m_HICfgBridgeDelay <<
			SIO_HI_RA_RAM_PAR_3_CFG_DBL_SCL__B;

		status = InitHI(state);
		if (status < 0)
			goto error;
		/* disable various processes */
#if NOA1ROM
		if (!(state->m_DRXK_A1_ROM_CODE)
			&& !(state->m_DRXK_A2_ROM_CODE))
#endif
		{
			status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
			if (status < 0)
				goto error;
		}

		/* disable MPEG port */
		status = MPEGTSDisable(state);
		if (status < 0)
			goto error;

		/* Stop AUD and SCU */
		status = write16(state, AUD_COMM_EXEC__A, AUD_COMM_EXEC_STOP);
		if (status < 0)
			goto error;
		status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_STOP);
		if (status < 0)
			goto error;

		/* enable token-ring bus through OFDM block for possible ucode upload */
		status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, SIO_OFDM_SH_OFDM_RING_ENABLE_ON);
		if (status < 0)
			goto error;

		/* include boot loader section */
		status = write16(state, SIO_BL_COMM_EXEC__A, SIO_BL_COMM_EXEC_ACTIVE);
		if (status < 0)
			goto error;
		status = BLChainCmd(state, 0, 6, 100);
		if (status < 0)
			goto error;

		/* NOTE(review): load_microcode() errors are ignored here;
		 * the device then keeps running its ROM code - confirm
		 * that is the intended fallback. */
		if (!state->microcode_name)
			load_microcode(state, "drxk_a3.mc");
		else
			load_microcode(state, state->microcode_name);

		/* disable token-ring bus through OFDM block for possible ucode upload */
		status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, SIO_OFDM_SH_OFDM_RING_ENABLE_OFF);
		if (status < 0)
			goto error;

		/* Run SCU for a little while to initialize microcode version numbers */
		status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE);
		if (status < 0)
			goto error;
		status = DRXX_Open(state);
		if (status < 0)
			goto error;
		/* added for test */
		msleep(30);

		powerMode = DRXK_POWER_DOWN_OFDM;
		status = CtrlPowerMode(state, &powerMode);
		if (status < 0)
			goto error;

		/* Stamp driver version number in SCU data RAM in BCD code
			Done to enable field application engineers to retreive drxdriver version
			via I2C from SCU RAM.
			Not using SCU command interface for SCU register access since no
			microcode may be present.
			*/
		driverVersion =
			(((DRXK_VERSION_MAJOR / 100) % 10) << 12) +
			(((DRXK_VERSION_MAJOR / 10) % 10) << 8) +
			((DRXK_VERSION_MAJOR % 10) << 4) +
			(DRXK_VERSION_MINOR % 10);
		status = write16(state, SCU_RAM_DRIVER_VER_HI__A, driverVersion);
		if (status < 0)
			goto error;
		driverVersion =
			(((DRXK_VERSION_PATCH / 1000) % 10) << 12) +
			(((DRXK_VERSION_PATCH / 100) % 10) << 8) +
			(((DRXK_VERSION_PATCH / 10) % 10) << 4) +
			(DRXK_VERSION_PATCH % 10);
		status = write16(state, SCU_RAM_DRIVER_VER_LO__A, driverVersion);
		if (status < 0)
			goto error;

		printk(KERN_INFO "DRXK driver version %d.%d.%d\n",
			DRXK_VERSION_MAJOR, DRXK_VERSION_MINOR,
			DRXK_VERSION_PATCH);

		/* Dirty fix of default values for ROM/PATCH microcode
			Dirty because this fix makes it impossible to setup suitable values
			before calling DRX_Open. This solution requires changes to RF AGC speed
			to be done via the CTRL function after calling DRX_Open */

		/* m_dvbtRfAgcCfg.speed = 3; */

		/* Reset driver debug flags to 0 */
		status = write16(state, SCU_RAM_DRIVER_DEBUG__A, 0);
		if (status < 0)
			goto error;
		/* driver 0.9.0 */
		/* Setup FEC OC:
			NOTE: No more full FEC resets allowed afterwards!! */
		status = write16(state, FEC_COMM_EXEC__A, FEC_COMM_EXEC_STOP);
		if (status < 0)
			goto error;
		/* MPEGTS functions are still the same */
		status = MPEGTSDtoInit(state);
		if (status < 0)
			goto error;
		status = MPEGTSStop(state);
		if (status < 0)
			goto error;
		status = MPEGTSConfigurePolarity(state);
		if (status < 0)
			goto error;
		status = MPEGTSConfigurePins(state, state->m_enableMPEGOutput);
		if (status < 0)
			goto error;
		/* added: configure GPIO */
		status = WriteGPIO(state);
		if (status < 0)
			goto error;

		state->m_DrxkState = DRXK_STOPPED;

		if (state->m_bPowerDown) {
			status = PowerDownDevice(state);
			if (status < 0)
				goto error;
			state->m_DrxkState = DRXK_POWERED_DOWN;
		} else
			state->m_DrxkState = DRXK_STOPPED;
	}
error:
	if (status < 0)
		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);

	return status;
}
6169
6170static void drxk_c_release(struct dvb_frontend *fe)
6171{
6172 struct drxk_state *state = fe->demodulator_priv;
6173
6174 dprintk(1, "\n");
6175 kfree(state);
6176}
6177
6178static int drxk_c_init(struct dvb_frontend *fe)
6179{
6180 struct drxk_state *state = fe->demodulator_priv;
6181
6182 dprintk(1, "\n");
6183 if (mutex_trylock(&state->ctlock) == 0)
6184 return -EBUSY;
6185 SetOperationMode(state, OM_QAM_ITU_A);
6186 return 0;
6187}
6188
6189static int drxk_c_sleep(struct dvb_frontend *fe)
6190{
6191 struct drxk_state *state = fe->demodulator_priv;
6192
6193 dprintk(1, "\n");
6194 ShutDown(state);
6195 mutex_unlock(&state->ctlock);
6196 return 0;
6197}
6198
6199static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
6200{
6201 struct drxk_state *state = fe->demodulator_priv;
6202
6203 dprintk(1, "%s\n", enable ? "enable" : "disable");
6204 return ConfigureI2CBridge(state, enable ? true : false);
6205}
6206
6207static int drxk_set_parameters(struct dvb_frontend *fe,
6208 struct dvb_frontend_parameters *p)
6209{
6210 struct drxk_state *state = fe->demodulator_priv;
6211 u32 IF;
6212
6213 dprintk(1, "\n");
6214 if (fe->ops.i2c_gate_ctrl)
6215 fe->ops.i2c_gate_ctrl(fe, 1);
6216 if (fe->ops.tuner_ops.set_params)
6217 fe->ops.tuner_ops.set_params(fe, p);
6218 if (fe->ops.i2c_gate_ctrl)
6219 fe->ops.i2c_gate_ctrl(fe, 0);
6220 state->param = *p;
6221 fe->ops.tuner_ops.get_frequency(fe, &IF);
6222 Start(state, 0, IF);
6223
6224 /* printk(KERN_DEBUG "drxk: %s IF=%d done\n", __func__, IF); */
6225
6226 return 0;
6227}
6228
/* get_frontend stub for the DVB-C frontend: the cached parameters in
 * the dvb core are not updated here; always succeeds. */
static int drxk_c_get_frontend(struct dvb_frontend *fe,
			       struct dvb_frontend_parameters *p)
{
	dprintk(1, "\n");
	return 0;
}
6235
6236static int drxk_read_status(struct dvb_frontend *fe, fe_status_t *status)
6237{
6238 struct drxk_state *state = fe->demodulator_priv;
6239 u32 stat;
6240
6241 dprintk(1, "\n");
6242 *status = 0;
6243 GetLockStatus(state, &stat, 0);
6244 if (stat == MPEG_LOCK)
6245 *status |= 0x1f;
6246 if (stat == FEC_LOCK)
6247 *status |= 0x0f;
6248 if (stat == DEMOD_LOCK)
6249 *status |= 0x07;
6250 return 0;
6251}
6252
/* BER readout is not implemented; always reports zero errors. */
static int drxk_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	dprintk(1, "\n");

	*ber = 0;
	return 0;
}
6260
6261static int drxk_read_signal_strength(struct dvb_frontend *fe,
6262 u16 *strength)
6263{
6264 struct drxk_state *state = fe->demodulator_priv;
6265 u32 val = 0;
6266
6267 dprintk(1, "\n");
6268 ReadIFAgc(state, &val);
6269 *strength = val & 0xffff;
6270 return 0;
6271}
6272
6273static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr)
6274{
6275 struct drxk_state *state = fe->demodulator_priv;
6276 s32 snr2;
6277
6278 dprintk(1, "\n");
6279 GetSignalToNoise(state, &snr2);
6280 *snr = snr2 & 0xffff;
6281 return 0;
6282}
6283
6284static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
6285{
6286 struct drxk_state *state = fe->demodulator_priv;
6287 u16 err;
6288
6289 dprintk(1, "\n");
6290 DVBTQAMGetAccPktErr(state, &err);
6291 *ucblocks = (u32) err;
6292 return 0;
6293}
6294
6295static int drxk_c_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings
6296 *sets)
6297{
6298 dprintk(1, "\n");
6299 sets->min_delay_ms = 3000;
6300 sets->max_drift = 0;
6301 sets->step_size = 0;
6302 return 0;
6303}
6304
/* Release callback of the DVB-T frontend. */
static void drxk_t_release(struct dvb_frontend *fe)
{
	/*
	 * There's nothing to release here, as the state struct
	 * is already freed by drxk_c_release.
	 */
}
6312
6313static int drxk_t_init(struct dvb_frontend *fe)
6314{
6315 struct drxk_state *state = fe->demodulator_priv;
6316
6317 dprintk(1, "\n");
6318 if (mutex_trylock(&state->ctlock) == 0)
6319 return -EBUSY;
6320 SetOperationMode(state, OM_DVBT);
6321 return 0;
6322}
6323
6324static int drxk_t_sleep(struct dvb_frontend *fe)
6325{
6326 struct drxk_state *state = fe->demodulator_priv;
6327
6328 dprintk(1, "\n");
6329 mutex_unlock(&state->ctlock);
6330 return 0;
6331}
6332
/* get_frontend stub for the DVB-T frontend: the cached parameters in
 * the dvb core are not updated here; always succeeds. */
static int drxk_t_get_frontend(struct dvb_frontend *fe,
			       struct dvb_frontend_parameters *p)
{
	dprintk(1, "\n");

	return 0;
}
6340
/* Frontend ops for the DVB-C personality; shares drxk_set_parameters
 * and all read_* callbacks with the DVB-T ops below. */
static struct dvb_frontend_ops drxk_c_ops = {
	.info = {
		 .name = "DRXK DVB-C",
		 .type = FE_QAM,
		 .frequency_stepsize = 62500,
		 .frequency_min = 47000000,
		 .frequency_max = 862000000,
		 .symbol_rate_min = 870000,
		 .symbol_rate_max = 11700000,
		 .caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
		 FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_FEC_AUTO},
	.release = drxk_c_release,
	.init = drxk_c_init,
	.sleep = drxk_c_sleep,
	.i2c_gate_ctrl = drxk_gate_ctrl,

	.set_frontend = drxk_set_parameters,
	.get_frontend = drxk_c_get_frontend,
	.get_tune_settings = drxk_c_get_tune_settings,

	.read_status = drxk_read_status,
	.read_ber = drxk_read_ber,
	.read_signal_strength = drxk_read_signal_strength,
	.read_snr = drxk_read_snr,
	.read_ucblocks = drxk_read_ucblocks,
};
6367
/* Frontend ops for the DVB-T personality; release is a stub because
 * the shared state is freed by the DVB-C frontend. */
static struct dvb_frontend_ops drxk_t_ops = {
	.info = {
		 .name = "DRXK DVB-T",
		 .type = FE_OFDM,
		 .frequency_min = 47125000,
		 .frequency_max = 865000000,
		 .frequency_stepsize = 166667,
		 .frequency_tolerance = 0,
		 .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
		 FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
		 FE_CAN_FEC_AUTO |
		 FE_CAN_QAM_16 | FE_CAN_QAM_64 |
		 FE_CAN_QAM_AUTO |
		 FE_CAN_TRANSMISSION_MODE_AUTO |
		 FE_CAN_GUARD_INTERVAL_AUTO |
		 FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER | FE_CAN_MUTE_TS},
	.release = drxk_t_release,
	.init = drxk_t_init,
	.sleep = drxk_t_sleep,
	.i2c_gate_ctrl = drxk_gate_ctrl,

	.set_frontend = drxk_set_parameters,
	.get_frontend = drxk_t_get_frontend,

	.read_status = drxk_read_status,
	.read_ber = drxk_read_ber,
	.read_signal_strength = drxk_read_signal_strength,
	.read_snr = drxk_read_snr,
	.read_ucblocks = drxk_read_ucblocks,
};
6398
6399struct dvb_frontend *drxk_attach(const struct drxk_config *config,
6400 struct i2c_adapter *i2c,
6401 struct dvb_frontend **fe_t)
6402{
6403 struct drxk_state *state = NULL;
6404 u8 adr = config->adr;
6405
6406 dprintk(1, "\n");
6407 state = kzalloc(sizeof(struct drxk_state), GFP_KERNEL);
6408 if (!state)
6409 return NULL;
6410
6411 state->i2c = i2c;
6412 state->demod_address = adr;
6413 state->single_master = config->single_master;
6414 state->microcode_name = config->microcode_name;
6415 state->no_i2c_bridge = config->no_i2c_bridge;
6416 state->antenna_gpio = config->antenna_gpio;
6417 state->antenna_dvbt = config->antenna_dvbt;
6418
6419 /* NOTE: as more UIO bits will be used, add them to the mask */
6420 state->UIO_mask = config->antenna_gpio;
6421
6422 /* Default gpio to DVB-C */
6423 if (!state->antenna_dvbt && state->antenna_gpio)
6424 state->m_GPIO |= state->antenna_gpio;
6425 else
6426 state->m_GPIO &= ~state->antenna_gpio;
6427
6428 mutex_init(&state->mutex);
6429 mutex_init(&state->ctlock);
6430
6431 memcpy(&state->c_frontend.ops, &drxk_c_ops,
6432 sizeof(struct dvb_frontend_ops));
6433 memcpy(&state->t_frontend.ops, &drxk_t_ops,
6434 sizeof(struct dvb_frontend_ops));
6435 state->c_frontend.demodulator_priv = state;
6436 state->t_frontend.demodulator_priv = state;
6437
6438 init_state(state);
6439 if (init_drxk(state) < 0)
6440 goto error;
6441 *fe_t = &state->t_frontend;
6442
6443 return &state->c_frontend;
6444
6445error:
6446 printk(KERN_ERR "drxk: not found\n");
6447 kfree(state);
6448 return NULL;
6449}
EXPORT_SYMBOL(drxk_attach);

/* Module metadata */
MODULE_DESCRIPTION("DRX-K driver");
MODULE_AUTHOR("Ralph Metzler");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/drxk_hard.h b/drivers/media/dvb/frontends/drxk_hard.h
new file mode 100644
index 000000000000..a05c32eecdcc
--- /dev/null
+++ b/drivers/media/dvb/frontends/drxk_hard.h
@@ -0,0 +1,348 @@
1#include "drxk_map.h"
2
/* Driver version stamp written to SCU_RAM_DRIVER_VER_*. */
#define DRXK_VERSION_MAJOR 0
#define DRXK_VERSION_MINOR 9
#define DRXK_VERSION_PATCH 4300

/* Host-interface I2C timing parameters. */
#define HI_I2C_DELAY 42
#define HI_I2C_BRIDGE_DELAY 350
#define DRXK_MAX_RETRIES 100

#define DRIVER_4400 1

/* JTAG identification codes used to detect the device variant. */
#define DRXX_JTAGID 0x039210D9
#define DRXX_J_JTAGID 0x239310D9
#define DRXX_K_JTAGID 0x039210D9

/* Generic "value not known / autodetect" markers shared by several enums. */
#define DRX_UNKNOWN 254
#define DRX_AUTO 255

/* SCU command interface status values. */
#define DRX_SCU_READY 0
#define DRXK_MAX_WAITTIME (200)
#define SCU_RESULT_OK 0
#define SCU_RESULT_SIZE -4
#define SCU_RESULT_INVPAR -3
#define SCU_RESULT_UNKSTD -2
#define SCU_RESULT_UNKCMD -1

#ifndef DRXK_OFDM_TR_SHUTDOWN_TIMEOUT
#define DRXK_OFDM_TR_SHUTDOWN_TIMEOUT (200)
#endif

/* Maximum MPEG transport-stream bitrates per modulation, in bits/second. */
#define DRXK_8VSB_MPEG_BIT_RATE 19392658UL	/* bps */
#define DRXK_DVBT_MPEG_BIT_RATE 32000000UL	/* bps */
#define DRXK_QAM16_MPEG_BIT_RATE 27000000UL	/* bps */
#define DRXK_QAM32_MPEG_BIT_RATE 33000000UL	/* bps */
#define DRXK_QAM64_MPEG_BIT_RATE 40000000UL	/* bps */
#define DRXK_QAM128_MPEG_BIT_RATE 46000000UL	/* bps */
#define DRXK_QAM256_MPEG_BIT_RATE 52000000UL	/* bps */
#define DRXK_MAX_MPEG_BIT_RATE 52000000UL	/* bps */

/*
 * NOTE(review): the IQM_* values below duplicate definitions in drxk_map.h
 * (same values); consider keeping a single copy.
 */
#define IQM_CF_OUT_ENA_OFDM__M 0x4
#define IQM_FS_ADJ_SEL_B_QAM 0x1
#define IQM_FS_ADJ_SEL_B_OFF 0x0
#define IQM_FS_ADJ_SEL_B_VSB 0x2
#define IQM_RC_ADJ_SEL_B_OFF 0x0
#define IQM_RC_ADJ_SEL_B_QAM 0x1
#define IQM_RC_ADJ_SEL_B_VSB 0x2
48
/* Demodulation standard currently selected on the demod. */
enum OperationMode {
	OM_NONE,	/* no standard active */
	OM_QAM_ITU_A,	/* QAM, ITU-T J.83 annex A */
	OM_QAM_ITU_B,	/* QAM, ITU-T J.83 annex B */
	OM_QAM_ITU_C,	/* QAM, ITU-T J.83 annex C */
	OM_DVBT		/* DVB-T OFDM */
};
56
/*
 * Power management levels: DRX_POWER_UP is fully on, DRX_POWER_DOWN fully
 * off; the numbered intermediate modes shut down successively more clock
 * domains (see the DRXK_POWER_DOWN_* aliases below for the ones used here).
 */
enum DRXPowerMode {
	DRX_POWER_UP = 0,
	DRX_POWER_MODE_1,
	DRX_POWER_MODE_2,
	DRX_POWER_MODE_3,
	DRX_POWER_MODE_4,
	DRX_POWER_MODE_5,
	DRX_POWER_MODE_6,
	DRX_POWER_MODE_7,
	DRX_POWER_MODE_8,

	DRX_POWER_MODE_9,
	DRX_POWER_MODE_10,
	DRX_POWER_MODE_11,
	DRX_POWER_MODE_12,
	DRX_POWER_MODE_13,
	DRX_POWER_MODE_14,
	DRX_POWER_MODE_15,
	DRX_POWER_MODE_16,
	DRX_POWER_DOWN = 255
};
78
79
/** \brief Intermediate power mode for DRXK, power down OFDM clock domain */
#ifndef DRXK_POWER_DOWN_OFDM
#define DRXK_POWER_DOWN_OFDM	DRX_POWER_MODE_1
#endif

/** \brief Intermediate power mode for DRXK, power down core (sysclk) */
#ifndef DRXK_POWER_DOWN_CORE
#define DRXK_POWER_DOWN_CORE	DRX_POWER_MODE_9
#endif

/** \brief Intermediate power mode for DRXK, power down pll (only osc runs) */
#ifndef DRXK_POWER_DOWN_PLL
#define DRXK_POWER_DOWN_PLL	DRX_POWER_MODE_10
#endif
94
95
/* AGC control source: hardware loop, fixed user value, or disabled. */
enum AGC_CTRL_MODE {
	DRXK_AGC_CTRL_AUTO = 0,
	DRXK_AGC_CTRL_USER,
	DRXK_AGC_CTRL_OFF
};
/* Driver-level lifecycle state of the demodulator. */
enum EDrxkState {
	DRXK_UNINITIALIZED = 0,
	DRXK_STOPPED,
	DRXK_DTV_STARTED,
	DRXK_ATV_STARTED,
	DRXK_POWERED_DOWN
};
/* Indices into the filter-coefficient tables, one per TV standard. */
enum EDrxkCoefArrayIndex {
	DRXK_COEF_IDX_MN = 0,
	DRXK_COEF_IDX_FM,
	DRXK_COEF_IDX_L,
	DRXK_COEF_IDX_LP,
	DRXK_COEF_IDX_BG,
	DRXK_COEF_IDX_DK,
	DRXK_COEF_IDX_I,
	DRXK_COEF_IDX_MAX	/* number of entries, not a valid index */
};
/* Sound IF output attenuation steps, in 3 dB increments. */
enum EDrxkSifAttenuation {
	DRXK_SIF_ATTENUATION_0DB,
	DRXK_SIF_ATTENUATION_3DB,
	DRXK_SIF_ATTENUATION_6DB,
	DRXK_SIF_ATTENUATION_9DB
};
114enum EDrxkConstellation {
115 DRX_CONSTELLATION_BPSK = 0,
116 DRX_CONSTELLATION_QPSK,
117 DRX_CONSTELLATION_PSK8,
118 DRX_CONSTELLATION_QAM16,
119 DRX_CONSTELLATION_QAM32,
120 DRX_CONSTELLATION_QAM64,
121 DRX_CONSTELLATION_QAM128,
122 DRX_CONSTELLATION_QAM256,
123 DRX_CONSTELLATION_QAM512,
124 DRX_CONSTELLATION_QAM1024,
125 DRX_CONSTELLATION_UNKNOWN = DRX_UNKNOWN,
126 DRX_CONSTELLATION_AUTO = DRX_AUTO
127};
128enum EDrxkInterleaveMode {
129 DRXK_QAM_I12_J17 = 16,
130 DRXK_QAM_I_UNKNOWN = DRX_UNKNOWN
131};
/* Silicon revision ("spin") of the attached device. */
enum {
	DRXK_SPIN_A1 = 0,
	DRXK_SPIN_A2,
	DRXK_SPIN_A3,
	DRXK_SPIN_UNKNOWN
};
138
139enum DRXKCfgDvbtSqiSpeed {
140 DRXK_DVBT_SQI_SPEED_FAST = 0,
141 DRXK_DVBT_SQI_SPEED_MEDIUM,
142 DRXK_DVBT_SQI_SPEED_SLOW,
143 DRXK_DVBT_SQI_SPEED_UNKNOWN = DRX_UNKNOWN
144} ;
145
146enum DRXFftmode_t {
147 DRX_FFTMODE_2K = 0,
148 DRX_FFTMODE_4K,
149 DRX_FFTMODE_8K,
150 DRX_FFTMODE_UNKNOWN = DRX_UNKNOWN,
151 DRX_FFTMODE_AUTO = DRX_AUTO
152};
153
/* Width of the MPEG start (MSTRT) pulse, in bytes. */
enum DRXMPEGStrWidth_t {
	DRX_MPEG_STR_WIDTH_1,
	DRX_MPEG_STR_WIDTH_8
};
158
/* QAM carrier lock range selection. */
enum DRXQamLockRange_t {
	DRX_QAM_LOCKRANGE_NORMAL,
	DRX_QAM_LOCKRANGE_EXTENDED
};
163
164struct DRXKCfgDvbtEchoThres_t {
165 u16 threshold;
166 enum DRXFftmode_t fftMode;
167} ;
168
169struct SCfgAgc {
170 enum AGC_CTRL_MODE ctrlMode; /* off, user, auto */
171 u16 outputLevel; /* range dependent on AGC */
172 u16 minOutputLevel; /* range dependent on AGC */
173 u16 maxOutputLevel; /* range dependent on AGC */
174 u16 speed; /* range dependent on AGC */
175 u16 top; /* rf-agc take over point */
176 u16 cutOffCurrent; /* rf-agc is accelerated if output current
177 is below cut-off current */
178 u16 IngainTgtMax;
179 u16 FastClipCtrlDelay;
180};
181
182struct SCfgPreSaw {
183 u16 reference; /* pre SAW reference value, range 0 .. 31 */
184 bool usePreSaw; /* TRUE algorithms must use pre SAW sense */
185};
186
187struct DRXKOfdmScCmd_t {
188 u16 cmd; /**< Command number */
189 u16 subcmd; /**< Sub-command parameter*/
190 u16 param0; /**< General purpous param */
191 u16 param1; /**< General purpous param */
192 u16 param2; /**< General purpous param */
193 u16 param3; /**< General purpous param */
194 u16 param4; /**< General purpous param */
195};
196
197struct drxk_state {
198 struct dvb_frontend c_frontend;
199 struct dvb_frontend t_frontend;
200 struct dvb_frontend_parameters param;
201 struct device *dev;
202
203 struct i2c_adapter *i2c;
204 u8 demod_address;
205 void *priv;
206
207 struct mutex mutex;
208 struct mutex ctlock;
209
210 u32 m_Instance; /**< Channel 1,2,3 or 4 */
211
212 int m_ChunkSize;
213 u8 Chunk[256];
214
215 bool m_hasLNA;
216 bool m_hasDVBT;
217 bool m_hasDVBC;
218 bool m_hasAudio;
219 bool m_hasATV;
220 bool m_hasOOB;
221 bool m_hasSAWSW; /**< TRUE if mat_tx is available */
222 bool m_hasGPIO1; /**< TRUE if mat_rx is available */
223 bool m_hasGPIO2; /**< TRUE if GPIO is available */
224 bool m_hasIRQN; /**< TRUE if IRQN is available */
225 u16 m_oscClockFreq;
226 u16 m_HICfgTimingDiv;
227 u16 m_HICfgBridgeDelay;
228 u16 m_HICfgWakeUpKey;
229 u16 m_HICfgTimeout;
230 u16 m_HICfgCtrl;
231 s32 m_sysClockFreq; /**< system clock frequency in kHz */
232
233 enum EDrxkState m_DrxkState; /**< State of Drxk (init,stopped,started) */
234 enum OperationMode m_OperationMode; /**< digital standards */
235 struct SCfgAgc m_vsbRfAgcCfg; /**< settings for VSB RF-AGC */
236 struct SCfgAgc m_vsbIfAgcCfg; /**< settings for VSB IF-AGC */
237 u16 m_vsbPgaCfg; /**< settings for VSB PGA */
238 struct SCfgPreSaw m_vsbPreSawCfg; /**< settings for pre SAW sense */
239 s32 m_Quality83percent; /**< MER level (*0.1 dB) for 83% quality indication */
240 s32 m_Quality93percent; /**< MER level (*0.1 dB) for 93% quality indication */
241 bool m_smartAntInverted;
242 bool m_bDebugEnableBridge;
243 bool m_bPDownOpenBridge; /**< only open DRXK bridge before power-down once it has been accessed */
244 bool m_bPowerDown; /**< Power down when not used */
245
246 u32 m_IqmFsRateOfs; /**< frequency shift as written to DRXK register (28bit fixpoint) */
247
248 bool m_enableMPEGOutput; /**< If TRUE, enable MPEG output */
249 bool m_insertRSByte; /**< If TRUE, insert RS byte */
250 bool m_enableParallel; /**< If TRUE, parallel out otherwise serial */
251 bool m_invertDATA; /**< If TRUE, invert DATA signals */
252 bool m_invertERR; /**< If TRUE, invert ERR signal */
253 bool m_invertSTR; /**< If TRUE, invert STR signals */
254 bool m_invertVAL; /**< If TRUE, invert VAL signals */
255 bool m_invertCLK; /**< If TRUE, invert CLK signals */
256 bool m_DVBCStaticCLK;
257 bool m_DVBTStaticCLK; /**< If TRUE, static MPEG clockrate will
258 be used, otherwise clockrate will
259 adapt to the bitrate of the TS */
260 u32 m_DVBTBitrate;
261 u32 m_DVBCBitrate;
262
263 u8 m_TSDataStrength;
264 u8 m_TSClockkStrength;
265
266 enum DRXMPEGStrWidth_t m_widthSTR; /**< MPEG start width */
267 u32 m_mpegTsStaticBitrate; /**< Maximum bitrate in b/s in case
268 static clockrate is selected */
269
270 /* LARGE_INTEGER m_StartTime; */ /**< Contains the time of the last demod start */
271 s32 m_MpegLockTimeOut; /**< WaitForLockStatus Timeout (counts from start time) */
272 s32 m_DemodLockTimeOut; /**< WaitForLockStatus Timeout (counts from start time) */
273
274 bool m_disableTEIhandling;
275
276 bool m_RfAgcPol;
277 bool m_IfAgcPol;
278
279 struct SCfgAgc m_atvRfAgcCfg; /**< settings for ATV RF-AGC */
280 struct SCfgAgc m_atvIfAgcCfg; /**< settings for ATV IF-AGC */
281 struct SCfgPreSaw m_atvPreSawCfg; /**< settings for ATV pre SAW sense */
282 bool m_phaseCorrectionBypass;
283 s16 m_atvTopVidPeak;
284 u16 m_atvTopNoiseTh;
285 enum EDrxkSifAttenuation m_sifAttenuation;
286 bool m_enableCVBSOutput;
287 bool m_enableSIFOutput;
288 bool m_bMirrorFreqSpect;
289 enum EDrxkConstellation m_Constellation; /**< Constellation type of the channel */
290 u32 m_CurrSymbolRate; /**< Current QAM symbol rate */
291 struct SCfgAgc m_qamRfAgcCfg; /**< settings for QAM RF-AGC */
292 struct SCfgAgc m_qamIfAgcCfg; /**< settings for QAM IF-AGC */
293 u16 m_qamPgaCfg; /**< settings for QAM PGA */
294 struct SCfgPreSaw m_qamPreSawCfg; /**< settings for QAM pre SAW sense */
295 enum EDrxkInterleaveMode m_qamInterleaveMode; /**< QAM Interleave mode */
296 u16 m_fecRsPlen;
297 u16 m_fecRsPrescale;
298
299 enum DRXKCfgDvbtSqiSpeed m_sqiSpeed;
300
301 u16 m_GPIO;
302 u16 m_GPIOCfg;
303
304 struct SCfgAgc m_dvbtRfAgcCfg; /**< settings for QAM RF-AGC */
305 struct SCfgAgc m_dvbtIfAgcCfg; /**< settings for QAM IF-AGC */
306 struct SCfgPreSaw m_dvbtPreSawCfg; /**< settings for QAM pre SAW sense */
307
308 u16 m_agcFastClipCtrlDelay;
309 bool m_adcCompPassed;
310 u16 m_adcCompCoef[64];
311 u16 m_adcState;
312
313 u8 *m_microcode;
314 int m_microcode_length;
315 bool m_DRXK_A1_PATCH_CODE;
316 bool m_DRXK_A1_ROM_CODE;
317 bool m_DRXK_A2_ROM_CODE;
318 bool m_DRXK_A3_ROM_CODE;
319 bool m_DRXK_A2_PATCH_CODE;
320 bool m_DRXK_A3_PATCH_CODE;
321
322 bool m_rfmirror;
323 u8 m_deviceSpin;
324 u32 m_iqmRcRate;
325
326 enum DRXPowerMode m_currentPowerMode;
327
328 /*
329 * Configurable parameters at the driver. They stores the values found
330 * at struct drxk_config.
331 */
332
333 u16 UIO_mask; /* Bits used by UIO */
334
335 bool single_master;
336 bool no_i2c_bridge;
337 bool antenna_dvbt;
338 u16 antenna_gpio;
339
340 const char *microcode_name;
341};
342
/* Lock indicator levels, in increasing order of acquisition. */
#define NEVER_LOCK 0
#define NOT_LOCKED 1
#define DEMOD_LOCK 2
#define FEC_LOCK   3
#define MPEG_LOCK  4
348
diff --git a/drivers/media/dvb/frontends/drxk_map.h b/drivers/media/dvb/frontends/drxk_map.h
new file mode 100644
index 000000000000..9b11a8328869
--- /dev/null
+++ b/drivers/media/dvb/frontends/drxk_map.h
@@ -0,0 +1,449 @@
1#define AUD_COMM_EXEC__A 0x1000000
2#define AUD_COMM_EXEC_STOP 0x0
3#define FEC_COMM_EXEC__A 0x1C00000
4#define FEC_COMM_EXEC_STOP 0x0
5#define FEC_COMM_EXEC_ACTIVE 0x1
6#define FEC_DI_COMM_EXEC__A 0x1C20000
7#define FEC_DI_COMM_EXEC_STOP 0x0
8#define FEC_DI_INPUT_CTL__A 0x1C20016
9#define FEC_RS_COMM_EXEC__A 0x1C30000
10#define FEC_RS_COMM_EXEC_STOP 0x0
11#define FEC_RS_MEASUREMENT_PERIOD__A 0x1C30012
12#define FEC_RS_MEASUREMENT_PRESCALE__A 0x1C30013
13#define FEC_OC_MODE__A 0x1C40011
14#define FEC_OC_MODE_PARITY__M 0x1
15#define FEC_OC_DTO_MODE__A 0x1C40014
16#define FEC_OC_DTO_MODE_DYNAMIC__M 0x1
17#define FEC_OC_DTO_MODE_OFFSET_ENABLE__M 0x4
18#define FEC_OC_DTO_PERIOD__A 0x1C40015
19#define FEC_OC_DTO_BURST_LEN__A 0x1C40018
20#define FEC_OC_FCT_MODE__A 0x1C4001A
21#define FEC_OC_FCT_MODE__PRE 0x0
22#define FEC_OC_FCT_MODE_RAT_ENA__M 0x1
23#define FEC_OC_FCT_MODE_VIRT_ENA__M 0x2
24#define FEC_OC_TMD_MODE__A 0x1C4001E
25#define FEC_OC_TMD_COUNT__A 0x1C4001F
26#define FEC_OC_TMD_HI_MARGIN__A 0x1C40020
27#define FEC_OC_TMD_LO_MARGIN__A 0x1C40021
28#define FEC_OC_TMD_INT_UPD_RATE__A 0x1C40023
29#define FEC_OC_AVR_PARM_A__A 0x1C40026
30#define FEC_OC_AVR_PARM_B__A 0x1C40027
31#define FEC_OC_RCN_GAIN__A 0x1C4002E
32#define FEC_OC_RCN_CTL_RATE_LO__A 0x1C40030
33#define FEC_OC_RCN_CTL_STEP_LO__A 0x1C40032
34#define FEC_OC_RCN_CTL_STEP_HI__A 0x1C40033
35#define FEC_OC_SNC_MODE__A 0x1C40040
36#define FEC_OC_SNC_MODE_SHUTDOWN__M 0x10
37#define FEC_OC_SNC_LWM__A 0x1C40041
38#define FEC_OC_SNC_HWM__A 0x1C40042
39#define FEC_OC_SNC_UNLOCK__A 0x1C40043
40#define FEC_OC_SNC_FAIL_PERIOD__A 0x1C40046
41#define FEC_OC_IPR_MODE__A 0x1C40048
42#define FEC_OC_IPR_MODE_SERIAL__M 0x1
43#define FEC_OC_IPR_MODE_MCLK_DIS_DAT_ABS__M 0x4
44#define FEC_OC_IPR_MODE_MVAL_DIS_PAR__M 0x10
45#define FEC_OC_IPR_INVERT__A 0x1C40049
46#define FEC_OC_IPR_INVERT_MD0__M 0x1
47#define FEC_OC_IPR_INVERT_MD1__M 0x2
48#define FEC_OC_IPR_INVERT_MD2__M 0x4
49#define FEC_OC_IPR_INVERT_MD3__M 0x8
50#define FEC_OC_IPR_INVERT_MD4__M 0x10
51#define FEC_OC_IPR_INVERT_MD5__M 0x20
52#define FEC_OC_IPR_INVERT_MD6__M 0x40
53#define FEC_OC_IPR_INVERT_MD7__M 0x80
54#define FEC_OC_IPR_INVERT_MERR__M 0x100
55#define FEC_OC_IPR_INVERT_MSTRT__M 0x200
56#define FEC_OC_IPR_INVERT_MVAL__M 0x400
57#define FEC_OC_IPR_INVERT_MCLK__M 0x800
58#define FEC_OC_OCR_INVERT__A 0x1C40052
59#define IQM_COMM_EXEC__A 0x1800000
60#define IQM_COMM_EXEC_B_STOP 0x0
61#define IQM_COMM_EXEC_B_ACTIVE 0x1
62#define IQM_FS_RATE_OFS_LO__A 0x1820010
63#define IQM_FS_ADJ_SEL__A 0x1820014
64#define IQM_FS_ADJ_SEL_B_OFF 0x0
65#define IQM_FS_ADJ_SEL_B_QAM 0x1
66#define IQM_FS_ADJ_SEL_B_VSB 0x2
67#define IQM_FD_RATESEL__A 0x1830010
68#define IQM_RC_RATE_OFS_LO__A 0x1840010
69#define IQM_RC_RATE_OFS_LO__W 16
70#define IQM_RC_RATE_OFS_LO__M 0xFFFF
71#define IQM_RC_RATE_OFS_HI__M 0xFF
72#define IQM_RC_ADJ_SEL__A 0x1840014
73#define IQM_RC_ADJ_SEL_B_OFF 0x0
74#define IQM_RC_ADJ_SEL_B_QAM 0x1
75#define IQM_RC_ADJ_SEL_B_VSB 0x2
76#define IQM_RC_STRETCH__A 0x1840016
77#define IQM_CF_COMM_INT_MSK__A 0x1860006
78#define IQM_CF_SYMMETRIC__A 0x1860010
79#define IQM_CF_MIDTAP__A 0x1860011
80#define IQM_CF_MIDTAP_RE__B 0
81#define IQM_CF_MIDTAP_IM__B 1
82#define IQM_CF_OUT_ENA__A 0x1860012
83#define IQM_CF_OUT_ENA_QAM__B 1
84#define IQM_CF_OUT_ENA_OFDM__M 0x4
85#define IQM_CF_ADJ_SEL__A 0x1860013
86#define IQM_CF_SCALE__A 0x1860014
87#define IQM_CF_SCALE_SH__A 0x1860015
88#define IQM_CF_SCALE_SH__PRE 0x0
89#define IQM_CF_POW_MEAS_LEN__A 0x1860017
90#define IQM_CF_DS_ENA__A 0x1860019
91#define IQM_CF_TAP_RE0__A 0x1860020
92#define IQM_CF_TAP_IM0__A 0x1860040
93#define IQM_CF_CLP_VAL__A 0x1860060
94#define IQM_CF_DATATH__A 0x1860061
95#define IQM_CF_PKDTH__A 0x1860062
96#define IQM_CF_WND_LEN__A 0x1860063
97#define IQM_CF_DET_LCT__A 0x1860064
98#define IQM_CF_BYPASSDET__A 0x1860067
99#define IQM_AF_COMM_EXEC__A 0x1870000
100#define IQM_AF_COMM_EXEC_ACTIVE 0x1
101#define IQM_AF_CLKNEG__A 0x1870012
102#define IQM_AF_CLKNEG_CLKNEGDATA__M 0x2
103#define IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_POS 0x0
104#define IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_NEG 0x2
105#define IQM_AF_START_LOCK__A 0x187001B
106#define IQM_AF_PHASE0__A 0x187001C
107#define IQM_AF_PHASE1__A 0x187001D
108#define IQM_AF_PHASE2__A 0x187001E
109#define IQM_AF_CLP_LEN__A 0x1870023
110#define IQM_AF_CLP_TH__A 0x1870024
111#define IQM_AF_SNS_LEN__A 0x1870026
112#define IQM_AF_AGC_IF__A 0x1870028
113#define IQM_AF_AGC_RF__A 0x1870029
114#define IQM_AF_PDREF__A 0x187002B
115#define IQM_AF_PDREF__M 0x1F
116#define IQM_AF_STDBY__A 0x187002C
117#define IQM_AF_STDBY_STDBY_ADC_STANDBY 0x2
118#define IQM_AF_STDBY_STDBY_AMP_STANDBY 0x4
119#define IQM_AF_STDBY_STDBY_PD_STANDBY 0x8
120#define IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY 0x10
121#define IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY 0x20
122#define IQM_AF_AMUX__A 0x187002D
123#define IQM_AF_AMUX_SIGNAL2ADC 0x1
124#define IQM_AF_UPD_SEL__A 0x187002F
125#define IQM_AF_INC_LCT__A 0x1870034
126#define IQM_AF_INC_BYPASS__A 0x1870036
127#define OFDM_CP_COMM_EXEC__A 0x2800000
128#define OFDM_CP_COMM_EXEC_STOP 0x0
129#define OFDM_EC_SB_PRIOR__A 0x3410013
130#define OFDM_EC_SB_PRIOR_HI 0x0
131#define OFDM_EC_SB_PRIOR_LO 0x1
132#define OFDM_EQ_TOP_TD_TPS_CONST__A 0x3010054
133#define OFDM_EQ_TOP_TD_TPS_CONST__M 0x3
134#define OFDM_EQ_TOP_TD_TPS_CONST_64QAM 0x2
135#define OFDM_EQ_TOP_TD_TPS_CODE_HP__A 0x3010056
136#define OFDM_EQ_TOP_TD_TPS_CODE_HP__M 0x7
137#define OFDM_EQ_TOP_TD_TPS_CODE_LP_7_8 0x4
138#define OFDM_EQ_TOP_TD_SQR_ERR_I__A 0x301005E
139#define OFDM_EQ_TOP_TD_SQR_ERR_Q__A 0x301005F
140#define OFDM_EQ_TOP_TD_SQR_ERR_EXP__A 0x3010060
141#define OFDM_EQ_TOP_TD_REQ_SMB_CNT__A 0x3010061
142#define OFDM_EQ_TOP_TD_TPS_PWR_OFS__A 0x3010062
143#define OFDM_LC_COMM_EXEC__A 0x3800000
144#define OFDM_LC_COMM_EXEC_STOP 0x0
145#define OFDM_SC_COMM_EXEC__A 0x3C00000
146#define OFDM_SC_COMM_EXEC_STOP 0x0
147#define OFDM_SC_COMM_STATE__A 0x3C00001
148#define OFDM_SC_RA_RAM_PARAM0__A 0x3C20040
149#define OFDM_SC_RA_RAM_PARAM1__A 0x3C20041
150#define OFDM_SC_RA_RAM_CMD_ADDR__A 0x3C20042
151#define OFDM_SC_RA_RAM_CMD__A 0x3C20043
152#define OFDM_SC_RA_RAM_CMD_NULL 0x0
153#define OFDM_SC_RA_RAM_CMD_PROC_START 0x1
154#define OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM 0x3
155#define OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM 0x4
156#define OFDM_SC_RA_RAM_CMD_GET_OP_PARAM 0x5
157#define OFDM_SC_RA_RAM_CMD_USER_IO 0x6
158#define OFDM_SC_RA_RAM_CMD_SET_TIMER 0x7
159#define OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING 0x8
160#define OFDM_SC_RA_RAM_SW_EVENT_RUN_NMASK__M 0x1
161#define OFDM_SC_RA_RAM_LOCKTRACK_MIN 0x1
162#define OFDM_SC_RA_RAM_OP_PARAM__A 0x3C20048
163#define OFDM_SC_RA_RAM_OP_PARAM_MODE__M 0x3
164#define OFDM_SC_RA_RAM_OP_PARAM_MODE_2K 0x0
165#define OFDM_SC_RA_RAM_OP_PARAM_MODE_8K 0x1
166#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_32 0x0
167#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_16 0x4
168#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_8 0x8
169#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_4 0xC
170#define OFDM_SC_RA_RAM_OP_PARAM_CONST_QPSK 0x0
171#define OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM16 0x10
172#define OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64 0x20
173#define OFDM_SC_RA_RAM_OP_PARAM_HIER_NO 0x0
174#define OFDM_SC_RA_RAM_OP_PARAM_HIER_A1 0x40
175#define OFDM_SC_RA_RAM_OP_PARAM_HIER_A2 0x80
176#define OFDM_SC_RA_RAM_OP_PARAM_HIER_A4 0xC0
177#define OFDM_SC_RA_RAM_OP_PARAM_RATE_1_2 0x0
178#define OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3 0x200
179#define OFDM_SC_RA_RAM_OP_PARAM_RATE_3_4 0x400
180#define OFDM_SC_RA_RAM_OP_PARAM_RATE_5_6 0x600
181#define OFDM_SC_RA_RAM_OP_PARAM_RATE_7_8 0x800
182#define OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI 0x0
183#define OFDM_SC_RA_RAM_OP_PARAM_PRIO_LO 0x1000
184#define OFDM_SC_RA_RAM_OP_AUTO_MODE__M 0x1
185#define OFDM_SC_RA_RAM_OP_AUTO_GUARD__M 0x2
186#define OFDM_SC_RA_RAM_OP_AUTO_CONST__M 0x4
187#define OFDM_SC_RA_RAM_OP_AUTO_HIER__M 0x8
188#define OFDM_SC_RA_RAM_OP_AUTO_RATE__M 0x10
189#define OFDM_SC_RA_RAM_LOCK__A 0x3C2004B
190#define OFDM_SC_RA_RAM_LOCK_DEMOD__M 0x1
191#define OFDM_SC_RA_RAM_LOCK_FEC__M 0x2
192#define OFDM_SC_RA_RAM_LOCK_MPEG__M 0x4
193#define OFDM_SC_RA_RAM_LOCK_NODVBT__M 0x8
194#define OFDM_SC_RA_RAM_BE_OPT_DELAY__A 0x3C2004D
195#define OFDM_SC_RA_RAM_BE_OPT_INIT_DELAY__A 0x3C2004E
196#define OFDM_SC_RA_RAM_ECHO_THRES__A 0x3C2004F
197#define OFDM_SC_RA_RAM_ECHO_THRES_8K__B 0
198#define OFDM_SC_RA_RAM_ECHO_THRES_8K__M 0xFF
199#define OFDM_SC_RA_RAM_ECHO_THRES_2K__B 8
200#define OFDM_SC_RA_RAM_ECHO_THRES_2K__M 0xFF00
201#define OFDM_SC_RA_RAM_CONFIG__A 0x3C20050
202#define OFDM_SC_RA_RAM_CONFIG_NE_FIX_ENABLE__M 0x800
203#define OFDM_SC_RA_RAM_FR_THRES_8K__A 0x3C2007D
204#define OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A 0x3C200E0
205#define OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A 0x3C200E1
206#define OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A 0x3C200E3
207#define OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A 0x3C200E4
208#define OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A 0x3C200F8
209#define QAM_COMM_EXEC__A 0x1400000
210#define QAM_COMM_EXEC_STOP 0x0
211#define QAM_COMM_EXEC_ACTIVE 0x1
212#define QAM_TOP_ANNEX_A 0x0
213#define QAM_TOP_ANNEX_C 0x2
214#define QAM_SL_ERR_POWER__A 0x1430017
215#define QAM_DQ_QUAL_FUN0__A 0x1440018
216#define QAM_DQ_QUAL_FUN1__A 0x1440019
217#define QAM_DQ_QUAL_FUN2__A 0x144001A
218#define QAM_DQ_QUAL_FUN3__A 0x144001B
219#define QAM_DQ_QUAL_FUN4__A 0x144001C
220#define QAM_DQ_QUAL_FUN5__A 0x144001D
221#define QAM_LC_MODE__A 0x1450010
222#define QAM_LC_QUAL_TAB0__A 0x1450018
223#define QAM_LC_QUAL_TAB1__A 0x1450019
224#define QAM_LC_QUAL_TAB2__A 0x145001A
225#define QAM_LC_QUAL_TAB3__A 0x145001B
226#define QAM_LC_QUAL_TAB4__A 0x145001C
227#define QAM_LC_QUAL_TAB5__A 0x145001D
228#define QAM_LC_QUAL_TAB6__A 0x145001E
229#define QAM_LC_QUAL_TAB8__A 0x145001F
230#define QAM_LC_QUAL_TAB9__A 0x1450020
231#define QAM_LC_QUAL_TAB10__A 0x1450021
232#define QAM_LC_QUAL_TAB12__A 0x1450022
233#define QAM_LC_QUAL_TAB15__A 0x1450023
234#define QAM_LC_QUAL_TAB16__A 0x1450024
235#define QAM_LC_QUAL_TAB20__A 0x1450025
236#define QAM_LC_QUAL_TAB25__A 0x1450026
237#define QAM_LC_LPF_FACTORP__A 0x1450028
238#define QAM_LC_LPF_FACTORI__A 0x1450029
239#define QAM_LC_RATE_LIMIT__A 0x145002A
240#define QAM_LC_SYMBOL_FREQ__A 0x145002B
241#define QAM_SY_TIMEOUT__A 0x1470011
242#define QAM_SY_TIMEOUT__PRE 0x3A98
243#define QAM_SY_SYNC_LWM__A 0x1470012
244#define QAM_SY_SYNC_AWM__A 0x1470013
245#define QAM_SY_SYNC_HWM__A 0x1470014
246#define QAM_SY_SP_INV__A 0x1470017
247#define QAM_SY_SP_INV_SPECTRUM_INV_DIS 0x0
248#define SCU_COMM_EXEC__A 0x800000
249#define SCU_COMM_EXEC_STOP 0x0
250#define SCU_COMM_EXEC_ACTIVE 0x1
251#define SCU_COMM_EXEC_HOLD 0x2
252#define SCU_RAM_DRIVER_DEBUG__A 0x831EBF
253#define SCU_RAM_QAM_FSM_STEP_PERIOD__A 0x831EC4
254#define SCU_RAM_GPIO__A 0x831EC7
255#define SCU_RAM_GPIO_HW_LOCK_IND_DISABLE 0x0
256#define SCU_RAM_AGC_CLP_CTRL_MODE__A 0x831EC8
257#define SCU_RAM_FEC_ACCUM_PKT_FAILURES__A 0x831ECB
258#define SCU_RAM_FEC_PRE_RS_BER_FILTER_SH__A 0x831F05
259#define SCU_RAM_AGC_FAST_SNS_CTRL_DELAY__A 0x831F15
260#define SCU_RAM_AGC_KI_CYCLEN__A 0x831F17
261#define SCU_RAM_AGC_SNS_CYCLEN__A 0x831F18
262#define SCU_RAM_AGC_RF_SNS_DEV_MAX__A 0x831F19
263#define SCU_RAM_AGC_RF_SNS_DEV_MIN__A 0x831F1A
264#define SCU_RAM_AGC_RF_MAX__A 0x831F1B
265#define SCU_RAM_AGC_CONFIG__A 0x831F24
266#define SCU_RAM_AGC_CONFIG_DISABLE_RF_AGC__M 0x1
267#define SCU_RAM_AGC_CONFIG_DISABLE_IF_AGC__M 0x2
268#define SCU_RAM_AGC_CONFIG_INV_IF_POL__M 0x100
269#define SCU_RAM_AGC_CONFIG_INV_RF_POL__M 0x200
270#define SCU_RAM_AGC_KI__A 0x831F25
271#define SCU_RAM_AGC_KI_RF__B 4
272#define SCU_RAM_AGC_KI_RF__M 0xF0
273#define SCU_RAM_AGC_KI_IF__B 8
274#define SCU_RAM_AGC_KI_IF__M 0xF00
275#define SCU_RAM_AGC_KI_RED__A 0x831F26
276#define SCU_RAM_AGC_KI_RED_RAGC_RED__B 2
277#define SCU_RAM_AGC_KI_RED_RAGC_RED__M 0xC
278#define SCU_RAM_AGC_KI_RED_IAGC_RED__B 4
279#define SCU_RAM_AGC_KI_RED_IAGC_RED__M 0x30
280#define SCU_RAM_AGC_KI_INNERGAIN_MIN__A 0x831F27
281#define SCU_RAM_AGC_KI_MINGAIN__A 0x831F28
282#define SCU_RAM_AGC_KI_MAXGAIN__A 0x831F29
283#define SCU_RAM_AGC_KI_MAXMINGAIN_TH__A 0x831F2A
284#define SCU_RAM_AGC_KI_MIN__A 0x831F2B
285#define SCU_RAM_AGC_KI_MAX__A 0x831F2C
286#define SCU_RAM_AGC_CLP_SUM__A 0x831F2D
287#define SCU_RAM_AGC_CLP_SUM_MIN__A 0x831F2E
288#define SCU_RAM_AGC_CLP_SUM_MAX__A 0x831F2F
289#define SCU_RAM_AGC_CLP_CYCLEN__A 0x831F30
290#define SCU_RAM_AGC_CLP_CYCCNT__A 0x831F31
291#define SCU_RAM_AGC_CLP_DIR_TO__A 0x831F32
292#define SCU_RAM_AGC_CLP_DIR_WD__A 0x831F33
293#define SCU_RAM_AGC_CLP_DIR_STP__A 0x831F34
294#define SCU_RAM_AGC_SNS_SUM__A 0x831F35
295#define SCU_RAM_AGC_SNS_SUM_MIN__A 0x831F36
296#define SCU_RAM_AGC_SNS_SUM_MAX__A 0x831F37
297#define SCU_RAM_AGC_SNS_CYCCNT__A 0x831F38
298#define SCU_RAM_AGC_SNS_DIR_TO__A 0x831F39
299#define SCU_RAM_AGC_SNS_DIR_WD__A 0x831F3A
300#define SCU_RAM_AGC_SNS_DIR_STP__A 0x831F3B
301#define SCU_RAM_AGC_INGAIN_TGT__A 0x831F3D
302#define SCU_RAM_AGC_INGAIN_TGT_MIN__A 0x831F3E
303#define SCU_RAM_AGC_INGAIN_TGT_MAX__A 0x831F3F
304#define SCU_RAM_AGC_IF_IACCU_HI__A 0x831F40
305#define SCU_RAM_AGC_IF_IACCU_LO__A 0x831F41
306#define SCU_RAM_AGC_IF_IACCU_HI_TGT__A 0x831F42
307#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MIN__A 0x831F43
308#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A 0x831F44
309#define SCU_RAM_AGC_RF_IACCU_HI__A 0x831F45
310#define SCU_RAM_AGC_RF_IACCU_LO__A 0x831F46
311#define SCU_RAM_AGC_RF_IACCU_HI_CO__A 0x831F47
312#define SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A 0x831F84
313#define SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A 0x831F85
314#define SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A 0x831F86
315#define SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A 0x831F87
316#define SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A 0x831F88
317#define SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A 0x831F89
318#define SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A 0x831F8A
319#define SCU_RAM_QAM_FSM_RTH__A 0x831F8E
320#define SCU_RAM_QAM_FSM_FTH__A 0x831F8F
321#define SCU_RAM_QAM_FSM_PTH__A 0x831F90
322#define SCU_RAM_QAM_FSM_MTH__A 0x831F91
323#define SCU_RAM_QAM_FSM_CTH__A 0x831F92
324#define SCU_RAM_QAM_FSM_QTH__A 0x831F93
325#define SCU_RAM_QAM_FSM_RATE_LIM__A 0x831F94
326#define SCU_RAM_QAM_FSM_FREQ_LIM__A 0x831F95
327#define SCU_RAM_QAM_FSM_COUNT_LIM__A 0x831F96
328#define SCU_RAM_QAM_LC_CA_COARSE__A 0x831F97
329#define SCU_RAM_QAM_LC_CA_FINE__A 0x831F99
330#define SCU_RAM_QAM_LC_CP_COARSE__A 0x831F9A
331#define SCU_RAM_QAM_LC_CP_MEDIUM__A 0x831F9B
332#define SCU_RAM_QAM_LC_CP_FINE__A 0x831F9C
333#define SCU_RAM_QAM_LC_CI_COARSE__A 0x831F9D
334#define SCU_RAM_QAM_LC_CI_MEDIUM__A 0x831F9E
335#define SCU_RAM_QAM_LC_CI_FINE__A 0x831F9F
336#define SCU_RAM_QAM_LC_EP_COARSE__A 0x831FA0
337#define SCU_RAM_QAM_LC_EP_MEDIUM__A 0x831FA1
338#define SCU_RAM_QAM_LC_EP_FINE__A 0x831FA2
339#define SCU_RAM_QAM_LC_EI_COARSE__A 0x831FA3
340#define SCU_RAM_QAM_LC_EI_MEDIUM__A 0x831FA4
341#define SCU_RAM_QAM_LC_EI_FINE__A 0x831FA5
342#define SCU_RAM_QAM_LC_CF_COARSE__A 0x831FA6
343#define SCU_RAM_QAM_LC_CF_MEDIUM__A 0x831FA7
344#define SCU_RAM_QAM_LC_CF_FINE__A 0x831FA8
345#define SCU_RAM_QAM_LC_CF1_COARSE__A 0x831FA9
346#define SCU_RAM_QAM_LC_CF1_MEDIUM__A 0x831FAA
347#define SCU_RAM_QAM_LC_CF1_FINE__A 0x831FAB
348#define SCU_RAM_QAM_SL_SIG_POWER__A 0x831FAC
349#define SCU_RAM_QAM_EQ_CMA_RAD0__A 0x831FAD
350#define SCU_RAM_QAM_EQ_CMA_RAD1__A 0x831FAE
351#define SCU_RAM_QAM_EQ_CMA_RAD2__A 0x831FAF
352#define SCU_RAM_QAM_EQ_CMA_RAD3__A 0x831FB0
353#define SCU_RAM_QAM_EQ_CMA_RAD4__A 0x831FB1
354#define SCU_RAM_QAM_EQ_CMA_RAD5__A 0x831FB2
355#define SCU_RAM_QAM_LOCKED_LOCKED_DEMOD_LOCKED 0x4000
356#define SCU_RAM_QAM_LOCKED_LOCKED_LOCKED 0x8000
357#define SCU_RAM_QAM_LOCKED_LOCKED_NEVER_LOCK 0xC000
358#define SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A 0x831FEA
359#define SCU_RAM_DRIVER_VER_HI__A 0x831FEB
360#define SCU_RAM_DRIVER_VER_LO__A 0x831FEC
361#define SCU_RAM_PARAM_15__A 0x831FED
362#define SCU_RAM_PARAM_0__A 0x831FFC
363#define SCU_RAM_COMMAND__A 0x831FFD
364#define SCU_RAM_COMMAND_CMD_DEMOD_RESET 0x1
365#define SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV 0x2
366#define SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM 0x3
367#define SCU_RAM_COMMAND_CMD_DEMOD_START 0x4
368#define SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK 0x5
369#define SCU_RAM_COMMAND_CMD_DEMOD_STOP 0x9
370#define SCU_RAM_COMMAND_STANDARD_QAM 0x200
371#define SCU_RAM_COMMAND_STANDARD_OFDM 0x400
372#define SIO_TOP_COMM_KEY__A 0x41000F
373#define SIO_TOP_COMM_KEY_KEY 0xFABA
374#define SIO_TOP_JTAGID_LO__A 0x410012
375#define SIO_HI_RA_RAM_RES__A 0x420031
376#define SIO_HI_RA_RAM_CMD__A 0x420032
377#define SIO_HI_RA_RAM_CMD_RESET 0x2
378#define SIO_HI_RA_RAM_CMD_CONFIG 0x3
379#define SIO_HI_RA_RAM_CMD_BRDCTRL 0x7
380#define SIO_HI_RA_RAM_PAR_1__A 0x420033
381#define SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY 0x3945
382#define SIO_HI_RA_RAM_PAR_2__A 0x420034
383#define SIO_HI_RA_RAM_PAR_2_CFG_DIV__M 0x7F
384#define SIO_HI_RA_RAM_PAR_2_BRD_CFG_OPEN 0x0
385#define SIO_HI_RA_RAM_PAR_2_BRD_CFG_CLOSED 0x4
386#define SIO_HI_RA_RAM_PAR_3__A 0x420035
387#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M 0x7F
388#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SCL__B 7
389#define SIO_HI_RA_RAM_PAR_3_ACP_RW_READ 0x0
390#define SIO_HI_RA_RAM_PAR_3_ACP_RW_WRITE 0x8
391#define SIO_HI_RA_RAM_PAR_4__A 0x420036
392#define SIO_HI_RA_RAM_PAR_5__A 0x420037
393#define SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE 0x1
394#define SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__M 0x8
395#define SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ 0x8
396#define SIO_HI_RA_RAM_PAR_6__A 0x420038
397#define SIO_CC_PLL_LOCK__A 0x450012
398#define SIO_CC_PWD_MODE__A 0x450015
399#define SIO_CC_PWD_MODE_LEVEL_NONE 0x0
400#define SIO_CC_PWD_MODE_LEVEL_OFDM 0x1
401#define SIO_CC_PWD_MODE_LEVEL_CLOCK 0x2
402#define SIO_CC_PWD_MODE_LEVEL_PLL 0x3
403#define SIO_CC_PWD_MODE_LEVEL_OSC 0x4
404#define SIO_CC_SOFT_RST__A 0x450016
405#define SIO_CC_SOFT_RST_OFDM__M 0x1
406#define SIO_CC_SOFT_RST_SYS__M 0x2
407#define SIO_CC_SOFT_RST_OSC__M 0x4
408#define SIO_CC_UPDATE__A 0x450017
409#define SIO_CC_UPDATE_KEY 0xFABA
410#define SIO_OFDM_SH_OFDM_RING_ENABLE__A 0x470010
411#define SIO_OFDM_SH_OFDM_RING_ENABLE_OFF 0x0
412#define SIO_OFDM_SH_OFDM_RING_ENABLE_ON 0x1
413#define SIO_OFDM_SH_OFDM_RING_STATUS__A 0x470012
414#define SIO_OFDM_SH_OFDM_RING_STATUS_DOWN 0x0
415#define SIO_OFDM_SH_OFDM_RING_STATUS_ENABLED 0x1
416#define SIO_BL_COMM_EXEC__A 0x480000
417#define SIO_BL_COMM_EXEC_ACTIVE 0x1
418#define SIO_BL_STATUS__A 0x480010
419#define SIO_BL_MODE__A 0x480011
420#define SIO_BL_MODE_DIRECT 0x0
421#define SIO_BL_MODE_CHAIN 0x1
422#define SIO_BL_ENABLE__A 0x480012
423#define SIO_BL_ENABLE_ON 0x1
424#define SIO_BL_TGT_HDR__A 0x480014
425#define SIO_BL_TGT_ADDR__A 0x480015
426#define SIO_BL_SRC_ADDR__A 0x480016
427#define SIO_BL_SRC_LEN__A 0x480017
428#define SIO_BL_CHAIN_ADDR__A 0x480018
429#define SIO_BL_CHAIN_LEN__A 0x480019
430#define SIO_PDR_MON_CFG__A 0x7F0010
431#define SIO_PDR_UIO_IN_HI__A 0x7F0015
432#define SIO_PDR_UIO_OUT_LO__A 0x7F0016
433#define SIO_PDR_OHW_CFG__A 0x7F001F
434#define SIO_PDR_OHW_CFG_FREF_SEL__M 0x3
435#define SIO_PDR_MSTRT_CFG__A 0x7F0025
436#define SIO_PDR_MERR_CFG__A 0x7F0026
437#define SIO_PDR_MCLK_CFG__A 0x7F0028
438#define SIO_PDR_MCLK_CFG_DRIVE__B 3
439#define SIO_PDR_MVAL_CFG__A 0x7F0029
440#define SIO_PDR_MD0_CFG__A 0x7F002A
441#define SIO_PDR_MD0_CFG_DRIVE__B 3
442#define SIO_PDR_MD1_CFG__A 0x7F002B
443#define SIO_PDR_MD2_CFG__A 0x7F002C
444#define SIO_PDR_MD3_CFG__A 0x7F002D
445#define SIO_PDR_MD4_CFG__A 0x7F002F
446#define SIO_PDR_MD5_CFG__A 0x7F0030
447#define SIO_PDR_MD6_CFG__A 0x7F0031
448#define SIO_PDR_MD7_CFG__A 0x7F0032
449#define SIO_PDR_SMA_TX_CFG__A 0x7F0038
diff --git a/drivers/media/dvb/frontends/itd1000.c b/drivers/media/dvb/frontends/itd1000.c
index f7a40a18777a..aa9ccb821fa5 100644
--- a/drivers/media/dvb/frontends/itd1000.c
+++ b/drivers/media/dvb/frontends/itd1000.c
@@ -35,21 +35,18 @@ static int debug;
35module_param(debug, int, 0644); 35module_param(debug, int, 0644);
36MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); 36MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
37 37
38#define deb(args...) do { \ 38#define itd_dbg(args...) do { \
39 if (debug) { \ 39 if (debug) { \
40 printk(KERN_DEBUG "ITD1000: " args);\ 40 printk(KERN_DEBUG "ITD1000: " args);\
41 printk("\n"); \
42 } \ 41 } \
43} while (0) 42} while (0)
44 43
45#define warn(args...) do { \ 44#define itd_warn(args...) do { \
46 printk(KERN_WARNING "ITD1000: " args); \ 45 printk(KERN_WARNING "ITD1000: " args); \
47 printk("\n"); \
48} while (0) 46} while (0)
49 47
50#define info(args...) do { \ 48#define itd_info(args...) do { \
51 printk(KERN_INFO "ITD1000: " args); \ 49 printk(KERN_INFO "ITD1000: " args); \
52 printk("\n"); \
53} while (0) 50} while (0)
54 51
55/* don't write more than one byte with flexcop behind */ 52/* don't write more than one byte with flexcop behind */
@@ -62,7 +59,7 @@ static int itd1000_write_regs(struct itd1000_state *state, u8 reg, u8 v[], u8 le
62 buf[0] = reg; 59 buf[0] = reg;
63 memcpy(&buf[1], v, len); 60 memcpy(&buf[1], v, len);
64 61
65 /* deb("wr %02x: %02x", reg, v[0]); */ 62 /* itd_dbg("wr %02x: %02x\n", reg, v[0]); */
66 63
67 if (i2c_transfer(state->i2c, &msg, 1) != 1) { 64 if (i2c_transfer(state->i2c, &msg, 1) != 1) {
68 printk(KERN_WARNING "itd1000 I2C write failed\n"); 65 printk(KERN_WARNING "itd1000 I2C write failed\n");
@@ -83,7 +80,7 @@ static int itd1000_read_reg(struct itd1000_state *state, u8 reg)
83 itd1000_write_regs(state, (reg - 1) & 0xff, &state->shadow[(reg - 1) & 0xff], 1); 80 itd1000_write_regs(state, (reg - 1) & 0xff, &state->shadow[(reg - 1) & 0xff], 1);
84 81
85 if (i2c_transfer(state->i2c, msg, 2) != 2) { 82 if (i2c_transfer(state->i2c, msg, 2) != 2) {
86 warn("itd1000 I2C read failed"); 83 itd_warn("itd1000 I2C read failed\n");
87 return -EREMOTEIO; 84 return -EREMOTEIO;
88 } 85 }
89 return val; 86 return val;
@@ -127,14 +124,14 @@ static void itd1000_set_lpf_bw(struct itd1000_state *state, u32 symbol_rate)
127 u8 bbgvmin = itd1000_read_reg(state, BBGVMIN) & 0xf0; 124 u8 bbgvmin = itd1000_read_reg(state, BBGVMIN) & 0xf0;
128 u8 bw = itd1000_read_reg(state, BW) & 0xf0; 125 u8 bw = itd1000_read_reg(state, BW) & 0xf0;
129 126
130 deb("symbol_rate = %d", symbol_rate); 127 itd_dbg("symbol_rate = %d\n", symbol_rate);
131 128
132 /* not sure what is that ? - starting to download the table */ 129 /* not sure what is that ? - starting to download the table */
133 itd1000_write_reg(state, CON1, con1 | (1 << 1)); 130 itd1000_write_reg(state, CON1, con1 | (1 << 1));
134 131
135 for (i = 0; i < ARRAY_SIZE(itd1000_lpf_pga); i++) 132 for (i = 0; i < ARRAY_SIZE(itd1000_lpf_pga); i++)
136 if (symbol_rate < itd1000_lpf_pga[i].symbol_rate) { 133 if (symbol_rate < itd1000_lpf_pga[i].symbol_rate) {
137 deb("symrate: index: %d pgaext: %x, bbgvmin: %x", i, itd1000_lpf_pga[i].pgaext, itd1000_lpf_pga[i].bbgvmin); 134 itd_dbg("symrate: index: %d pgaext: %x, bbgvmin: %x\n", i, itd1000_lpf_pga[i].pgaext, itd1000_lpf_pga[i].bbgvmin);
138 itd1000_write_reg(state, PLLFH, pllfh | (itd1000_lpf_pga[i].pgaext << 4)); 135 itd1000_write_reg(state, PLLFH, pllfh | (itd1000_lpf_pga[i].pgaext << 4));
139 itd1000_write_reg(state, BBGVMIN, bbgvmin | (itd1000_lpf_pga[i].bbgvmin)); 136 itd1000_write_reg(state, BBGVMIN, bbgvmin | (itd1000_lpf_pga[i].bbgvmin));
140 itd1000_write_reg(state, BW, bw | (i & 0x0f)); 137 itd1000_write_reg(state, BW, bw | (i & 0x0f));
@@ -182,7 +179,7 @@ static void itd1000_set_vco(struct itd1000_state *state, u32 freq_khz)
182 179
183 adcout = itd1000_read_reg(state, PLLLOCK) & 0x0f; 180 adcout = itd1000_read_reg(state, PLLLOCK) & 0x0f;
184 181
185 deb("VCO: %dkHz: %d -> ADCOUT: %d %02x", freq_khz, itd1000_vcorg[i].vcorg, adcout, vco_chp1_i2c); 182 itd_dbg("VCO: %dkHz: %d -> ADCOUT: %d %02x\n", freq_khz, itd1000_vcorg[i].vcorg, adcout, vco_chp1_i2c);
186 183
187 if (adcout > 13) { 184 if (adcout > 13) {
188 if (!(itd1000_vcorg[i].vcorg == 7 || itd1000_vcorg[i].vcorg == 15)) 185 if (!(itd1000_vcorg[i].vcorg == 7 || itd1000_vcorg[i].vcorg == 15))
@@ -232,7 +229,7 @@ static void itd1000_set_lo(struct itd1000_state *state, u32 freq_khz)
232 pllf = (u32) tmp; 229 pllf = (u32) tmp;
233 230
234 state->frequency = ((plln * 1000) + (pllf * 1000)/1048576) * 2*FREF; 231 state->frequency = ((plln * 1000) + (pllf * 1000)/1048576) * 2*FREF;
235 deb("frequency: %dkHz (wanted) %dkHz (set), PLLF = %d, PLLN = %d", freq_khz, state->frequency, pllf, plln); 232 itd_dbg("frequency: %dkHz (wanted) %dkHz (set), PLLF = %d, PLLN = %d\n", freq_khz, state->frequency, pllf, plln);
236 233
237 itd1000_write_reg(state, PLLNH, 0x80); /* PLLNH */; 234 itd1000_write_reg(state, PLLNH, 0x80); /* PLLNH */;
238 itd1000_write_reg(state, PLLNL, plln & 0xff); 235 itd1000_write_reg(state, PLLNL, plln & 0xff);
@@ -242,7 +239,7 @@ static void itd1000_set_lo(struct itd1000_state *state, u32 freq_khz)
242 239
243 for (i = 0; i < ARRAY_SIZE(itd1000_fre_values); i++) { 240 for (i = 0; i < ARRAY_SIZE(itd1000_fre_values); i++) {
244 if (freq_khz <= itd1000_fre_values[i].freq) { 241 if (freq_khz <= itd1000_fre_values[i].freq) {
245 deb("fre_values: %d", i); 242 itd_dbg("fre_values: %d\n", i);
246 itd1000_write_reg(state, RFTR, itd1000_fre_values[i].values[0]); 243 itd1000_write_reg(state, RFTR, itd1000_fre_values[i].values[0]);
247 for (j = 0; j < 9; j++) 244 for (j = 0; j < 9; j++)
248 itd1000_write_reg(state, RFST1+j, itd1000_fre_values[i].values[j+1]); 245 itd1000_write_reg(state, RFST1+j, itd1000_fre_values[i].values[j+1]);
@@ -382,7 +379,7 @@ struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter
382 kfree(state); 379 kfree(state);
383 return NULL; 380 return NULL;
384 } 381 }
385 info("successfully identified (ID: %d)", i); 382 itd_info("successfully identified (ID: %d)\n", i);
386 383
387 memset(state->shadow, 0xff, sizeof(state->shadow)); 384 memset(state->shadow, 0xff, sizeof(state->shadow));
388 for (i = 0x65; i < 0x9c; i++) 385 for (i = 0x65; i < 0x9c; i++)
diff --git a/drivers/media/dvb/frontends/nxt6000.c b/drivers/media/dvb/frontends/nxt6000.c
index a763ec756f7f..6599b8fea9e9 100644
--- a/drivers/media/dvb/frontends/nxt6000.c
+++ b/drivers/media/dvb/frontends/nxt6000.c
@@ -50,7 +50,7 @@ static int nxt6000_writereg(struct nxt6000_state* state, u8 reg, u8 data)
50 if ((ret = i2c_transfer(state->i2c, &msg, 1)) != 1) 50 if ((ret = i2c_transfer(state->i2c, &msg, 1)) != 1)
51 dprintk("nxt6000: nxt6000_write error (reg: 0x%02X, data: 0x%02X, ret: %d)\n", reg, data, ret); 51 dprintk("nxt6000: nxt6000_write error (reg: 0x%02X, data: 0x%02X, ret: %d)\n", reg, data, ret);
52 52
53 return (ret != 1) ? -EFAULT : 0; 53 return (ret != 1) ? -EIO : 0;
54} 54}
55 55
56static u8 nxt6000_readreg(struct nxt6000_state* state, u8 reg) 56static u8 nxt6000_readreg(struct nxt6000_state* state, u8 reg)
diff --git a/drivers/media/dvb/frontends/s5h1420.c b/drivers/media/dvb/frontends/s5h1420.c
index 17f8cdf8afef..3879d2e378aa 100644
--- a/drivers/media/dvb/frontends/s5h1420.c
+++ b/drivers/media/dvb/frontends/s5h1420.c
@@ -634,7 +634,7 @@ static int s5h1420_set_frontend(struct dvb_frontend* fe,
634 struct s5h1420_state* state = fe->demodulator_priv; 634 struct s5h1420_state* state = fe->demodulator_priv;
635 int frequency_delta; 635 int frequency_delta;
636 struct dvb_frontend_tune_settings fesettings; 636 struct dvb_frontend_tune_settings fesettings;
637 uint8_t clock_settting; 637 uint8_t clock_setting;
638 638
639 dprintk("enter %s\n", __func__); 639 dprintk("enter %s\n", __func__);
640 640
@@ -684,19 +684,19 @@ static int s5h1420_set_frontend(struct dvb_frontend* fe,
684 switch (state->fclk) { 684 switch (state->fclk) {
685 default: 685 default:
686 case 88000000: 686 case 88000000:
687 clock_settting = 80; 687 clock_setting = 80;
688 break; 688 break;
689 case 86000000: 689 case 86000000:
690 clock_settting = 78; 690 clock_setting = 78;
691 break; 691 break;
692 case 80000000: 692 case 80000000:
693 clock_settting = 72; 693 clock_setting = 72;
694 break; 694 break;
695 case 59000000: 695 case 59000000:
696 clock_settting = 51; 696 clock_setting = 51;
697 break; 697 break;
698 case 44000000: 698 case 44000000:
699 clock_settting = 36; 699 clock_setting = 36;
700 break; 700 break;
701 } 701 }
702 dprintk("pll01: %d, ToneFreq: %d\n", state->fclk/1000000 - 8, (state->fclk + (TONE_FREQ * 32) - 1) / (TONE_FREQ * 32)); 702 dprintk("pll01: %d, ToneFreq: %d\n", state->fclk/1000000 - 8, (state->fclk + (TONE_FREQ * 32) - 1) / (TONE_FREQ * 32));
diff --git a/drivers/media/dvb/frontends/tda18271c2dd.c b/drivers/media/dvb/frontends/tda18271c2dd.c
new file mode 100644
index 000000000000..0384e8da4f5e
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda18271c2dd.c
@@ -0,0 +1,1251 @@
1/*
2 * tda18271c2dd: Driver for the TDA18271C2 tuner
3 *
4 * Copyright (C) 2010 Digital Devices GmbH
5 *
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 only, as published by the Free Software Foundation.
10 *
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA
22 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
23 */
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/moduleparam.h>
28#include <linux/init.h>
29#include <linux/delay.h>
30#include <linux/firmware.h>
31#include <linux/i2c.h>
32#include <linux/version.h>
33#include <asm/div64.h>
34
35#include "dvb_frontend.h"
36
/* Per-standard tuning parameters (one row of m_StandardTable). */
struct SStandardParam {
	s32   m_IFFrequency;	/* IF frequency — presumably Hz; TODO confirm against maps header */
	u32   m_BandWidth;	/* channel bandwidth (not referenced in this file) */
	u8    m_EP3_4_0;	/* value merged into EP3 bits [4:0] (see ChannelConfiguration) */
	u8    m_EB22;		/* value written verbatim to extended byte EB22 */
};

/* Frequency -> single u8 parameter lookup row; table ends at m_Frequency == 0. */
struct SMap {
	u32   m_Frequency;
	u8    m_Param;
};

/* Frequency -> signed parameter lookup row; table ends at m_Frequency == 0. */
struct SMapI {
	u32   m_Frequency;
	s32   m_Param;
};

/* Frequency -> parameter pair lookup row; table ends at m_Frequency == 0. */
struct SMap2 {
	u32   m_Frequency;
	u8    m_Param1;
	u8    m_Param2;
};

/* One RF band: upper limit and the three default calibration frequencies. */
struct SRFBandMap {
	u32   m_RF_max;
	u32   m_RF1_Default;
	u32   m_RF2_Default;
	u32   m_RF3_Default;
};
66
/*
 * Indices into tda_state.m_Regs[] / the chip's register file.
 * ID..EP5 and the divider bytes are the 16 "main" registers read by
 * Read(); EB1..EB23 are the extended bytes, read by ReadExtented().
 */
enum ERegister {
	ID = 0,
	TM,
	PL,
	EP1, EP2, EP3, EP4, EP5,
	CPD, CD1, CD2, CD3,	/* cal PLL post-divider + divider bytes */
	MPD, MD1, MD2, MD3,	/* main PLL post-divider + divider bytes */
	EB1, EB2, EB3, EB4, EB5, EB6, EB7, EB8, EB9, EB10,
	EB11, EB12, EB13, EB14, EB15, EB16, EB17, EB18, EB19, EB20,
	EB21, EB22, EB23,
	NUM_REGS
};
79
/* Driver/tuner state: i2c link, configuration and register shadow. */
struct tda_state {
	struct i2c_adapter *i2c;	/* host i2c bus */
	u8 adr;				/* chip i2c address */

	u32   m_Frequency;		/* currently tuned RF frequency */
	u32   IF;			/* IF for the selected standard */

	/* IF level nibbles, pre-shifted into EP4 position (see reset()) */
	u8    m_IFLevelAnalog;
	u8    m_IFLevelDigital;
	u8    m_IFLevelDVBC;
	u8    m_IFLevelDVBT;

	u8    m_EP4;			/* EP4 base value (XTout bit etc.) */
	u8    m_EP3_Standby;		/* EP3 value used when entering standby */

	bool  m_bMaster;		/* true unless configured as slave */

	s32   m_SettlingTime;

	u8    m_Regs[NUM_REGS];		/* shadow copy of all chip registers */

	/* Tracking filter settings for band 0..6 */
	u32   m_RF1[7];			/* calibration points per band ... */
	s32   m_RF_A1[7];		/* ... and the linear-interpolation */
	s32   m_RF_B1[7];		/* slopes/offsets derived from them */
	u32   m_RF2[7];			/* in RFTrackingFiltersInit() */
	s32   m_RF_A2[7];
	s32   m_RF_B2[7];
	u32   m_RF3[7];

	u8    m_TMValue_RFCal;		/* calibration temperature */

	bool  m_bFMInput;		/* true to use Pin 8 for FM Radio */

};
115
116static int PowerScan(struct tda_state *state,
117 u8 RFBand, u32 RF_in,
118 u32 *pRF_Out, bool *pbcal);
119
120static int i2c_readn(struct i2c_adapter *adapter, u8 adr, u8 *data, int len)
121{
122 struct i2c_msg msgs[1] = {{.addr = adr, .flags = I2C_M_RD,
123 .buf = data, .len = len} };
124 return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1;
125}
126
127static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len)
128{
129 struct i2c_msg msg = {.addr = adr, .flags = 0,
130 .buf = data, .len = len};
131
132 if (i2c_transfer(adap, &msg, 1) != 1) {
133 printk(KERN_ERR "tda18271c2dd: i2c write error at addr %i\n", adr);
134 return -1;
135 }
136 return 0;
137}
138
139static int WriteRegs(struct tda_state *state,
140 u8 SubAddr, u8 *Regs, u16 nRegs)
141{
142 u8 data[nRegs+1];
143
144 data[0] = SubAddr;
145 memcpy(data + 1, Regs, nRegs);
146 return i2c_write(state->i2c, state->adr, data, nRegs+1);
147}
148
149static int WriteReg(struct tda_state *state, u8 SubAddr, u8 Reg)
150{
151 u8 msg[2] = {SubAddr, Reg};
152
153 return i2c_write(state->i2c, state->adr, msg, 2);
154}
155
156static int Read(struct tda_state *state, u8 * Regs)
157{
158 return i2c_readn(state->i2c, state->adr, Regs, 16);
159}
160
161static int ReadExtented(struct tda_state *state, u8 * Regs)
162{
163 return i2c_readn(state->i2c, state->adr, Regs, NUM_REGS);
164}
165
166static int UpdateRegs(struct tda_state *state, u8 RegFrom, u8 RegTo)
167{
168 return WriteRegs(state, RegFrom,
169 &state->m_Regs[RegFrom], RegTo-RegFrom+1);
170}
171static int UpdateReg(struct tda_state *state, u8 Reg)
172{
173 return WriteReg(state, Reg, state->m_Regs[Reg]);
174}
175
176#include "tda18271c2dd_maps.h"
177
178static void reset(struct tda_state *state)
179{
180 u32 ulIFLevelAnalog = 0;
181 u32 ulIFLevelDigital = 2;
182 u32 ulIFLevelDVBC = 7;
183 u32 ulIFLevelDVBT = 6;
184 u32 ulXTOut = 0;
185 u32 ulStandbyMode = 0x06; /* Send in stdb, but leave osc on */
186 u32 ulSlave = 0;
187 u32 ulFMInput = 0;
188 u32 ulSettlingTime = 100;
189
190 state->m_Frequency = 0;
191 state->m_SettlingTime = 100;
192 state->m_IFLevelAnalog = (ulIFLevelAnalog & 0x07) << 2;
193 state->m_IFLevelDigital = (ulIFLevelDigital & 0x07) << 2;
194 state->m_IFLevelDVBC = (ulIFLevelDVBC & 0x07) << 2;
195 state->m_IFLevelDVBT = (ulIFLevelDVBT & 0x07) << 2;
196
197 state->m_EP4 = 0x20;
198 if (ulXTOut != 0)
199 state->m_EP4 |= 0x40;
200
201 state->m_EP3_Standby = ((ulStandbyMode & 0x07) << 5) | 0x0F;
202 state->m_bMaster = (ulSlave == 0);
203
204 state->m_SettlingTime = ulSettlingTime;
205
206 state->m_bFMInput = (ulFMInput == 2);
207}
208
209static bool SearchMap1(struct SMap Map[],
210 u32 Frequency, u8 *pParam)
211{
212 int i = 0;
213
214 while ((Map[i].m_Frequency != 0) && (Frequency > Map[i].m_Frequency))
215 i += 1;
216 if (Map[i].m_Frequency == 0)
217 return false;
218 *pParam = Map[i].m_Param;
219 return true;
220}
221
222static bool SearchMap2(struct SMapI Map[],
223 u32 Frequency, s32 *pParam)
224{
225 int i = 0;
226
227 while ((Map[i].m_Frequency != 0) &&
228 (Frequency > Map[i].m_Frequency))
229 i += 1;
230 if (Map[i].m_Frequency == 0)
231 return false;
232 *pParam = Map[i].m_Param;
233 return true;
234}
235
236static bool SearchMap3(struct SMap2 Map[], u32 Frequency,
237 u8 *pParam1, u8 *pParam2)
238{
239 int i = 0;
240
241 while ((Map[i].m_Frequency != 0) &&
242 (Frequency > Map[i].m_Frequency))
243 i += 1;
244 if (Map[i].m_Frequency == 0)
245 return false;
246 *pParam1 = Map[i].m_Param1;
247 *pParam2 = Map[i].m_Param2;
248 return true;
249}
250
251static bool SearchMap4(struct SRFBandMap Map[],
252 u32 Frequency, u8 *pRFBand)
253{
254 int i = 0;
255
256 while (i < 7 && (Frequency > Map[i].m_RF_max))
257 i += 1;
258 if (i == 7)
259 return false;
260 *pRFBand = i;
261 return true;
262}
263
/*
 * Read the on-chip thermometer.
 * @pTM_Value: filled with the temperature looked up through
 *             m_Thermometer_Map_1/2 (from tda18271c2dd_maps.h).
 *
 * Enables the thermometer (TM bit 4), reads the raw 4-bit value and,
 * if the reading sits at the edge of the selected range (0 in one
 * range, 8 in the other), toggles the range bit (TM bit 5) and re-reads
 * after a 10 ms settle.  The thermometer is switched off again and EP4's
 * CAL_mode bits are cleared before returning.  Returns 0 or a negative
 * error.  do { } while (0) is this file's goto-free early-exit idiom.
 */
static int ThermometerRead(struct tda_state *state, u8 *pTM_Value)
{
	int status = 0;

	do {
		u8 Regs[16];
		state->m_Regs[TM] |= 0x10;	/* thermometer on */
		status = UpdateReg(state, TM);
		if (status < 0)
			break;
		status = Read(state, Regs);
		if (status < 0)
			break;
		/* reading at range boundary: switch range and retry */
		if (((Regs[TM] & 0x0F) == 0 && (Regs[TM] & 0x20) == 0x20) ||
		    ((Regs[TM] & 0x0F) == 8 && (Regs[TM] & 0x20) == 0x00)) {
			state->m_Regs[TM] ^= 0x20;	/* toggle range bit */
			status = UpdateReg(state, TM);
			if (status < 0)
				break;
			msleep(10);
			status = Read(state, Regs);
			if (status < 0)
				break;
		}
		/* translate raw value through the map matching the range bit */
		*pTM_Value = (Regs[TM] & 0x20)
				? m_Thermometer_Map_2[Regs[TM] & 0x0F]
				: m_Thermometer_Map_1[Regs[TM] & 0x0F] ;
		state->m_Regs[TM] &= ~0x10;	/* Thermometer off */
		status = UpdateReg(state, TM);
		if (status < 0)
			break;
		state->m_Regs[EP4] &= ~0x03;	/* CAL_mode = 0 ????????? */
		status = UpdateReg(state, EP4);
		if (status < 0)
			break;
	} while (0);

	return status;
}
303
/*
 * Put the tuner into standby: AGC detectors off, gains parked, EP3
 * loaded with the configured standby value (m_EP3_Standby).
 * Note the EB21 change is only flushed by the final UpdateRegs(EB21,
 * EB23) call — the write order of this sequence is deliberate.
 * Returns 0 or a negative error.
 */
static int StandBy(struct tda_state *state)
{
	int status = 0;
	do {
		state->m_Regs[EB12] &= ~0x20;	/* PD_AGC1_Det = 0 */
		status = UpdateReg(state, EB12);
		if (status < 0)
			break;
		state->m_Regs[EB18] &= ~0x83;	/* AGC1_loop_off = 0, AGC1_Gain = 6 dB */
		status = UpdateReg(state, EB18);
		if (status < 0)
			break;
		state->m_Regs[EB21] |= 0x03;	/* AGC2_Gain = -6 dB */
		state->m_Regs[EP3] = state->m_EP3_Standby;
		status = UpdateReg(state, EP3);
		if (status < 0)
			break;
		state->m_Regs[EB23] &= ~0x06;	/* ForceLP_Fc2_En = 0, LP_Fc[2] = 0 */
		status = UpdateRegs(state, EB21, EB23);
		if (status < 0)
			break;
	} while (0);
	return status;
}
328
329static int CalcMainPLL(struct tda_state *state, u32 freq)
330{
331
332 u8 PostDiv;
333 u8 Div;
334 u64 OscFreq;
335 u32 MainDiv;
336
337 if (!SearchMap3(m_Main_PLL_Map, freq, &PostDiv, &Div))
338 return -EINVAL;
339
340 OscFreq = (u64) freq * (u64) Div;
341 OscFreq *= (u64) 16384;
342 do_div(OscFreq, (u64)16000000);
343 MainDiv = OscFreq;
344
345 state->m_Regs[MPD] = PostDiv & 0x77;
346 state->m_Regs[MD1] = ((MainDiv >> 16) & 0x7F);
347 state->m_Regs[MD2] = ((MainDiv >> 8) & 0xFF);
348 state->m_Regs[MD3] = (MainDiv & 0xFF);
349
350 return UpdateRegs(state, MPD, MD3);
351}
352
353static int CalcCalPLL(struct tda_state *state, u32 freq)
354{
355 u8 PostDiv;
356 u8 Div;
357 u64 OscFreq;
358 u32 CalDiv;
359
360 if (!SearchMap3(m_Cal_PLL_Map, freq, &PostDiv, &Div))
361 return -EINVAL;
362
363 OscFreq = (u64)freq * (u64)Div;
364 /* CalDiv = u32( OscFreq * 16384 / 16000000 ); */
365 OscFreq *= (u64)16384;
366 do_div(OscFreq, (u64)16000000);
367 CalDiv = OscFreq;
368
369 state->m_Regs[CPD] = PostDiv;
370 state->m_Regs[CD1] = ((CalDiv >> 16) & 0xFF);
371 state->m_Regs[CD2] = ((CalDiv >> 8) & 0xFF);
372 state->m_Regs[CD3] = (CalDiv & 0xFF);
373
374 return UpdateRegs(state, CPD, CD3);
375}
376
/*
 * Run one RF tracking-filter calibration at @freq in band @RFBand and
 * return the resulting capacitor programming value in *pCprog (read
 * back from EB14).  The register write order, the duplicated EP2/EP1
 * launches and the msleep() settle times follow the vendor sequence
 * and must not be reordered.  Returns 0 or a negative error.
 */
static int CalibrateRF(struct tda_state *state,
		       u8 RFBand, u32 freq, s32 *pCprog)
{
	int status = 0;
	u8 Regs[NUM_REGS];
	do {
		u8 BP_Filter = 0;
		u8 GainTaper = 0;
		u8 RFC_K = 0;
		u8 RFC_M = 0;

		state->m_Regs[EP4] &= ~0x03;	/* CAL_mode = 0 */
		status = UpdateReg(state, EP4);
		if (status < 0)
			break;
		state->m_Regs[EB18] |= 0x03;	/* AGC1_Gain = 3 */
		status = UpdateReg(state, EB18);
		if (status < 0)
			break;

		/* Switching off LT (as datasheet says) causes calibration on C1 to fail */
		/* (Readout of Cprog is always 255) */
		if (state->m_Regs[ID] != 0x83)		/* C1: ID == 83, C2: ID == 84 */
			state->m_Regs[EP3] |= 0x40;	/* SM_LT = 1 */

		/* all three lookups must succeed or the frequency is unsupported */
		if (!(SearchMap1(m_BP_Filter_Map, freq, &BP_Filter) &&
		      SearchMap1(m_GainTaper_Map, freq, &GainTaper) &&
		      SearchMap3(m_KM_Map, freq, &RFC_K, &RFC_M)))
			return -EINVAL;

		state->m_Regs[EP1] = (state->m_Regs[EP1] & ~0x07) | BP_Filter;
		state->m_Regs[EP2] = (RFBand << 5) | GainTaper;

		state->m_Regs[EB13] = (state->m_Regs[EB13] & ~0x7C) | (RFC_K << 4) | (RFC_M << 2);

		status = UpdateRegs(state, EP1, EP3);
		if (status < 0)
			break;
		status = UpdateReg(state, EB13);
		if (status < 0)
			break;

		state->m_Regs[EB4] |= 0x20;	/* LO_ForceSrce = 1 */
		status = UpdateReg(state, EB4);
		if (status < 0)
			break;

		state->m_Regs[EB7] |= 0x20;	/* CAL_ForceSrce = 1 */
		status = UpdateReg(state, EB7);
		if (status < 0)
			break;

		state->m_Regs[EB14] = 0;	/* RFC_Cprog = 0 */
		status = UpdateReg(state, EB14);
		if (status < 0)
			break;

		state->m_Regs[EB20] &= ~0x20;	/* ForceLock = 0; */
		status = UpdateReg(state, EB20);
		if (status < 0)
			break;

		state->m_Regs[EP4] |= 0x03;	/* CAL_Mode = 3 */
		status = UpdateRegs(state, EP4, EP5);
		if (status < 0)
			break;

		/* cal PLL at the target, main PLL 1 MHz above it */
		status = CalcCalPLL(state, freq);
		if (status < 0)
			break;
		status = CalcMainPLL(state, freq + 1000000);
		if (status < 0)
			break;

		/* launch the measurement twice via EP2/EP1 (vendor sequence) */
		msleep(5);
		status = UpdateReg(state, EP2);
		if (status < 0)
			break;
		status = UpdateReg(state, EP1);
		if (status < 0)
			break;
		status = UpdateReg(state, EP2);
		if (status < 0)
			break;
		status = UpdateReg(state, EP1);
		if (status < 0)
			break;

		state->m_Regs[EB4] &= ~0x20;	/* LO_ForceSrce = 0 */
		status = UpdateReg(state, EB4);
		if (status < 0)
			break;

		state->m_Regs[EB7] &= ~0x20;	/* CAL_ForceSrce = 0 */
		status = UpdateReg(state, EB7);
		if (status < 0)
			break;
		msleep(10);

		state->m_Regs[EB20] |= 0x20;	/* ForceLock = 1; */
		status = UpdateReg(state, EB20);
		if (status < 0)
			break;
		msleep(60);	/* wait for the calibration to complete */

		/* restore normal operation */
		state->m_Regs[EP4] &= ~0x03;	/* CAL_Mode = 0 */
		state->m_Regs[EP3] &= ~0x40;	/* SM_LT = 0 */
		state->m_Regs[EB18] &= ~0x03;	/* AGC1_Gain = 0 */
		status = UpdateReg(state, EB18);
		if (status < 0)
			break;
		status = UpdateRegs(state, EP3, EP4);
		if (status < 0)
			break;
		status = UpdateReg(state, EP1);
		if (status < 0)
			break;

		status = ReadExtented(state, Regs);
		if (status < 0)
			break;

		/* calibration result is the Cprog the chip settled on */
		*pCprog = Regs[EB14];

	} while (0);
	return status;
}
504
/*
 * Calibrate the tracking filter for one RF band @RFBand.
 *
 * Up to three calibration points (RF1..RF3, defaults from
 * m_RF_Band_Map) are measured; for each, PowerScan() refines the
 * frequency and decides whether a real calibration is possible
 * (bcal) — otherwise the table value from m_RF_Cal_Map is used.
 * From the differences between calibrated and table Cprog values the
 * slopes m_RF_A1/A2 and offsets m_RF_B1/B2 of the two linear segments
 * are derived for later interpolation in
 * RFTrackingFiltersCorrection().  Bands may define RF2/RF3 as 0 to
 * skip the second/third point.  Returns 0 or a negative error.
 */
static int RFTrackingFiltersInit(struct tda_state *state,
				 u8 RFBand)
{
	int status = 0;

	u32 RF1 = m_RF_Band_Map[RFBand].m_RF1_Default;
	u32 RF2 = m_RF_Band_Map[RFBand].m_RF2_Default;
	u32 RF3 = m_RF_Band_Map[RFBand].m_RF3_Default;
	bool bcal = false;

	s32 Cprog_cal1 = 0;
	s32 Cprog_table1 = 0;
	s32 Cprog_cal2 = 0;
	s32 Cprog_table2 = 0;
	s32 Cprog_cal3 = 0;
	s32 Cprog_table3 = 0;

	state->m_RF_A1[RFBand] = 0;
	state->m_RF_B1[RFBand] = 0;
	state->m_RF_A2[RFBand] = 0;
	state->m_RF_B2[RFBand] = 0;

	do {
		/* point 1 */
		status = PowerScan(state, RFBand, RF1, &RF1, &bcal);
		if (status < 0)
			break;
		if (bcal) {
			status = CalibrateRF(state, RFBand, RF1, &Cprog_cal1);
			if (status < 0)
				break;
		}
		SearchMap2(m_RF_Cal_Map, RF1, &Cprog_table1);
		if (!bcal)
			Cprog_cal1 = Cprog_table1;	/* fall back to table value */
		state->m_RF_B1[RFBand] = Cprog_cal1 - Cprog_table1;
		/* state->m_RF_A1[RF_Band] = ???? */

		if (RF2 == 0)	/* band defines only one calibration point */
			break;

		/* point 2 — gives the slope of the first segment */
		status = PowerScan(state, RFBand, RF2, &RF2, &bcal);
		if (status < 0)
			break;
		if (bcal) {
			status = CalibrateRF(state, RFBand, RF2, &Cprog_cal2);
			if (status < 0)
				break;
		}
		SearchMap2(m_RF_Cal_Map, RF2, &Cprog_table2);
		if (!bcal)
			Cprog_cal2 = Cprog_table2;

		state->m_RF_A1[RFBand] =
			(Cprog_cal2 - Cprog_table2 - Cprog_cal1 + Cprog_table1) /
			((s32)(RF2) - (s32)(RF1));

		if (RF3 == 0)	/* no third point for this band */
			break;

		/* point 3 — gives the second segment's slope and offset */
		status = PowerScan(state, RFBand, RF3, &RF3, &bcal);
		if (status < 0)
			break;
		if (bcal) {
			status = CalibrateRF(state, RFBand, RF3, &Cprog_cal3);
			if (status < 0)
				break;
		}
		SearchMap2(m_RF_Cal_Map, RF3, &Cprog_table3);
		if (!bcal)
			Cprog_cal3 = Cprog_table3;
		state->m_RF_A2[RFBand] = (Cprog_cal3 - Cprog_table3 - Cprog_cal2 + Cprog_table2) / ((s32)(RF3) - (s32)(RF2));
		state->m_RF_B2[RFBand] = Cprog_cal2 - Cprog_table2;

	} while (0);

	/* remember the (possibly refined) calibration frequencies */
	state->m_RF1[RFBand] = RF1;
	state->m_RF2[RFBand] = RF2;
	state->m_RF3[RFBand] = RF3;

#if 0
	printk(KERN_ERR "tda18271c2dd: %s %d RF1 = %d A1 = %d B1 = %d RF2 = %d A2 = %d B2 = %d RF3 = %d\n", __func__,
	       RFBand, RF1, state->m_RF_A1[RFBand], state->m_RF_B1[RFBand], RF2,
	       state->m_RF_A2[RFBand], state->m_RF_B2[RFBand], RF3);
#endif

	return status;
}
592
593static int PowerScan(struct tda_state *state,
594 u8 RFBand, u32 RF_in, u32 *pRF_Out, bool *pbcal)
595{
596 int status = 0;
597 do {
598 u8 Gain_Taper = 0;
599 s32 RFC_Cprog = 0;
600 u8 CID_Target = 0;
601 u8 CountLimit = 0;
602 u32 freq_MainPLL;
603 u8 Regs[NUM_REGS];
604 u8 CID_Gain;
605 s32 Count = 0;
606 int sign = 1;
607 bool wait = false;
608
609 if (!(SearchMap2(m_RF_Cal_Map, RF_in, &RFC_Cprog) &&
610 SearchMap1(m_GainTaper_Map, RF_in, &Gain_Taper) &&
611 SearchMap3(m_CID_Target_Map, RF_in, &CID_Target, &CountLimit))) {
612
613 printk(KERN_ERR "tda18271c2dd: %s Search map failed\n", __func__);
614 return -EINVAL;
615 }
616
617 state->m_Regs[EP2] = (RFBand << 5) | Gain_Taper;
618 state->m_Regs[EB14] = (RFC_Cprog);
619 status = UpdateReg(state, EP2);
620 if (status < 0)
621 break;
622 status = UpdateReg(state, EB14);
623 if (status < 0)
624 break;
625
626 freq_MainPLL = RF_in + 1000000;
627 status = CalcMainPLL(state, freq_MainPLL);
628 if (status < 0)
629 break;
630 msleep(5);
631 state->m_Regs[EP4] = (state->m_Regs[EP4] & ~0x03) | 1; /* CAL_mode = 1 */
632 status = UpdateReg(state, EP4);
633 if (status < 0)
634 break;
635 status = UpdateReg(state, EP2); /* Launch power measurement */
636 if (status < 0)
637 break;
638 status = ReadExtented(state, Regs);
639 if (status < 0)
640 break;
641 CID_Gain = Regs[EB10] & 0x3F;
642 state->m_Regs[ID] = Regs[ID]; /* Chip version, (needed for C1 workarround in CalibrateRF) */
643
644 *pRF_Out = RF_in;
645
646 while (CID_Gain < CID_Target) {
647 freq_MainPLL = RF_in + sign * Count + 1000000;
648 status = CalcMainPLL(state, freq_MainPLL);
649 if (status < 0)
650 break;
651 msleep(wait ? 5 : 1);
652 wait = false;
653 status = UpdateReg(state, EP2); /* Launch power measurement */
654 if (status < 0)
655 break;
656 status = ReadExtented(state, Regs);
657 if (status < 0)
658 break;
659 CID_Gain = Regs[EB10] & 0x3F;
660 Count += 200000;
661
662 if (Count < CountLimit * 100000)
663 continue;
664 if (sign < 0)
665 break;
666
667 sign = -sign;
668 Count = 200000;
669 wait = true;
670 }
671 status = status;
672 if (status < 0)
673 break;
674 if (CID_Gain >= CID_Target) {
675 *pbcal = true;
676 *pRF_Out = freq_MainPLL - 1000000;
677 } else
678 *pbcal = false;
679 } while (0);
680
681 return status;
682}
683
/*
 * Prepare the chip for the power-scan phase: fixed EP3 value, IF level
 * and Cal mode cleared in EP4, both AGC gains zeroed, and the low-pass
 * corner forced (ForceLP_Fc2_En/LPFc[2]).  As in StandBy(), the EB21
 * change is only flushed by the final UpdateRegs(EB21, EB23).
 * Returns 0 or a negative error.
 */
static int PowerScanInit(struct tda_state *state)
{
	int status = 0;
	do {
		state->m_Regs[EP3] = (state->m_Regs[EP3] & ~0x1F) | 0x12;
		state->m_Regs[EP4] = (state->m_Regs[EP4] & ~0x1F);	/* If level = 0, Cal mode = 0 */
		status = UpdateRegs(state, EP3, EP4);
		if (status < 0)
			break;
		state->m_Regs[EB18] = (state->m_Regs[EB18] & ~0x03);	/* AGC 1 Gain = 0 */
		status = UpdateReg(state, EB18);
		if (status < 0)
			break;
		state->m_Regs[EB21] = (state->m_Regs[EB21] & ~0x03);	/* AGC 2 Gain = 0 (Datasheet = 3) */
		state->m_Regs[EB23] = (state->m_Regs[EB23] | 0x06);	/* ForceLP_Fc2_En = 1, LPFc[2] = 1 */
		status = UpdateRegs(state, EB21, EB23);
		if (status < 0)
			break;
	} while (0);
	return status;
}
705
706static int CalcRFFilterCurve(struct tda_state *state)
707{
708 int status = 0;
709 do {
710 msleep(200); /* Temperature stabilisation */
711 status = PowerScanInit(state);
712 if (status < 0)
713 break;
714 status = RFTrackingFiltersInit(state, 0);
715 if (status < 0)
716 break;
717 status = RFTrackingFiltersInit(state, 1);
718 if (status < 0)
719 break;
720 status = RFTrackingFiltersInit(state, 2);
721 if (status < 0)
722 break;
723 status = RFTrackingFiltersInit(state, 3);
724 if (status < 0)
725 break;
726 status = RFTrackingFiltersInit(state, 4);
727 if (status < 0)
728 break;
729 status = RFTrackingFiltersInit(state, 5);
730 if (status < 0)
731 break;
732 status = RFTrackingFiltersInit(state, 6);
733 if (status < 0)
734 break;
735 status = ThermometerRead(state, &state->m_TMValue_RFCal); /* also switches off Cal mode !!! */
736 if (status < 0)
737 break;
738 } while (0);
739
740 return status;
741}
742
/*
 * Load the fixed power-on register contents and run the three image
 * rejection (IRC) calibrations (low/mid/high band).  The register
 * values, write ranges and msleep() settle times follow the vendor
 * init sequence; inline comments below note where it deviates from the
 * datasheet.  Returns 0 or a negative error.
 */
static int FixedContentsI2CUpdate(struct tda_state *state)
{
	/* initial values for registers TM..EB23 */
	static u8 InitRegs[] = {
		0x08, 0x80, 0xC6,
		0xDF, 0x16, 0x60, 0x80,
		0x80, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0xFC, 0x01, 0x84, 0x41,
		0x01, 0x84, 0x40, 0x07,
		0x00, 0x00, 0x96, 0x3F,
		0xC1, 0x00, 0x8F, 0x00,
		0x00, 0x8C, 0x00, 0x20,
		0xB3, 0x48, 0xB0,
	};
	int status = 0;
	memcpy(&state->m_Regs[TM], InitRegs, EB23 - TM + 1);
	do {
		status = UpdateRegs(state, TM, EB23);
		if (status < 0)
			break;

		/* AGC1 gain setup */
		state->m_Regs[EB17] = 0x00;
		status = UpdateReg(state, EB17);
		if (status < 0)
			break;
		state->m_Regs[EB17] = 0x03;
		status = UpdateReg(state, EB17);
		if (status < 0)
			break;
		state->m_Regs[EB17] = 0x43;
		status = UpdateReg(state, EB17);
		if (status < 0)
			break;
		state->m_Regs[EB17] = 0x4C;
		status = UpdateReg(state, EB17);
		if (status < 0)
			break;

		/* IRC Cal Low band */
		state->m_Regs[EP3] = 0x1F;
		state->m_Regs[EP4] = 0x66;
		state->m_Regs[EP5] = 0x81;
		state->m_Regs[CPD] = 0xCC;
		state->m_Regs[CD1] = 0x6C;
		state->m_Regs[CD2] = 0x00;
		state->m_Regs[CD3] = 0x00;
		state->m_Regs[MPD] = 0xC5;
		state->m_Regs[MD1] = 0x77;
		state->m_Regs[MD2] = 0x08;
		state->m_Regs[MD3] = 0x00;
		status = UpdateRegs(state, EP2, MD3);	/* diff between sw and datasheet (ep3-md3) */
		if (status < 0)
			break;

#if 0
		state->m_Regs[EB4] = 0x61;	/* missing in sw */
		status = UpdateReg(state, EB4);
		if (status < 0)
			break;
		msleep(1);
		state->m_Regs[EB4] = 0x41;
		status = UpdateReg(state, EB4);
		if (status < 0)
			break;
#endif

		msleep(5);
		status = UpdateReg(state, EP1);
		if (status < 0)
			break;
		msleep(5);

		state->m_Regs[EP5] = 0x85;
		state->m_Regs[CPD] = 0xCB;
		state->m_Regs[CD1] = 0x66;
		state->m_Regs[CD2] = 0x70;
		status = UpdateRegs(state, EP3, CD3);
		if (status < 0)
			break;
		msleep(5);
		status = UpdateReg(state, EP2);
		if (status < 0)
			break;
		msleep(30);

		/* IRC Cal mid band */
		state->m_Regs[EP5] = 0x82;
		state->m_Regs[CPD] = 0xA8;
		state->m_Regs[CD2] = 0x00;
		state->m_Regs[MPD] = 0xA1;	/* Datasheet = 0xA9 */
		state->m_Regs[MD1] = 0x73;
		state->m_Regs[MD2] = 0x1A;
		status = UpdateRegs(state, EP3, MD3);
		if (status < 0)
			break;

		msleep(5);
		status = UpdateReg(state, EP1);
		if (status < 0)
			break;
		msleep(5);

		state->m_Regs[EP5] = 0x86;
		state->m_Regs[CPD] = 0xA8;
		state->m_Regs[CD1] = 0x66;
		state->m_Regs[CD2] = 0xA0;
		status = UpdateRegs(state, EP3, CD3);
		if (status < 0)
			break;
		msleep(5);
		status = UpdateReg(state, EP2);
		if (status < 0)
			break;
		msleep(30);

		/* IRC Cal high band */
		state->m_Regs[EP5] = 0x83;
		state->m_Regs[CPD] = 0x98;
		state->m_Regs[CD1] = 0x65;
		state->m_Regs[CD2] = 0x00;
		state->m_Regs[MPD] = 0x91;	/* Datasheet = 0x91 */
		state->m_Regs[MD1] = 0x71;
		state->m_Regs[MD2] = 0xCD;
		status = UpdateRegs(state, EP3, MD3);
		if (status < 0)
			break;
		msleep(5);
		status = UpdateReg(state, EP1);
		if (status < 0)
			break;
		msleep(5);
		state->m_Regs[EP5] = 0x87;
		state->m_Regs[CD1] = 0x65;
		state->m_Regs[CD2] = 0x50;
		status = UpdateRegs(state, EP3, CD3);
		if (status < 0)
			break;
		msleep(5);
		status = UpdateReg(state, EP2);
		if (status < 0)
			break;
		msleep(30);

		/* Back to normal */
		state->m_Regs[EP4] = 0x64;
		status = UpdateReg(state, EP4);
		if (status < 0)
			break;
		status = UpdateReg(state, EP1);
		if (status < 0)
			break;

	} while (0);
	return status;
}
899
/*
 * Full one-time initialisation: load fixed register contents, measure
 * the RF filter curve for all bands, then drop into standby.
 * Returns 0 or the first negative error.
 *
 * Fix: the original had a stray `;` after the function's closing
 * brace — not valid at file scope in strict C; removed.
 */
static int InitCal(struct tda_state *state)
{
	int status = 0;

	do {
		status = FixedContentsI2CUpdate(state);
		if (status < 0)
			break;
		status = CalcRFFilterCurve(state);
		if (status < 0)
			break;
		status = StandBy(state);
		if (status < 0)
			break;
		/* m_bInitDone = true; */
	} while (0);
	return status;
}
918
919static int RFTrackingFiltersCorrection(struct tda_state *state,
920 u32 Frequency)
921{
922 int status = 0;
923 s32 Cprog_table;
924 u8 RFBand;
925 u8 dCoverdT;
926
927 if (!SearchMap2(m_RF_Cal_Map, Frequency, &Cprog_table) ||
928 !SearchMap4(m_RF_Band_Map, Frequency, &RFBand) ||
929 !SearchMap1(m_RF_Cal_DC_Over_DT_Map, Frequency, &dCoverdT))
930
931 return -EINVAL;
932
933 do {
934 u8 TMValue_Current;
935 u32 RF1 = state->m_RF1[RFBand];
936 u32 RF2 = state->m_RF1[RFBand];
937 u32 RF3 = state->m_RF1[RFBand];
938 s32 RF_A1 = state->m_RF_A1[RFBand];
939 s32 RF_B1 = state->m_RF_B1[RFBand];
940 s32 RF_A2 = state->m_RF_A2[RFBand];
941 s32 RF_B2 = state->m_RF_B2[RFBand];
942 s32 Capprox = 0;
943 int TComp;
944
945 state->m_Regs[EP3] &= ~0xE0; /* Power up */
946 status = UpdateReg(state, EP3);
947 if (status < 0)
948 break;
949
950 status = ThermometerRead(state, &TMValue_Current);
951 if (status < 0)
952 break;
953
954 if (RF3 == 0 || Frequency < RF2)
955 Capprox = RF_A1 * ((s32)(Frequency) - (s32)(RF1)) + RF_B1 + Cprog_table;
956 else
957 Capprox = RF_A2 * ((s32)(Frequency) - (s32)(RF2)) + RF_B2 + Cprog_table;
958
959 TComp = (int)(dCoverdT) * ((int)(TMValue_Current) - (int)(state->m_TMValue_RFCal))/1000;
960
961 Capprox += TComp;
962
963 if (Capprox < 0)
964 Capprox = 0;
965 else if (Capprox > 255)
966 Capprox = 255;
967
968
969 /* TODO Temperature compensation. There is defenitely a scale factor */
970 /* missing in the datasheet, so leave it out for now. */
971 state->m_Regs[EB14] = Capprox;
972
973 status = UpdateReg(state, EB14);
974 if (status < 0)
975 break;
976
977 } while (0);
978 return status;
979}
980
981static int ChannelConfiguration(struct tda_state *state,
982 u32 Frequency, int Standard)
983{
984
985 s32 IntermediateFrequency = m_StandardTable[Standard].m_IFFrequency;
986 int status = 0;
987
988 u8 BP_Filter = 0;
989 u8 RF_Band = 0;
990 u8 GainTaper = 0;
991 u8 IR_Meas = 0;
992
993 state->IF = IntermediateFrequency;
994 /* printk("tda18271c2dd: %s Freq = %d Standard = %d IF = %d\n", __func__, Frequency, Standard, IntermediateFrequency); */
995 /* get values from tables */
996
997 if (!(SearchMap1(m_BP_Filter_Map, Frequency, &BP_Filter) &&
998 SearchMap1(m_GainTaper_Map, Frequency, &GainTaper) &&
999 SearchMap1(m_IR_Meas_Map, Frequency, &IR_Meas) &&
1000 SearchMap4(m_RF_Band_Map, Frequency, &RF_Band))) {
1001
1002 printk(KERN_ERR "tda18271c2dd: %s SearchMap failed\n", __func__);
1003 return -EINVAL;
1004 }
1005
1006 do {
1007 state->m_Regs[EP3] = (state->m_Regs[EP3] & ~0x1F) | m_StandardTable[Standard].m_EP3_4_0;
1008 state->m_Regs[EP3] &= ~0x04; /* switch RFAGC to high speed mode */
1009
1010 /* m_EP4 default for XToutOn, CAL_Mode (0) */
1011 state->m_Regs[EP4] = state->m_EP4 | ((Standard > HF_AnalogMax) ? state->m_IFLevelDigital : state->m_IFLevelAnalog);
1012 /* state->m_Regs[EP4] = state->m_EP4 | state->m_IFLevelDigital; */
1013 if (Standard <= HF_AnalogMax)
1014 state->m_Regs[EP4] = state->m_EP4 | state->m_IFLevelAnalog;
1015 else if (Standard <= HF_ATSC)
1016 state->m_Regs[EP4] = state->m_EP4 | state->m_IFLevelDVBT;
1017 else if (Standard <= HF_DVBC)
1018 state->m_Regs[EP4] = state->m_EP4 | state->m_IFLevelDVBC;
1019 else
1020 state->m_Regs[EP4] = state->m_EP4 | state->m_IFLevelDigital;
1021
1022 if ((Standard == HF_FM_Radio) && state->m_bFMInput)
1023 state->m_Regs[EP4] |= 80;
1024
1025 state->m_Regs[MPD] &= ~0x80;
1026 if (Standard > HF_AnalogMax)
1027 state->m_Regs[MPD] |= 0x80; /* Add IF_notch for digital */
1028
1029 state->m_Regs[EB22] = m_StandardTable[Standard].m_EB22;
1030
1031 /* Note: This is missing from flowchart in TDA18271 specification ( 1.5 MHz cutoff for FM ) */
1032 if (Standard == HF_FM_Radio)
1033 state->m_Regs[EB23] |= 0x06; /* ForceLP_Fc2_En = 1, LPFc[2] = 1 */
1034 else
1035 state->m_Regs[EB23] &= ~0x06; /* ForceLP_Fc2_En = 0, LPFc[2] = 0 */
1036
1037 status = UpdateRegs(state, EB22, EB23);
1038 if (status < 0)
1039 break;
1040
1041 state->m_Regs[EP1] = (state->m_Regs[EP1] & ~0x07) | 0x40 | BP_Filter; /* Dis_Power_level = 1, Filter */
1042 state->m_Regs[EP5] = (state->m_Regs[EP5] & ~0x07) | IR_Meas;
1043 state->m_Regs[EP2] = (RF_Band << 5) | GainTaper;
1044
1045 state->m_Regs[EB1] = (state->m_Regs[EB1] & ~0x07) |
1046 (state->m_bMaster ? 0x04 : 0x00); /* CALVCO_FortLOn = MS */
1047 /* AGC1_always_master = 0 */
1048 /* AGC_firstn = 0 */
1049 status = UpdateReg(state, EB1);
1050 if (status < 0)
1051 break;
1052
1053 if (state->m_bMaster) {
1054 status = CalcMainPLL(state, Frequency + IntermediateFrequency);
1055 if (status < 0)
1056 break;
1057 status = UpdateRegs(state, TM, EP5);
1058 if (status < 0)
1059 break;
1060 state->m_Regs[EB4] |= 0x20; /* LO_forceSrce = 1 */
1061 status = UpdateReg(state, EB4);
1062 if (status < 0)
1063 break;
1064 msleep(1);
1065 state->m_Regs[EB4] &= ~0x20; /* LO_forceSrce = 0 */
1066 status = UpdateReg(state, EB4);
1067 if (status < 0)
1068 break;
1069 } else {
1070 u8 PostDiv = 0;
1071 u8 Div;
1072 status = CalcCalPLL(state, Frequency + IntermediateFrequency);
1073 if (status < 0)
1074 break;
1075
1076 SearchMap3(m_Cal_PLL_Map, Frequency + IntermediateFrequency, &PostDiv, &Div);
1077 state->m_Regs[MPD] = (state->m_Regs[MPD] & ~0x7F) | (PostDiv & 0x77);
1078 status = UpdateReg(state, MPD);
1079 if (status < 0)
1080 break;
1081 status = UpdateRegs(state, TM, EP5);
1082 if (status < 0)
1083 break;
1084
1085 state->m_Regs[EB7] |= 0x20; /* CAL_forceSrce = 1 */
1086 status = UpdateReg(state, EB7);
1087 if (status < 0)
1088 break;
1089 msleep(1);
1090 state->m_Regs[EB7] &= ~0x20; /* CAL_forceSrce = 0 */
1091 status = UpdateReg(state, EB7);
1092 if (status < 0)
1093 break;
1094 }
1095 msleep(20);
1096 if (Standard != HF_FM_Radio)
1097 state->m_Regs[EP3] |= 0x04; /* RFAGC to normal mode */
1098 status = UpdateReg(state, EP3);
1099 if (status < 0)
1100 break;
1101
1102 } while (0);
1103 return status;
1104}
1105
1106static int sleep(struct dvb_frontend *fe)
1107{
1108 struct tda_state *state = fe->tuner_priv;
1109
1110 StandBy(state);
1111 return 0;
1112}
1113
/* dvb_tuner_ops.init callback: nothing to do here — the hardware is
 * initialised at attach time (reset()/InitCal()). */
static int init(struct dvb_frontend *fe)
{
	return 0;
}
1118
1119static int release(struct dvb_frontend *fe)
1120{
1121 kfree(fe->tuner_priv);
1122 fe->tuner_priv = NULL;
1123 return 0;
1124}
1125
1126/*
1127 * As defined on EN 300 429 Annex A and on ITU-T J.83 annex A, the DVB-C
1128 * roll-off factor is 0.15.
1129 * According with the specs, the amount of the needed bandwith is given by:
1130 * Bw = Symbol_rate * (1 + 0.15)
1131 * As such, the maximum symbol rate supported by 6 MHz is
1132 * max_symbol_rate = 6 MHz / 1.15 = 5217391 Bauds
1133 *NOTE: For ITU-T J.83 Annex C, the roll-off factor is 0.13. So:
1134 * max_symbol_rate = 6 MHz / 1.13 = 5309735 Baud
1135 * That means that an adjustment is needed for Japan,
1136 * but, as currently DRX-K is hardcoded to Annex A, let's stick
1137 * with 0.15 roll-off factor.
1138 */
1139#define MAX_SYMBOL_RATE_6MHz 5217391
1140
1141static int set_params(struct dvb_frontend *fe,
1142 struct dvb_frontend_parameters *params)
1143{
1144 struct tda_state *state = fe->tuner_priv;
1145 int status = 0;
1146 int Standard;
1147
1148 state->m_Frequency = params->frequency;
1149
1150 if (fe->ops.info.type == FE_OFDM)
1151 switch (params->u.ofdm.bandwidth) {
1152 case BANDWIDTH_6_MHZ:
1153 Standard = HF_DVBT_6MHZ;
1154 break;
1155 case BANDWIDTH_7_MHZ:
1156 Standard = HF_DVBT_7MHZ;
1157 break;
1158 default:
1159 case BANDWIDTH_8_MHZ:
1160 Standard = HF_DVBT_8MHZ;
1161 break;
1162 }
1163 else if (fe->ops.info.type == FE_QAM) {
1164 if (params->u.qam.symbol_rate <= MAX_SYMBOL_RATE_6MHz)
1165 Standard = HF_DVBC_6MHZ;
1166 else
1167 Standard = HF_DVBC_8MHZ;
1168 } else
1169 return -EINVAL;
1170 do {
1171 status = RFTrackingFiltersCorrection(state, params->frequency);
1172 if (status < 0)
1173 break;
1174 status = ChannelConfiguration(state, params->frequency, Standard);
1175 if (status < 0)
1176 break;
1177
1178 msleep(state->m_SettlingTime); /* Allow AGC's to settle down */
1179 } while (0);
1180 return status;
1181}
1182
#if 0
/* Currently compiled out: maps an IF AGC reading onto a 0..80000
 * signal-strength scale (linear up to IFAgc 500, flatter above).
 * The RFAgc parameter is unused. */
static int GetSignalStrength(s32 *pSignalStrength, u32 RFAgc, u32 IFAgc)
{
	if (IFAgc < 500) {
		/* Scale this from 0 to 50000 */
		*pSignalStrength = IFAgc * 100;
	} else {
		/* Scale range 500-1500 to 50000-80000 */
		*pSignalStrength = 50000 + (IFAgc - 500) * 30;
	}

	return 0;
}
#endif
1197
/* dvb_tuner_ops.get_frequency callback.  Note: reports the intermediate
 * frequency stored by ChannelConfiguration() (state->IF), not the tuned
 * RF frequency — presumably for the attached demodulator's use. */
static int get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
	struct tda_state *state = fe->tuner_priv;

	*frequency = state->IF;
	return 0;
}
1205
/* dvb_tuner_ops.get_bandwidth callback: stub — *bandwidth is left
 * untouched and 0 is returned. */
static int get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
{
	/* struct tda_state *state = fe->tuner_priv; */
	/* *bandwidth = priv->bandwidth; */
	return 0;
}
1212
1213
/* Tuner operations exported to the DVB core; copied into
 * fe->ops.tuner_ops by tda18271c2dd_attach(). */
static struct dvb_tuner_ops tuner_ops = {
	.info = {
		.name = "NXP TDA18271C2D",
		.frequency_min  =  47125000,
		.frequency_max  = 865000000,
		.frequency_step =     62500
	},
	.init              = init,
	.sleep             = sleep,
	.set_params        = set_params,
	.release           = release,
	.get_frequency     = get_frequency,
	.get_bandwidth     = get_bandwidth,
};
1228
1229struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe,
1230 struct i2c_adapter *i2c, u8 adr)
1231{
1232 struct tda_state *state;
1233
1234 state = kzalloc(sizeof(struct tda_state), GFP_KERNEL);
1235 if (!state)
1236 return NULL;
1237
1238 fe->tuner_priv = state;
1239 state->adr = adr;
1240 state->i2c = i2c;
1241 memcpy(&fe->ops.tuner_ops, &tuner_ops, sizeof(struct dvb_tuner_ops));
1242 reset(state);
1243 InitCal(state);
1244
1245 return fe;
1246}
1247EXPORT_SYMBOL_GPL(tda18271c2dd_attach);
1248
1249MODULE_DESCRIPTION("TDA18271C2 driver");
1250MODULE_AUTHOR("DD");
1251MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/tda18271c2dd.h b/drivers/media/dvb/frontends/tda18271c2dd.h
new file mode 100644
index 000000000000..1389c74e12ce
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda18271c2dd.h
@@ -0,0 +1,16 @@
#ifndef _TDA18271C2DD_H_
#define _TDA18271C2DD_H_
#if defined(CONFIG_DVB_TDA18271C2DD) || (defined(CONFIG_DVB_TDA18271C2DD_MODULE) \
					 && defined(MODULE))
/* Attach a TDA18271C2 tuner to frontend @fe on I2C bus @i2c at address
 * @adr.  Returns @fe on success, NULL on failure. */
struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe,
					 struct i2c_adapter *i2c, u8 adr);
#else
/* Stub used when the driver is disabled in Kconfig. */
static inline struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe,
					 struct i2c_adapter *i2c, u8 adr)
{
	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
	return NULL;
}
#endif

#endif
diff --git a/drivers/media/dvb/frontends/tda18271c2dd_maps.h b/drivers/media/dvb/frontends/tda18271c2dd_maps.h
new file mode 100644
index 000000000000..b87661b9df14
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda18271c2dd_maps.h
@@ -0,0 +1,814 @@
/* Reception standards, used as the index into m_StandardTable.  The order
 * matters: ChannelConfiguration() compares against the HF_AnalogMax,
 * HF_ATSC and HF_DVBC markers to pick IF levels, so new entries must
 * preserve the analog/DVB-T/DVB-C grouping. */
enum HF_S {
	HF_None = 0, HF_B, HF_DK, HF_G, HF_I, HF_L, HF_L1, HF_MN, HF_FM_Radio,
	HF_AnalogMax, HF_DVBT_6MHZ, HF_DVBT_7MHZ, HF_DVBT_8MHZ,
	HF_DVBT, HF_ATSC, HF_DVBC_6MHZ, HF_DVBC_7MHZ,
	HF_DVBC_8MHZ, HF_DVBC
};
7
8struct SStandardParam m_StandardTable[] = {
9 { 0, 0, 0x00, 0x00 }, /* HF_None */
10 { 6000000, 7000000, 0x1D, 0x2C }, /* HF_B, */
11 { 6900000, 8000000, 0x1E, 0x2C }, /* HF_DK, */
12 { 7100000, 8000000, 0x1E, 0x2C }, /* HF_G, */
13 { 7250000, 8000000, 0x1E, 0x2C }, /* HF_I, */
14 { 6900000, 8000000, 0x1E, 0x2C }, /* HF_L, */
15 { 1250000, 8000000, 0x1E, 0x2C }, /* HF_L1, */
16 { 5400000, 6000000, 0x1C, 0x2C }, /* HF_MN, */
17 { 1250000, 500000, 0x18, 0x2C }, /* HF_FM_Radio, */
18 { 0, 0, 0x00, 0x00 }, /* HF_AnalogMax (Unused) */
19 { 3300000, 6000000, 0x1C, 0x58 }, /* HF_DVBT_6MHZ */
20 { 3500000, 7000000, 0x1C, 0x37 }, /* HF_DVBT_7MHZ */
21 { 4000000, 8000000, 0x1D, 0x37 }, /* HF_DVBT_8MHZ */
22 { 0, 0, 0x00, 0x00 }, /* HF_DVBT (Unused) */
23 { 5000000, 6000000, 0x1C, 0x37 }, /* HF_ATSC (center = 3.25 MHz) */
24 { 4000000, 6000000, 0x1D, 0x58 }, /* HF_DVBC_6MHZ (Chicago) */
25 { 4500000, 7000000, 0x1E, 0x37 }, /* HF_DVBC_7MHZ (not documented by NXP) */
26 { 5000000, 8000000, 0x1F, 0x37 }, /* HF_DVBC_8MHZ */
27 { 0, 0, 0x00, 0x00 }, /* HF_DVBC (Unused) */
28};
29
30struct SMap m_BP_Filter_Map[] = {
31 { 62000000, 0x00 },
32 { 84000000, 0x01 },
33 { 100000000, 0x02 },
34 { 140000000, 0x03 },
35 { 170000000, 0x04 },
36 { 180000000, 0x05 },
37 { 865000000, 0x06 },
38 { 0, 0x00 }, /* Table End */
39};
40
41static struct SMapI m_RF_Cal_Map[] = {
42 { 41000000, 0x0F },
43 { 43000000, 0x1C },
44 { 45000000, 0x2F },
45 { 46000000, 0x39 },
46 { 47000000, 0x40 },
47 { 47900000, 0x50 },
48 { 49100000, 0x16 },
49 { 50000000, 0x18 },
50 { 51000000, 0x20 },
51 { 53000000, 0x28 },
52 { 55000000, 0x2B },
53 { 56000000, 0x32 },
54 { 57000000, 0x35 },
55 { 58000000, 0x3E },
56 { 59000000, 0x43 },
57 { 60000000, 0x4E },
58 { 61100000, 0x55 },
59 { 63000000, 0x0F },
60 { 64000000, 0x11 },
61 { 65000000, 0x12 },
62 { 66000000, 0x15 },
63 { 67000000, 0x16 },
64 { 68000000, 0x17 },
65 { 70000000, 0x19 },
66 { 71000000, 0x1C },
67 { 72000000, 0x1D },
68 { 73000000, 0x1F },
69 { 74000000, 0x20 },
70 { 75000000, 0x21 },
71 { 76000000, 0x24 },
72 { 77000000, 0x25 },
73 { 78000000, 0x27 },
74 { 80000000, 0x28 },
75 { 81000000, 0x29 },
76 { 82000000, 0x2D },
77 { 83000000, 0x2E },
78 { 84000000, 0x2F },
79 { 85000000, 0x31 },
80 { 86000000, 0x33 },
81 { 87000000, 0x34 },
82 { 88000000, 0x35 },
83 { 89000000, 0x37 },
84 { 90000000, 0x38 },
85 { 91000000, 0x39 },
86 { 93000000, 0x3C },
87 { 94000000, 0x3E },
88 { 95000000, 0x3F },
89 { 96000000, 0x40 },
90 { 97000000, 0x42 },
91 { 99000000, 0x45 },
92 { 100000000, 0x46 },
93 { 102000000, 0x48 },
94 { 103000000, 0x4A },
95 { 105000000, 0x4D },
96 { 106000000, 0x4E },
97 { 107000000, 0x50 },
98 { 108000000, 0x51 },
99 { 110000000, 0x54 },
100 { 111000000, 0x56 },
101 { 112000000, 0x57 },
102 { 113000000, 0x58 },
103 { 114000000, 0x59 },
104 { 115000000, 0x5C },
105 { 116000000, 0x5D },
106 { 117000000, 0x5F },
107 { 119000000, 0x60 },
108 { 120000000, 0x64 },
109 { 121000000, 0x65 },
110 { 122000000, 0x66 },
111 { 123000000, 0x68 },
112 { 124000000, 0x69 },
113 { 125000000, 0x6C },
114 { 126000000, 0x6D },
115 { 127000000, 0x6E },
116 { 128000000, 0x70 },
117 { 129000000, 0x71 },
118 { 130000000, 0x75 },
119 { 131000000, 0x77 },
120 { 132000000, 0x78 },
121 { 133000000, 0x7B },
122 { 134000000, 0x7E },
123 { 135000000, 0x81 },
124 { 136000000, 0x82 },
125 { 137000000, 0x87 },
126 { 138000000, 0x88 },
127 { 139000000, 0x8D },
128 { 140000000, 0x8E },
129 { 141000000, 0x91 },
130 { 142000000, 0x95 },
131 { 143000000, 0x9A },
132 { 144000000, 0x9D },
133 { 145000000, 0xA1 },
134 { 146000000, 0xA2 },
135 { 147000000, 0xA4 },
136 { 148000000, 0xA9 },
137 { 149000000, 0xAE },
138 { 150000000, 0xB0 },
139 { 151000000, 0xB1 },
140 { 152000000, 0xB7 },
141 { 152600000, 0xBD },
142 { 154000000, 0x20 },
143 { 155000000, 0x22 },
144 { 156000000, 0x24 },
145 { 157000000, 0x25 },
146 { 158000000, 0x27 },
147 { 159000000, 0x29 },
148 { 160000000, 0x2C },
149 { 161000000, 0x2D },
150 { 163000000, 0x2E },
151 { 164000000, 0x2F },
152 { 164700000, 0x30 },
153 { 166000000, 0x11 },
154 { 167000000, 0x12 },
155 { 168000000, 0x13 },
156 { 169000000, 0x14 },
157 { 170000000, 0x15 },
158 { 172000000, 0x16 },
159 { 173000000, 0x17 },
160 { 174000000, 0x18 },
161 { 175000000, 0x1A },
162 { 176000000, 0x1B },
163 { 178000000, 0x1D },
164 { 179000000, 0x1E },
165 { 180000000, 0x1F },
166 { 181000000, 0x20 },
167 { 182000000, 0x21 },
168 { 183000000, 0x22 },
169 { 184000000, 0x24 },
170 { 185000000, 0x25 },
171 { 186000000, 0x26 },
172 { 187000000, 0x27 },
173 { 188000000, 0x29 },
174 { 189000000, 0x2A },
175 { 190000000, 0x2C },
176 { 191000000, 0x2D },
177 { 192000000, 0x2E },
178 { 193000000, 0x2F },
179 { 194000000, 0x30 },
180 { 195000000, 0x33 },
181 { 196000000, 0x35 },
182 { 198000000, 0x36 },
183 { 200000000, 0x38 },
184 { 201000000, 0x3C },
185 { 202000000, 0x3D },
186 { 203500000, 0x3E },
187 { 206000000, 0x0E },
188 { 208000000, 0x0F },
189 { 212000000, 0x10 },
190 { 216000000, 0x11 },
191 { 217000000, 0x12 },
192 { 218000000, 0x13 },
193 { 220000000, 0x14 },
194 { 222000000, 0x15 },
195 { 225000000, 0x16 },
196 { 228000000, 0x17 },
197 { 231000000, 0x18 },
198 { 234000000, 0x19 },
199 { 235000000, 0x1A },
200 { 236000000, 0x1B },
201 { 237000000, 0x1C },
202 { 240000000, 0x1D },
203 { 242000000, 0x1E },
204 { 244000000, 0x1F },
205 { 247000000, 0x20 },
206 { 249000000, 0x21 },
207 { 252000000, 0x22 },
208 { 253000000, 0x23 },
209 { 254000000, 0x24 },
210 { 256000000, 0x25 },
211 { 259000000, 0x26 },
212 { 262000000, 0x27 },
213 { 264000000, 0x28 },
214 { 267000000, 0x29 },
215 { 269000000, 0x2A },
216 { 271000000, 0x2B },
217 { 273000000, 0x2C },
218 { 275000000, 0x2D },
219 { 277000000, 0x2E },
220 { 279000000, 0x2F },
221 { 282000000, 0x30 },
222 { 284000000, 0x31 },
223 { 286000000, 0x32 },
224 { 287000000, 0x33 },
225 { 290000000, 0x34 },
226 { 293000000, 0x35 },
227 { 295000000, 0x36 },
228 { 297000000, 0x37 },
229 { 300000000, 0x38 },
230 { 303000000, 0x39 },
231 { 305000000, 0x3A },
232 { 306000000, 0x3B },
233 { 307000000, 0x3C },
234 { 310000000, 0x3D },
235 { 312000000, 0x3E },
236 { 315000000, 0x3F },
237 { 318000000, 0x40 },
238 { 320000000, 0x41 },
239 { 323000000, 0x42 },
240 { 324000000, 0x43 },
241 { 325000000, 0x44 },
242 { 327000000, 0x45 },
243 { 331000000, 0x46 },
244 { 334000000, 0x47 },
245 { 337000000, 0x48 },
246 { 339000000, 0x49 },
247 { 340000000, 0x4A },
248 { 341000000, 0x4B },
249 { 343000000, 0x4C },
250 { 345000000, 0x4D },
251 { 349000000, 0x4E },
252 { 352000000, 0x4F },
253 { 353000000, 0x50 },
254 { 355000000, 0x51 },
255 { 357000000, 0x52 },
256 { 359000000, 0x53 },
257 { 361000000, 0x54 },
258 { 362000000, 0x55 },
259 { 364000000, 0x56 },
260 { 368000000, 0x57 },
261 { 370000000, 0x58 },
262 { 372000000, 0x59 },
263 { 375000000, 0x5A },
264 { 376000000, 0x5B },
265 { 377000000, 0x5C },
266 { 379000000, 0x5D },
267 { 382000000, 0x5E },
268 { 384000000, 0x5F },
269 { 385000000, 0x60 },
270 { 386000000, 0x61 },
271 { 388000000, 0x62 },
272 { 390000000, 0x63 },
273 { 393000000, 0x64 },
274 { 394000000, 0x65 },
275 { 396000000, 0x66 },
276 { 397000000, 0x67 },
277 { 398000000, 0x68 },
278 { 400000000, 0x69 },
279 { 402000000, 0x6A },
280 { 403000000, 0x6B },
281 { 407000000, 0x6C },
282 { 408000000, 0x6D },
283 { 409000000, 0x6E },
284 { 410000000, 0x6F },
285 { 411000000, 0x70 },
286 { 412000000, 0x71 },
287 { 413000000, 0x72 },
288 { 414000000, 0x73 },
289 { 417000000, 0x74 },
290 { 418000000, 0x75 },
291 { 420000000, 0x76 },
292 { 422000000, 0x77 },
293 { 423000000, 0x78 },
294 { 424000000, 0x79 },
295 { 427000000, 0x7A },
296 { 428000000, 0x7B },
297 { 429000000, 0x7D },
298 { 432000000, 0x7F },
299 { 434000000, 0x80 },
300 { 435000000, 0x81 },
301 { 436000000, 0x83 },
302 { 437000000, 0x84 },
303 { 438000000, 0x85 },
304 { 439000000, 0x86 },
305 { 440000000, 0x87 },
306 { 441000000, 0x88 },
307 { 442000000, 0x89 },
308 { 445000000, 0x8A },
309 { 446000000, 0x8B },
310 { 447000000, 0x8C },
311 { 448000000, 0x8E },
312 { 449000000, 0x8F },
313 { 450000000, 0x90 },
314 { 452000000, 0x91 },
315 { 453000000, 0x93 },
316 { 454000000, 0x94 },
317 { 456000000, 0x96 },
318 { 457800000, 0x98 },
319 { 461000000, 0x11 },
320 { 468000000, 0x12 },
321 { 472000000, 0x13 },
322 { 473000000, 0x14 },
323 { 474000000, 0x15 },
324 { 481000000, 0x16 },
325 { 486000000, 0x17 },
326 { 491000000, 0x18 },
327 { 498000000, 0x19 },
328 { 499000000, 0x1A },
329 { 501000000, 0x1B },
330 { 506000000, 0x1C },
331 { 511000000, 0x1D },
332 { 516000000, 0x1E },
333 { 520000000, 0x1F },
334 { 521000000, 0x20 },
335 { 525000000, 0x21 },
336 { 529000000, 0x22 },
337 { 533000000, 0x23 },
338 { 539000000, 0x24 },
339 { 541000000, 0x25 },
340 { 547000000, 0x26 },
341 { 549000000, 0x27 },
342 { 551000000, 0x28 },
343 { 556000000, 0x29 },
344 { 561000000, 0x2A },
345 { 563000000, 0x2B },
346 { 565000000, 0x2C },
347 { 569000000, 0x2D },
348 { 571000000, 0x2E },
349 { 577000000, 0x2F },
350 { 580000000, 0x30 },
351 { 582000000, 0x31 },
352 { 584000000, 0x32 },
353 { 588000000, 0x33 },
354 { 591000000, 0x34 },
355 { 596000000, 0x35 },
356 { 598000000, 0x36 },
357 { 603000000, 0x37 },
358 { 604000000, 0x38 },
359 { 606000000, 0x39 },
360 { 612000000, 0x3A },
361 { 615000000, 0x3B },
362 { 617000000, 0x3C },
363 { 621000000, 0x3D },
364 { 622000000, 0x3E },
365 { 625000000, 0x3F },
366 { 632000000, 0x40 },
367 { 633000000, 0x41 },
368 { 634000000, 0x42 },
369 { 642000000, 0x43 },
370 { 643000000, 0x44 },
371 { 647000000, 0x45 },
372 { 650000000, 0x46 },
373 { 652000000, 0x47 },
374 { 657000000, 0x48 },
375 { 661000000, 0x49 },
376 { 662000000, 0x4A },
377 { 665000000, 0x4B },
378 { 667000000, 0x4C },
379 { 670000000, 0x4D },
380 { 673000000, 0x4E },
381 { 676000000, 0x4F },
382 { 677000000, 0x50 },
383 { 681000000, 0x51 },
384 { 683000000, 0x52 },
385 { 686000000, 0x53 },
386 { 688000000, 0x54 },
387 { 689000000, 0x55 },
388 { 691000000, 0x56 },
389 { 695000000, 0x57 },
390 { 698000000, 0x58 },
391 { 703000000, 0x59 },
392 { 704000000, 0x5A },
393 { 705000000, 0x5B },
394 { 707000000, 0x5C },
395 { 710000000, 0x5D },
396 { 712000000, 0x5E },
397 { 717000000, 0x5F },
398 { 718000000, 0x60 },
399 { 721000000, 0x61 },
400 { 722000000, 0x62 },
401 { 723000000, 0x63 },
402 { 725000000, 0x64 },
403 { 727000000, 0x65 },
404 { 730000000, 0x66 },
405 { 732000000, 0x67 },
406 { 735000000, 0x68 },
407 { 740000000, 0x69 },
408 { 741000000, 0x6A },
409 { 742000000, 0x6B },
410 { 743000000, 0x6C },
411 { 745000000, 0x6D },
412 { 747000000, 0x6E },
413 { 748000000, 0x6F },
414 { 750000000, 0x70 },
415 { 752000000, 0x71 },
416 { 754000000, 0x72 },
417 { 757000000, 0x73 },
418 { 758000000, 0x74 },
419 { 760000000, 0x75 },
420 { 763000000, 0x76 },
421 { 764000000, 0x77 },
422 { 766000000, 0x78 },
423 { 767000000, 0x79 },
424 { 768000000, 0x7A },
425 { 773000000, 0x7B },
426 { 774000000, 0x7C },
427 { 776000000, 0x7D },
428 { 777000000, 0x7E },
429 { 778000000, 0x7F },
430 { 779000000, 0x80 },
431 { 781000000, 0x81 },
432 { 783000000, 0x82 },
433 { 784000000, 0x83 },
434 { 785000000, 0x84 },
435 { 786000000, 0x85 },
436 { 793000000, 0x86 },
437 { 794000000, 0x87 },
438 { 795000000, 0x88 },
439 { 797000000, 0x89 },
440 { 799000000, 0x8A },
441 { 801000000, 0x8B },
442 { 802000000, 0x8C },
443 { 803000000, 0x8D },
444 { 804000000, 0x8E },
445 { 810000000, 0x90 },
446 { 811000000, 0x91 },
447 { 812000000, 0x92 },
448 { 814000000, 0x93 },
449 { 816000000, 0x94 },
450 { 817000000, 0x96 },
451 { 818000000, 0x97 },
452 { 820000000, 0x98 },
453 { 821000000, 0x99 },
454 { 822000000, 0x9A },
455 { 828000000, 0x9B },
456 { 829000000, 0x9D },
457 { 830000000, 0x9F },
458 { 831000000, 0xA0 },
459 { 833000000, 0xA1 },
460 { 835000000, 0xA2 },
461 { 836000000, 0xA3 },
462 { 837000000, 0xA4 },
463 { 838000000, 0xA6 },
464 { 840000000, 0xA8 },
465 { 842000000, 0xA9 },
466 { 845000000, 0xAA },
467 { 846000000, 0xAB },
468 { 847000000, 0xAD },
469 { 848000000, 0xAE },
470 { 852000000, 0xAF },
471 { 853000000, 0xB0 },
472 { 858000000, 0xB1 },
473 { 860000000, 0xB2 },
474 { 861000000, 0xB3 },
475 { 862000000, 0xB4 },
476 { 863000000, 0xB6 },
477 { 864000000, 0xB8 },
478 { 865000000, 0xB9 },
479 { 0, 0x00 }, /* Table End */
480};
481
482
/* Frequency-indexed pair of values (terminated by a 0-frequency entry).
 * NOTE(review): presumably the K/M VCO calibration constants per band —
 * confirm against the SearchMap2() caller and the TDA18271 datasheet. */
static struct SMap2 m_KM_Map[] = {
	{   47900000, 3, 2 },
	{   61100000, 3, 1 },
	{  350000000, 3, 0 },
	{  720000000, 2, 1 },
	{  865000000, 3, 3 },
	{          0, 0x00 },	/* Table End */
};
491
492static struct SMap2 m_Main_PLL_Map[] = {
493 { 33125000, 0x57, 0xF0 },
494 { 35500000, 0x56, 0xE0 },
495 { 38188000, 0x55, 0xD0 },
496 { 41375000, 0x54, 0xC0 },
497 { 45125000, 0x53, 0xB0 },
498 { 49688000, 0x52, 0xA0 },
499 { 55188000, 0x51, 0x90 },
500 { 62125000, 0x50, 0x80 },
501 { 66250000, 0x47, 0x78 },
502 { 71000000, 0x46, 0x70 },
503 { 76375000, 0x45, 0x68 },
504 { 82750000, 0x44, 0x60 },
505 { 90250000, 0x43, 0x58 },
506 { 99375000, 0x42, 0x50 },
507 { 110375000, 0x41, 0x48 },
508 { 124250000, 0x40, 0x40 },
509 { 132500000, 0x37, 0x3C },
510 { 142000000, 0x36, 0x38 },
511 { 152750000, 0x35, 0x34 },
512 { 165500000, 0x34, 0x30 },
513 { 180500000, 0x33, 0x2C },
514 { 198750000, 0x32, 0x28 },
515 { 220750000, 0x31, 0x24 },
516 { 248500000, 0x30, 0x20 },
517 { 265000000, 0x27, 0x1E },
518 { 284000000, 0x26, 0x1C },
519 { 305500000, 0x25, 0x1A },
520 { 331000000, 0x24, 0x18 },
521 { 361000000, 0x23, 0x16 },
522 { 397500000, 0x22, 0x14 },
523 { 441500000, 0x21, 0x12 },
524 { 497000000, 0x20, 0x10 },
525 { 530000000, 0x17, 0x0F },
526 { 568000000, 0x16, 0x0E },
527 { 611000000, 0x15, 0x0D },
528 { 662000000, 0x14, 0x0C },
529 { 722000000, 0x13, 0x0B },
530 { 795000000, 0x12, 0x0A },
531 { 883000000, 0x11, 0x09 },
532 { 994000000, 0x10, 0x08 },
533 { 0, 0x00, 0x00 }, /* Table End */
534};
535
536static struct SMap2 m_Cal_PLL_Map[] = {
537 { 33813000, 0xDD, 0xD0 },
538 { 36625000, 0xDC, 0xC0 },
539 { 39938000, 0xDB, 0xB0 },
540 { 43938000, 0xDA, 0xA0 },
541 { 48813000, 0xD9, 0x90 },
542 { 54938000, 0xD8, 0x80 },
543 { 62813000, 0xD3, 0x70 },
544 { 67625000, 0xCD, 0x68 },
545 { 73250000, 0xCC, 0x60 },
546 { 79875000, 0xCB, 0x58 },
547 { 87875000, 0xCA, 0x50 },
548 { 97625000, 0xC9, 0x48 },
549 { 109875000, 0xC8, 0x40 },
550 { 125625000, 0xC3, 0x38 },
551 { 135250000, 0xBD, 0x34 },
552 { 146500000, 0xBC, 0x30 },
553 { 159750000, 0xBB, 0x2C },
554 { 175750000, 0xBA, 0x28 },
555 { 195250000, 0xB9, 0x24 },
556 { 219750000, 0xB8, 0x20 },
557 { 251250000, 0xB3, 0x1C },
558 { 270500000, 0xAD, 0x1A },
559 { 293000000, 0xAC, 0x18 },
560 { 319500000, 0xAB, 0x16 },
561 { 351500000, 0xAA, 0x14 },
562 { 390500000, 0xA9, 0x12 },
563 { 439500000, 0xA8, 0x10 },
564 { 502500000, 0xA3, 0x0E },
565 { 541000000, 0x9D, 0x0D },
566 { 586000000, 0x9C, 0x0C },
567 { 639000000, 0x9B, 0x0B },
568 { 703000000, 0x9A, 0x0A },
569 { 781000000, 0x99, 0x09 },
570 { 879000000, 0x98, 0x08 },
571 { 0, 0x00, 0x00 }, /* Table End */
572};
573
574static struct SMap m_GainTaper_Map[] = {
575 { 45400000, 0x1F },
576 { 45800000, 0x1E },
577 { 46200000, 0x1D },
578 { 46700000, 0x1C },
579 { 47100000, 0x1B },
580 { 47500000, 0x1A },
581 { 47900000, 0x19 },
582 { 49600000, 0x17 },
583 { 51200000, 0x16 },
584 { 52900000, 0x15 },
585 { 54500000, 0x14 },
586 { 56200000, 0x13 },
587 { 57800000, 0x12 },
588 { 59500000, 0x11 },
589 { 61100000, 0x10 },
590 { 67600000, 0x0D },
591 { 74200000, 0x0C },
592 { 80700000, 0x0B },
593 { 87200000, 0x0A },
594 { 93800000, 0x09 },
595 { 100300000, 0x08 },
596 { 106900000, 0x07 },
597 { 113400000, 0x06 },
598 { 119900000, 0x05 },
599 { 126500000, 0x04 },
600 { 133000000, 0x03 },
601 { 139500000, 0x02 },
602 { 146100000, 0x01 },
603 { 152600000, 0x00 },
604 { 154300000, 0x1F },
605 { 156100000, 0x1E },
606 { 157800000, 0x1D },
607 { 159500000, 0x1C },
608 { 161200000, 0x1B },
609 { 163000000, 0x1A },
610 { 164700000, 0x19 },
611 { 170200000, 0x17 },
612 { 175800000, 0x16 },
613 { 181300000, 0x15 },
614 { 186900000, 0x14 },
615 { 192400000, 0x13 },
616 { 198000000, 0x12 },
617 { 203500000, 0x11 },
618 { 216200000, 0x14 },
619 { 228900000, 0x13 },
620 { 241600000, 0x12 },
621 { 254400000, 0x11 },
622 { 267100000, 0x10 },
623 { 279800000, 0x0F },
624 { 292500000, 0x0E },
625 { 305200000, 0x0D },
626 { 317900000, 0x0C },
627 { 330700000, 0x0B },
628 { 343400000, 0x0A },
629 { 356100000, 0x09 },
630 { 368800000, 0x08 },
631 { 381500000, 0x07 },
632 { 394200000, 0x06 },
633 { 406900000, 0x05 },
634 { 419700000, 0x04 },
635 { 432400000, 0x03 },
636 { 445100000, 0x02 },
637 { 457800000, 0x01 },
638 { 476300000, 0x19 },
639 { 494800000, 0x18 },
640 { 513300000, 0x17 },
641 { 531800000, 0x16 },
642 { 550300000, 0x15 },
643 { 568900000, 0x14 },
644 { 587400000, 0x13 },
645 { 605900000, 0x12 },
646 { 624400000, 0x11 },
647 { 642900000, 0x10 },
648 { 661400000, 0x0F },
649 { 679900000, 0x0E },
650 { 698400000, 0x0D },
651 { 716900000, 0x0C },
652 { 735400000, 0x0B },
653 { 753900000, 0x0A },
654 { 772500000, 0x09 },
655 { 791000000, 0x08 },
656 { 809500000, 0x07 },
657 { 828000000, 0x06 },
658 { 846500000, 0x05 },
659 { 865000000, 0x04 },
660 { 0, 0x00 }, /* Table End */
661};
662
663static struct SMap m_RF_Cal_DC_Over_DT_Map[] = {
664 { 47900000, 0x00 },
665 { 55000000, 0x00 },
666 { 61100000, 0x0A },
667 { 64000000, 0x0A },
668 { 82000000, 0x14 },
669 { 84000000, 0x19 },
670 { 119000000, 0x1C },
671 { 124000000, 0x20 },
672 { 129000000, 0x2A },
673 { 134000000, 0x32 },
674 { 139000000, 0x39 },
675 { 144000000, 0x3E },
676 { 149000000, 0x3F },
677 { 152600000, 0x40 },
678 { 154000000, 0x40 },
679 { 164700000, 0x41 },
680 { 203500000, 0x32 },
681 { 353000000, 0x19 },
682 { 356000000, 0x1A },
683 { 359000000, 0x1B },
684 { 363000000, 0x1C },
685 { 366000000, 0x1D },
686 { 369000000, 0x1E },
687 { 373000000, 0x1F },
688 { 376000000, 0x20 },
689 { 379000000, 0x21 },
690 { 383000000, 0x22 },
691 { 386000000, 0x23 },
692 { 389000000, 0x24 },
693 { 393000000, 0x25 },
694 { 396000000, 0x26 },
695 { 399000000, 0x27 },
696 { 402000000, 0x28 },
697 { 404000000, 0x29 },
698 { 407000000, 0x2A },
699 { 409000000, 0x2B },
700 { 412000000, 0x2C },
701 { 414000000, 0x2D },
702 { 417000000, 0x2E },
703 { 419000000, 0x2F },
704 { 422000000, 0x30 },
705 { 424000000, 0x31 },
706 { 427000000, 0x32 },
707 { 429000000, 0x33 },
708 { 432000000, 0x34 },
709 { 434000000, 0x35 },
710 { 437000000, 0x36 },
711 { 439000000, 0x37 },
712 { 442000000, 0x38 },
713 { 444000000, 0x39 },
714 { 447000000, 0x3A },
715 { 449000000, 0x3B },
716 { 457800000, 0x3C },
717 { 465000000, 0x0F },
718 { 477000000, 0x12 },
719 { 483000000, 0x14 },
720 { 502000000, 0x19 },
721 { 508000000, 0x1B },
722 { 519000000, 0x1C },
723 { 522000000, 0x1D },
724 { 524000000, 0x1E },
725 { 534000000, 0x1F },
726 { 549000000, 0x20 },
727 { 554000000, 0x22 },
728 { 584000000, 0x24 },
729 { 589000000, 0x26 },
730 { 658000000, 0x27 },
731 { 664000000, 0x2C },
732 { 669000000, 0x2D },
733 { 699000000, 0x2E },
734 { 704000000, 0x30 },
735 { 709000000, 0x31 },
736 { 714000000, 0x32 },
737 { 724000000, 0x33 },
738 { 729000000, 0x36 },
739 { 739000000, 0x38 },
740 { 744000000, 0x39 },
741 { 749000000, 0x3B },
742 { 754000000, 0x3C },
743 { 759000000, 0x3D },
744 { 764000000, 0x3E },
745 { 769000000, 0x3F },
746 { 774000000, 0x40 },
747 { 779000000, 0x41 },
748 { 784000000, 0x43 },
749 { 789000000, 0x46 },
750 { 794000000, 0x48 },
751 { 799000000, 0x4B },
752 { 804000000, 0x4F },
753 { 809000000, 0x54 },
754 { 814000000, 0x59 },
755 { 819000000, 0x5D },
756 { 824000000, 0x61 },
757 { 829000000, 0x68 },
758 { 834000000, 0x6E },
759 { 839000000, 0x75 },
760 { 844000000, 0x7E },
761 { 849000000, 0x82 },
762 { 854000000, 0x84 },
763 { 859000000, 0x8F },
764 { 865000000, 0x9A },
765 { 0, 0x00 }, /* Table End */
766};
767
768
/* Image-rejection measurement setting per frequency range; looked up in
 * ChannelConfiguration() and written to the low bits of EP5.
 * Terminated by a 0-frequency entry. */
static struct SMap m_IR_Meas_Map[] = {
	{ 200000000, 0x05 },
	{ 400000000, 0x06 },
	{ 865000000, 0x07 },
	{         0, 0x00 },	/* Table End */
};
775
/* Per-frequency pair of values (terminated by a 0-frequency entry).
 * NOTE(review): presumably the channel-ID detection target level and a
 * count/timeout used during calibration — confirm against the caller. */
static struct SMap2 m_CID_Target_Map[] = {
	{  46000000, 0x04, 18 },
	{  52200000, 0x0A, 15 },
	{  70100000, 0x01, 40 },
	{ 136800000, 0x18, 40 },
	{ 156700000, 0x18, 40 },
	{ 186250000, 0x0A, 40 },
	{ 230000000, 0x0A, 40 },
	{ 345000000, 0x18, 40 },
	{ 426000000, 0x0E, 40 },
	{ 489500000, 0x1E, 40 },
	{ 697500000, 0x32, 40 },
	{ 842000000, 0x3A, 40 },
	{         0, 0x00,  0 },	/* Table End */
};
791
/* The seven RF bands: upper band limit followed by up to three per-band
 * calibration frequencies (0 = no calibration point).  The band index
 * produced by SearchMap4() selects the m_RF1/m_RF_A1/... entries used in
 * RFTrackingFiltersCorrection().
 * NOTE(review): field meaning inferred from usage — confirm against
 * struct SRFBandMap. */
static struct SRFBandMap m_RF_Band_Map[7] = {
	{  47900000,  46000000,         0,         0},
	{  61100000,  52200000,         0,         0},
	{ 152600000,  70100000, 136800000,         0},
	{ 164700000, 156700000,         0,         0},
	{ 203500000, 186250000,         0,         0},
	{ 457800000, 230000000, 345000000, 426000000},
	{ 865000000, 489500000, 697500000, 842000000},
};
801
802u8 m_Thermometer_Map_1[16] = {
803 60, 62, 66, 64,
804 74, 72, 68, 70,
805 90, 88, 84, 86,
806 76, 78, 82, 80,
807};
808
809u8 m_Thermometer_Map_2[16] = {
810 92, 94, 98, 96,
811 106, 104, 100, 102,
812 122, 120, 116, 118,
813 108, 110, 114, 112,
814};
diff --git a/drivers/media/dvb/ngene/Kconfig b/drivers/media/dvb/ngene/Kconfig
index cec242b7c00d..64c84702ba5c 100644
--- a/drivers/media/dvb/ngene/Kconfig
+++ b/drivers/media/dvb/ngene/Kconfig
@@ -5,6 +5,8 @@ config DVB_NGENE
5 select DVB_STV6110x if !DVB_FE_CUSTOMISE 5 select DVB_STV6110x if !DVB_FE_CUSTOMISE
6 select DVB_STV090x if !DVB_FE_CUSTOMISE 6 select DVB_STV090x if !DVB_FE_CUSTOMISE
7 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 7 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
8 select DVB_DRXK if !DVB_FE_CUSTOMISE
9 select DVB_TDA18271C2DD if !DVB_FE_CUSTOMISE
8 select MEDIA_TUNER_MT2131 if !MEDIA_TUNER_CUSTOMISE 10 select MEDIA_TUNER_MT2131 if !MEDIA_TUNER_CUSTOMISE
9 ---help--- 11 ---help---
10 Support for Micronas PCI express cards with nGene bridge. 12 Support for Micronas PCI express cards with nGene bridge.
diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
index fcf4be901ec8..056419228363 100644
--- a/drivers/media/dvb/ngene/ngene-cards.c
+++ b/drivers/media/dvb/ngene/ngene-cards.c
@@ -40,6 +40,8 @@
40#include "lnbh24.h" 40#include "lnbh24.h"
41#include "lgdt330x.h" 41#include "lgdt330x.h"
42#include "mt2131.h" 42#include "mt2131.h"
43#include "tda18271c2dd.h"
44#include "drxk.h"
43 45
44 46
45/****************************************************************************/ 47/****************************************************************************/
@@ -83,6 +85,49 @@ static int tuner_attach_stv6110(struct ngene_channel *chan)
83} 85}
84 86
85 87
88static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
89{
90 struct ngene_channel *chan = fe->sec_priv;
91 int status;
92
93 if (enable) {
94 down(&chan->dev->pll_mutex);
95 status = chan->gate_ctrl(fe, 1);
96 } else {
97 status = chan->gate_ctrl(fe, 0);
98 up(&chan->dev->pll_mutex);
99 }
100 return status;
101}
102
103static int tuner_attach_tda18271(struct ngene_channel *chan)
104{
105 struct i2c_adapter *i2c;
106 struct dvb_frontend *fe;
107
108 i2c = &chan->dev->channel[0].i2c_adapter;
109 if (chan->fe->ops.i2c_gate_ctrl)
110 chan->fe->ops.i2c_gate_ctrl(chan->fe, 1);
111 fe = dvb_attach(tda18271c2dd_attach, chan->fe, i2c, 0x60);
112 if (chan->fe->ops.i2c_gate_ctrl)
113 chan->fe->ops.i2c_gate_ctrl(chan->fe, 0);
114 if (!fe) {
115 printk(KERN_ERR "No TDA18271 found!\n");
116 return -ENODEV;
117 }
118
119 return 0;
120}
121
122static int tuner_attach_probe(struct ngene_channel *chan)
123{
124 if (chan->demod_type == 0)
125 return tuner_attach_stv6110(chan);
126 if (chan->demod_type == 1)
127 return tuner_attach_tda18271(chan);
128 return -EINVAL;
129}
130
86static int demod_attach_stv0900(struct ngene_channel *chan) 131static int demod_attach_stv0900(struct ngene_channel *chan)
87{ 132{
88 struct i2c_adapter *i2c; 133 struct i2c_adapter *i2c;
@@ -130,6 +175,60 @@ static void cineS2_tuner_i2c_lock(struct dvb_frontend *fe, int lock)
130 up(&chan->dev->pll_mutex); 175 up(&chan->dev->pll_mutex);
131} 176}
132 177
178static int i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val)
179{
180 struct i2c_msg msgs[1] = {{.addr = adr, .flags = I2C_M_RD,
181 .buf = val, .len = 1 } };
182 return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1;
183}
184
185static int i2c_read_reg16(struct i2c_adapter *adapter, u8 adr,
186 u16 reg, u8 *val)
187{
188 u8 msg[2] = {reg>>8, reg&0xff};
189 struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
190 .buf = msg, .len = 2},
191 {.addr = adr, .flags = I2C_M_RD,
192 .buf = val, .len = 1} };
193 return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
194}
195
196static int port_has_stv0900(struct i2c_adapter *i2c, int port)
197{
198 u8 val;
199 if (i2c_read_reg16(i2c, 0x68+port/2, 0xf100, &val) < 0)
200 return 0;
201 return 1;
202}
203
204static int port_has_drxk(struct i2c_adapter *i2c, int port)
205{
206 u8 val;
207
208 if (i2c_read(i2c, 0x29+port, &val) < 0)
209 return 0;
210 return 1;
211}
212
213static int demod_attach_drxk(struct ngene_channel *chan,
214 struct i2c_adapter *i2c)
215{
216 struct drxk_config config;
217
218 memset(&config, 0, sizeof(config));
219 config.adr = 0x29 + (chan->number ^ 2);
220
221 chan->fe = dvb_attach(drxk_attach, &config, i2c, &chan->fe2);
222 if (!chan->fe) {
223 printk(KERN_ERR "No DRXK found!\n");
224 return -ENODEV;
225 }
226 chan->fe->sec_priv = chan;
227 chan->gate_ctrl = chan->fe->ops.i2c_gate_ctrl;
228 chan->fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
229 return 0;
230}
231
133static int cineS2_probe(struct ngene_channel *chan) 232static int cineS2_probe(struct ngene_channel *chan)
134{ 233{
135 struct i2c_adapter *i2c; 234 struct i2c_adapter *i2c;
@@ -144,43 +243,42 @@ static int cineS2_probe(struct ngene_channel *chan)
144 else 243 else
145 i2c = &chan->dev->channel[1].i2c_adapter; 244 i2c = &chan->dev->channel[1].i2c_adapter;
146 245
147 fe_conf = chan->dev->card_info->fe_config[chan->number]; 246 if (port_has_stv0900(i2c, chan->number)) {
148 i2c_msg.addr = fe_conf->address; 247 chan->demod_type = 0;
149 248 fe_conf = chan->dev->card_info->fe_config[chan->number];
150 /* probe demod */ 249 /* demod found, attach it */
151 i2c_msg.len = 2; 250 rc = demod_attach_stv0900(chan);
152 buf[0] = 0xf1; 251 if (rc < 0 || chan->number < 2)
153 buf[1] = 0x00; 252 return rc;
154 rc = i2c_transfer(i2c, &i2c_msg, 1); 253
155 if (rc != 1) 254 /* demod #2: reprogram outputs DPN1 & DPN2 */
156 return -ENODEV; 255 i2c_msg.addr = fe_conf->address;
157 256 i2c_msg.len = 3;
158 /* demod found, attach it */ 257 buf[0] = 0xf1;
159 rc = demod_attach_stv0900(chan); 258 switch (chan->number) {
160 if (rc < 0 || chan->number < 2) 259 case 2:
161 return rc; 260 buf[1] = 0x5c;
162 261 buf[2] = 0xc2;
163 /* demod #2: reprogram outputs DPN1 & DPN2 */ 262 break;
164 i2c_msg.len = 3; 263 case 3:
165 buf[0] = 0xf1; 264 buf[1] = 0x61;
166 switch (chan->number) { 265 buf[2] = 0xcc;
167 case 2: 266 break;
168 buf[1] = 0x5c; 267 default:
169 buf[2] = 0xc2; 268 return -ENODEV;
170 break; 269 }
171 case 3: 270 rc = i2c_transfer(i2c, &i2c_msg, 1);
172 buf[1] = 0x61; 271 if (rc != 1) {
173 buf[2] = 0xcc; 272 printk(KERN_ERR DEVICE_NAME ": could not setup DPNx\n");
174 break; 273 return -EIO;
175 default: 274 }
275 } else if (port_has_drxk(i2c, chan->number^2)) {
276 chan->demod_type = 1;
277 demod_attach_drxk(chan, i2c);
278 } else {
279 printk(KERN_ERR "No demod found on chan %d\n", chan->number);
176 return -ENODEV; 280 return -ENODEV;
177 } 281 }
178 rc = i2c_transfer(i2c, &i2c_msg, 1);
179 if (rc != 1) {
180 printk(KERN_ERR DEVICE_NAME ": could not setup DPNx\n");
181 return -EIO;
182 }
183
184 return 0; 282 return 0;
185} 283}
186 284
@@ -306,7 +404,7 @@ static struct ngene_info ngene_info_satixS2v2 = {
306 .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, 404 .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
307 NGENE_IO_TSOUT}, 405 NGENE_IO_TSOUT},
308 .demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe}, 406 .demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe},
309 .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110}, 407 .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_probe, tuner_attach_probe},
310 .fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2}, 408 .fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
311 .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1}, 409 .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
312 .lnb = {0x0a, 0x08, 0x0b, 0x09}, 410 .lnb = {0x0a, 0x08, 0x0b, 0x09},
@@ -321,7 +419,7 @@ static struct ngene_info ngene_info_cineS2v5 = {
321 .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, 419 .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
322 NGENE_IO_TSOUT}, 420 NGENE_IO_TSOUT},
323 .demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe}, 421 .demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe},
324 .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110}, 422 .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_probe, tuner_attach_probe},
325 .fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2}, 423 .fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
326 .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1}, 424 .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
327 .lnb = {0x0a, 0x08, 0x0b, 0x09}, 425 .lnb = {0x0a, 0x08, 0x0b, 0x09},
@@ -331,13 +429,13 @@ static struct ngene_info ngene_info_cineS2v5 = {
331}; 429};
332 430
333 431
334static struct ngene_info ngene_info_duoFlexS2 = { 432static struct ngene_info ngene_info_duoFlex = {
335 .type = NGENE_SIDEWINDER, 433 .type = NGENE_SIDEWINDER,
336 .name = "Digital Devices DuoFlex S2 miniPCIe", 434 .name = "Digital Devices DuoFlex PCIe or miniPCIe",
337 .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, 435 .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
338 NGENE_IO_TSOUT}, 436 NGENE_IO_TSOUT},
339 .demod_attach = {cineS2_probe, cineS2_probe, cineS2_probe, cineS2_probe}, 437 .demod_attach = {cineS2_probe, cineS2_probe, cineS2_probe, cineS2_probe},
340 .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110}, 438 .tuner_attach = {tuner_attach_probe, tuner_attach_probe, tuner_attach_probe, tuner_attach_probe},
341 .fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2}, 439 .fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
342 .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1}, 440 .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
343 .lnb = {0x0a, 0x08, 0x0b, 0x09}, 441 .lnb = {0x0a, 0x08, 0x0b, 0x09},
@@ -385,8 +483,8 @@ static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
385 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2), 483 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
386 NGENE_ID(0x18c3, 0xdb02, ngene_info_satixS2v2), 484 NGENE_ID(0x18c3, 0xdb02, ngene_info_satixS2v2),
387 NGENE_ID(0x18c3, 0xdd00, ngene_info_cineS2v5), 485 NGENE_ID(0x18c3, 0xdd00, ngene_info_cineS2v5),
388 NGENE_ID(0x18c3, 0xdd10, ngene_info_duoFlexS2), 486 NGENE_ID(0x18c3, 0xdd10, ngene_info_duoFlex),
389 NGENE_ID(0x18c3, 0xdd20, ngene_info_duoFlexS2), 487 NGENE_ID(0x18c3, 0xdd20, ngene_info_duoFlex),
390 NGENE_ID(0x1461, 0x062e, ngene_info_m780), 488 NGENE_ID(0x1461, 0x062e, ngene_info_m780),
391 {0} 489 {0}
392}; 490};
diff --git a/drivers/media/dvb/ngene/ngene-core.c b/drivers/media/dvb/ngene/ngene-core.c
index 6927c726ce35..f129a9303f80 100644
--- a/drivers/media/dvb/ngene/ngene-core.c
+++ b/drivers/media/dvb/ngene/ngene-core.c
@@ -41,7 +41,7 @@
41 41
42#include "ngene.h" 42#include "ngene.h"
43 43
44static int one_adapter = 1; 44static int one_adapter;
45module_param(one_adapter, int, 0444); 45module_param(one_adapter, int, 0444);
46MODULE_PARM_DESC(one_adapter, "Use only one adapter."); 46MODULE_PARM_DESC(one_adapter, "Use only one adapter.");
47 47
@@ -461,7 +461,7 @@ static u8 TSFeatureDecoderSetup[8 * 5] = {
461 0x42, 0x00, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, 461 0x42, 0x00, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00,
462 0x40, 0x06, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* DRXH */ 462 0x40, 0x06, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* DRXH */
463 0x71, 0x07, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* DRXHser */ 463 0x71, 0x07, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* DRXHser */
464 0x72, 0x06, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* S2ser */ 464 0x72, 0x00, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* S2ser */
465 0x40, 0x07, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* LGDT3303 */ 465 0x40, 0x07, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* LGDT3303 */
466}; 466};
467 467
@@ -507,7 +507,7 @@ void FillTSBuffer(void *Buffer, int Length, u32 Flags)
507{ 507{
508 u32 *ptr = Buffer; 508 u32 *ptr = Buffer;
509 509
510 memset(Buffer, 0xff, Length); 510 memset(Buffer, TS_FILLER, Length);
511 while (Length > 0) { 511 while (Length > 0) {
512 if (Flags & DF_SWAP32) 512 if (Flags & DF_SWAP32)
513 *ptr = 0x471FFF10; 513 *ptr = 0x471FFF10;
@@ -1443,6 +1443,9 @@ static void release_channel(struct ngene_channel *chan)
1443 chan->ci_dev = NULL; 1443 chan->ci_dev = NULL;
1444 } 1444 }
1445 1445
1446 if (chan->fe2)
1447 dvb_unregister_frontend(chan->fe2);
1448
1446 if (chan->fe) { 1449 if (chan->fe) {
1447 dvb_unregister_frontend(chan->fe); 1450 dvb_unregister_frontend(chan->fe);
1448 dvb_frontend_detach(chan->fe); 1451 dvb_frontend_detach(chan->fe);
@@ -1534,6 +1537,14 @@ static int init_channel(struct ngene_channel *chan)
1534 goto err; 1537 goto err;
1535 chan->has_demux = true; 1538 chan->has_demux = true;
1536 } 1539 }
1540 if (chan->fe2) {
1541 if (dvb_register_frontend(adapter, chan->fe2) < 0)
1542 goto err;
1543 chan->fe2->tuner_priv = chan->fe->tuner_priv;
1544 memcpy(&chan->fe2->ops.tuner_ops,
1545 &chan->fe->ops.tuner_ops,
1546 sizeof(struct dvb_tuner_ops));
1547 }
1537 1548
1538 if (chan->has_demux) { 1549 if (chan->has_demux) {
1539 ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux", 1550 ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
@@ -1571,11 +1582,18 @@ static int init_channels(struct ngene *dev)
1571 return 0; 1582 return 0;
1572} 1583}
1573 1584
1585static struct cxd2099_cfg cxd_cfg = {
1586 .bitrate = 62000,
1587 .adr = 0x40,
1588 .polarity = 0,
1589 .clock_mode = 0,
1590};
1591
1574static void cxd_attach(struct ngene *dev) 1592static void cxd_attach(struct ngene *dev)
1575{ 1593{
1576 struct ngene_ci *ci = &dev->ci; 1594 struct ngene_ci *ci = &dev->ci;
1577 1595
1578 ci->en = cxd2099_attach(0x40, dev, &dev->channel[0].i2c_adapter); 1596 ci->en = cxd2099_attach(&cxd_cfg, dev, &dev->channel[0].i2c_adapter);
1579 ci->dev = dev; 1597 ci->dev = dev;
1580 return; 1598 return;
1581} 1599}
diff --git a/drivers/media/dvb/ngene/ngene-dvb.c b/drivers/media/dvb/ngene/ngene-dvb.c
index 0b4943233166..fcb16a615aab 100644
--- a/drivers/media/dvb/ngene/ngene-dvb.c
+++ b/drivers/media/dvb/ngene/ngene-dvb.c
@@ -118,6 +118,16 @@ static void swap_buffer(u32 *p, u32 len)
118 } 118 }
119} 119}
120 120
121/* start of filler packet */
122static u8 fill_ts[] = { 0x47, 0x1f, 0xff, 0x10, TS_FILLER };
123
124/* #define DEBUG_CI_XFER */
125#ifdef DEBUG_CI_XFER
126static u32 ok;
127static u32 overflow;
128static u32 stripped;
129#endif
130
121void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags) 131void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
122{ 132{
123 struct ngene_channel *chan = priv; 133 struct ngene_channel *chan = priv;
@@ -126,21 +136,41 @@ void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
126 136
127 if (flags & DF_SWAP32) 137 if (flags & DF_SWAP32)
128 swap_buffer(buf, len); 138 swap_buffer(buf, len);
139
129 if (dev->ci.en && chan->number == 2) { 140 if (dev->ci.en && chan->number == 2) {
130 if (dvb_ringbuffer_free(&dev->tsin_rbuf) > len) { 141 while (len >= 188) {
131 dvb_ringbuffer_write(&dev->tsin_rbuf, buf, len); 142 if (memcmp(buf, fill_ts, sizeof fill_ts) != 0) {
132 wake_up_interruptible(&dev->tsin_rbuf.queue); 143 if (dvb_ringbuffer_free(&dev->tsin_rbuf) >= 188) {
144 dvb_ringbuffer_write(&dev->tsin_rbuf, buf, 188);
145 wake_up(&dev->tsin_rbuf.queue);
146#ifdef DEBUG_CI_XFER
147 ok++;
148#endif
149 }
150#ifdef DEBUG_CI_XFER
151 else
152 overflow++;
153#endif
154 }
155#ifdef DEBUG_CI_XFER
156 else
157 stripped++;
158
159 if (ok % 100 == 0 && overflow)
160 printk(KERN_WARNING "%s: ok %u overflow %u dropped %u\n", __func__, ok, overflow, stripped);
161#endif
162 buf += 188;
163 len -= 188;
133 } 164 }
134 return 0; 165 return NULL;
135 } 166 }
136 if (chan->users > 0) { 167
168 if (chan->users > 0)
137 dvb_dmx_swfilter(&chan->demux, buf, len); 169 dvb_dmx_swfilter(&chan->demux, buf, len);
138 } 170
139 return NULL; 171 return NULL;
140} 172}
141 173
142u8 fill_ts[188] = { 0x47, 0x1f, 0xff, 0x10 };
143
144void *tsout_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags) 174void *tsout_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
145{ 175{
146 struct ngene_channel *chan = priv; 176 struct ngene_channel *chan = priv;
diff --git a/drivers/media/dvb/ngene/ngene.h b/drivers/media/dvb/ngene/ngene.h
index 40fce9e3ae66..5443dc0caea5 100644
--- a/drivers/media/dvb/ngene/ngene.h
+++ b/drivers/media/dvb/ngene/ngene.h
@@ -641,8 +641,11 @@ struct ngene_channel {
641 int mode; 641 int mode;
642 bool has_adapter; 642 bool has_adapter;
643 bool has_demux; 643 bool has_demux;
644 int demod_type;
645 int (*gate_ctrl)(struct dvb_frontend *, int);
644 646
645 struct dvb_frontend *fe; 647 struct dvb_frontend *fe;
648 struct dvb_frontend *fe2;
646 struct dmxdev dmxdev; 649 struct dmxdev dmxdev;
647 struct dvb_demux demux; 650 struct dvb_demux demux;
648 struct dvb_net dvbnet; 651 struct dvb_net dvbnet;
@@ -786,6 +789,8 @@ struct ngene {
786 u8 uart_rbuf[UART_RBUF_LEN]; 789 u8 uart_rbuf[UART_RBUF_LEN];
787 int uart_rp, uart_wp; 790 int uart_rp, uart_wp;
788 791
792#define TS_FILLER 0x6f
793
789 u8 *tsout_buf; 794 u8 *tsout_buf;
790#define TSOUT_BUF_SIZE (512*188*8) 795#define TSOUT_BUF_SIZE (512*188*8)
791 struct dvb_ringbuffer tsout_rbuf; 796 struct dvb_ringbuffer tsout_rbuf;
@@ -852,7 +857,7 @@ struct ngene_info {
852}; 857};
853 858
854#ifdef NGENE_V4L 859#ifdef NGENE_V4L
855struct ngene_format{ 860struct ngene_format {
856 char *name; 861 char *name;
857 int fourcc; /* video4linux 2 */ 862 int fourcc; /* video4linux 2 */
858 int btformat; /* BT848_COLOR_FMT_* */ 863 int btformat; /* BT848_COLOR_FMT_* */
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index 78765ed28063..7331e8450d1a 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -1147,7 +1147,7 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
1147 1147
1148 if (!client) { 1148 if (!client) {
1149 sms_err("bad parameter."); 1149 sms_err("bad parameter.");
1150 return -EFAULT; 1150 return -EINVAL;
1151 } 1151 }
1152 registered_client = smscore_find_client(coredev, data_type, id); 1152 registered_client = smscore_find_client(coredev, data_type, id);
1153 if (registered_client == client) 1153 if (registered_client == client)
diff --git a/drivers/media/dvb/siano/smscoreapi.h b/drivers/media/dvb/siano/smscoreapi.h
index 8ecadecaa9d0..c592ae090397 100644
--- a/drivers/media/dvb/siano/smscoreapi.h
+++ b/drivers/media/dvb/siano/smscoreapi.h
@@ -22,7 +22,6 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
22#ifndef __SMS_CORE_API_H__ 22#ifndef __SMS_CORE_API_H__
23#define __SMS_CORE_API_H__ 23#define __SMS_CORE_API_H__
24 24
25#include <linux/version.h>
26#include <linux/device.h> 25#include <linux/device.h>
27#include <linux/list.h> 26#include <linux/list.h>
28#include <linux/mm.h> 27#include <linux/mm.h>
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index 3d8cc425fa6b..25e58cbf35f0 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -102,10 +102,7 @@
102/* 102/*
103 * Version Information 103 * Version Information
104 */ 104 */
105#include <linux/version.h> /* for KERNEL_VERSION MACRO */ 105#define DRIVER_VERSION "0.4.7"
106
107#define DRIVER_VERSION "v0.46"
108#define RADIO_VERSION KERNEL_VERSION(0, 4, 6)
109 106
110#define DRIVER_AUTHOR "Markus Demleitner <msdemlei@tucana.harvard.edu>" 107#define DRIVER_AUTHOR "Markus Demleitner <msdemlei@tucana.harvard.edu>"
111#define DRIVER_DESC "D-Link DSB-R100 USB FM radio driver" 108#define DRIVER_DESC "D-Link DSB-R100 USB FM radio driver"
@@ -335,7 +332,6 @@ static int vidioc_querycap(struct file *file, void *priv,
335 strlcpy(v->driver, "dsbr100", sizeof(v->driver)); 332 strlcpy(v->driver, "dsbr100", sizeof(v->driver));
336 strlcpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card)); 333 strlcpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card));
337 usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info)); 334 usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
338 v->version = RADIO_VERSION;
339 v->capabilities = V4L2_CAP_TUNER; 335 v->capabilities = V4L2_CAP_TUNER;
340 return 0; 336 return 0;
341} 337}
@@ -647,3 +643,4 @@ module_exit (dsbr100_exit);
647MODULE_AUTHOR( DRIVER_AUTHOR ); 643MODULE_AUTHOR( DRIVER_AUTHOR );
648MODULE_DESCRIPTION( DRIVER_DESC ); 644MODULE_DESCRIPTION( DRIVER_DESC );
649MODULE_LICENSE("GPL"); 645MODULE_LICENSE("GPL");
646MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 4ce10dbeadd8..1c3f8440a55c 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -33,7 +33,6 @@
33#include <linux/ioport.h> /* request_region */ 33#include <linux/ioport.h> /* request_region */
34#include <linux/delay.h> /* msleep */ 34#include <linux/delay.h> /* msleep */
35#include <linux/videodev2.h> /* kernel radio structs */ 35#include <linux/videodev2.h> /* kernel radio structs */
36#include <linux/version.h> /* for KERNEL_VERSION MACRO */
37#include <linux/io.h> /* outb, outb_p */ 36#include <linux/io.h> /* outb, outb_p */
38#include <media/v4l2-device.h> 37#include <media/v4l2-device.h>
39#include <media/v4l2-ioctl.h> 38#include <media/v4l2-ioctl.h>
@@ -41,6 +40,7 @@
41MODULE_AUTHOR("M.Kirkwood"); 40MODULE_AUTHOR("M.Kirkwood");
42MODULE_DESCRIPTION("A driver for the RadioTrack/RadioReveal radio card."); 41MODULE_DESCRIPTION("A driver for the RadioTrack/RadioReveal radio card.");
43MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43MODULE_VERSION("0.0.3");
44 44
45#ifndef CONFIG_RADIO_RTRACK_PORT 45#ifndef CONFIG_RADIO_RTRACK_PORT
46#define CONFIG_RADIO_RTRACK_PORT -1 46#define CONFIG_RADIO_RTRACK_PORT -1
@@ -53,8 +53,6 @@ module_param(io, int, 0);
53MODULE_PARM_DESC(io, "I/O address of the RadioTrack card (0x20f or 0x30f)"); 53MODULE_PARM_DESC(io, "I/O address of the RadioTrack card (0x20f or 0x30f)");
54module_param(radio_nr, int, 0); 54module_param(radio_nr, int, 0);
55 55
56#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
57
58struct rtrack 56struct rtrack
59{ 57{
60 struct v4l2_device v4l2_dev; 58 struct v4l2_device v4l2_dev;
@@ -223,7 +221,6 @@ static int vidioc_querycap(struct file *file, void *priv,
223 strlcpy(v->driver, "radio-aimslab", sizeof(v->driver)); 221 strlcpy(v->driver, "radio-aimslab", sizeof(v->driver));
224 strlcpy(v->card, "RadioTrack", sizeof(v->card)); 222 strlcpy(v->card, "RadioTrack", sizeof(v->card));
225 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info)); 223 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
226 v->version = RADIO_VERSION;
227 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; 224 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
228 return 0; 225 return 0;
229} 226}
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index dd8a6ab0d437..eed7b0840734 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -30,7 +30,6 @@
30#include <linux/ioport.h> /* request_region */ 30#include <linux/ioport.h> /* request_region */
31#include <linux/delay.h> /* udelay */ 31#include <linux/delay.h> /* udelay */
32#include <linux/videodev2.h> /* kernel radio structs */ 32#include <linux/videodev2.h> /* kernel radio structs */
33#include <linux/version.h> /* for KERNEL_VERSION MACRO */
34#include <linux/io.h> /* outb, outb_p */ 33#include <linux/io.h> /* outb, outb_p */
35#include <media/v4l2-device.h> 34#include <media/v4l2-device.h>
36#include <media/v4l2-ioctl.h> 35#include <media/v4l2-ioctl.h>
@@ -38,6 +37,7 @@
38MODULE_AUTHOR("Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath"); 37MODULE_AUTHOR("Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath");
39MODULE_DESCRIPTION("A driver for the Aztech radio card."); 38MODULE_DESCRIPTION("A driver for the Aztech radio card.");
40MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40MODULE_VERSION("0.0.3");
41 41
42/* acceptable ports: 0x350 (JP3 shorted), 0x358 (JP3 open) */ 42/* acceptable ports: 0x350 (JP3 shorted), 0x358 (JP3 open) */
43 43
@@ -53,8 +53,6 @@ module_param(io, int, 0);
53module_param(radio_nr, int, 0); 53module_param(radio_nr, int, 0);
54MODULE_PARM_DESC(io, "I/O address of the Aztech card (0x350 or 0x358)"); 54MODULE_PARM_DESC(io, "I/O address of the Aztech card (0x350 or 0x358)");
55 55
56#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
57
58struct aztech 56struct aztech
59{ 57{
60 struct v4l2_device v4l2_dev; 58 struct v4l2_device v4l2_dev;
@@ -188,7 +186,6 @@ static int vidioc_querycap(struct file *file, void *priv,
188 strlcpy(v->driver, "radio-aztech", sizeof(v->driver)); 186 strlcpy(v->driver, "radio-aztech", sizeof(v->driver));
189 strlcpy(v->card, "Aztech Radio", sizeof(v->card)); 187 strlcpy(v->card, "Aztech Radio", sizeof(v->card));
190 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info)); 188 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
191 v->version = RADIO_VERSION;
192 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; 189 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
193 return 0; 190 return 0;
194} 191}
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index bc9ad0897c55..16a089fad909 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -30,7 +30,6 @@
30 * Changed API to V4L2 30 * Changed API to V4L2
31 */ 31 */
32 32
33#include <linux/version.h>
34#include <linux/module.h> /* Modules */ 33#include <linux/module.h> /* Modules */
35#include <linux/init.h> /* Initdata */ 34#include <linux/init.h> /* Initdata */
36#include <linux/ioport.h> /* request_region */ 35#include <linux/ioport.h> /* request_region */
@@ -46,6 +45,7 @@
46MODULE_AUTHOR("Fred Gleason, Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath"); 45MODULE_AUTHOR("Fred Gleason, Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath");
47MODULE_DESCRIPTION("A driver for the ADS Cadet AM/FM/RDS radio card."); 46MODULE_DESCRIPTION("A driver for the ADS Cadet AM/FM/RDS radio card.");
48MODULE_LICENSE("GPL"); 47MODULE_LICENSE("GPL");
48MODULE_VERSION("0.3.4");
49 49
50static int io = -1; /* default to isapnp activation */ 50static int io = -1; /* default to isapnp activation */
51static int radio_nr = -1; 51static int radio_nr = -1;
@@ -54,8 +54,6 @@ module_param(io, int, 0);
54MODULE_PARM_DESC(io, "I/O address of Cadet card (0x330,0x332,0x334,0x336,0x338,0x33a,0x33c,0x33e)"); 54MODULE_PARM_DESC(io, "I/O address of Cadet card (0x330,0x332,0x334,0x336,0x338,0x33a,0x33c,0x33e)");
55module_param(radio_nr, int, 0); 55module_param(radio_nr, int, 0);
56 56
57#define CADET_VERSION KERNEL_VERSION(0, 3, 3)
58
59#define RDS_BUFFER 256 57#define RDS_BUFFER 256
60#define RDS_RX_FLAG 1 58#define RDS_RX_FLAG 1
61#define MBS_RX_FLAG 2 59#define MBS_RX_FLAG 2
@@ -361,7 +359,6 @@ static int vidioc_querycap(struct file *file, void *priv,
361 strlcpy(v->driver, "ADS Cadet", sizeof(v->driver)); 359 strlcpy(v->driver, "ADS Cadet", sizeof(v->driver));
362 strlcpy(v->card, "ADS Cadet", sizeof(v->card)); 360 strlcpy(v->card, "ADS Cadet", sizeof(v->card));
363 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info)); 361 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
364 v->version = CADET_VERSION;
365 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO | 362 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
366 V4L2_CAP_READWRITE | V4L2_CAP_RDS_CAPTURE; 363 V4L2_CAP_READWRITE | V4L2_CAP_RDS_CAPTURE;
367 return 0; 364 return 0;
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 259936422e49..edadc8449a3d 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -21,21 +21,19 @@
21#include <linux/ioport.h> /* request_region */ 21#include <linux/ioport.h> /* request_region */
22#include <linux/delay.h> /* udelay */ 22#include <linux/delay.h> /* udelay */
23#include <linux/videodev2.h> /* kernel radio structs */ 23#include <linux/videodev2.h> /* kernel radio structs */
24#include <linux/version.h> /* for KERNEL_VERSION MACRO */
25#include <linux/mutex.h> 24#include <linux/mutex.h>
26#include <linux/io.h> /* outb, outb_p */ 25#include <linux/io.h> /* outb, outb_p */
27#include <media/v4l2-ioctl.h> 26#include <media/v4l2-ioctl.h>
28#include <media/v4l2-device.h> 27#include <media/v4l2-device.h>
29 28
30#define RADIO_VERSION KERNEL_VERSION(0, 0, 3)
31
32/* 29/*
33 * Module info. 30 * Module info.
34 */ 31 */
35 32
36MODULE_AUTHOR("Jonas Munsin, Pekka Seppänen <pexu@kapsi.fi>"); 33MODULE_AUTHOR("Jonas Munsin, Pekka Seppänen <pexu@kapsi.fi>");
37MODULE_DESCRIPTION("A driver for the GemTek Radio card."); 34MODULE_DESCRIPTION("A driver for the GemTek Radio card.");
38MODULE_LICENSE("GPL"); 35MODULE_LICENSE("GPL");
36MODULE_VERSION("0.0.4");
39 37
40/* 38/*
41 * Module params. 39 * Module params.
@@ -387,7 +385,6 @@ static int vidioc_querycap(struct file *file, void *priv,
387 strlcpy(v->driver, "radio-gemtek", sizeof(v->driver)); 385 strlcpy(v->driver, "radio-gemtek", sizeof(v->driver));
388 strlcpy(v->card, "GemTek", sizeof(v->card)); 386 strlcpy(v->card, "GemTek", sizeof(v->card));
389 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info)); 387 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
390 v->version = RADIO_VERSION;
391 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; 388 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
392 return 0; 389 return 0;
393} 390}
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index e83e84003025..f872a54cf3d9 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -40,15 +40,18 @@
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <linux/pci.h> 41#include <linux/pci.h>
42#include <linux/videodev2.h> 42#include <linux/videodev2.h>
43#include <linux/version.h> /* for KERNEL_VERSION MACRO */
44#include <linux/io.h> 43#include <linux/io.h>
45#include <linux/slab.h> 44#include <linux/slab.h>
46#include <media/v4l2-device.h> 45#include <media/v4l2-device.h>
47#include <media/v4l2-ioctl.h> 46#include <media/v4l2-ioctl.h>
48 47
48#define DRIVER_VERSION "0.7.8"
49
50
49MODULE_AUTHOR("Dimitromanolakis Apostolos, apdim@grecian.net"); 51MODULE_AUTHOR("Dimitromanolakis Apostolos, apdim@grecian.net");
50MODULE_DESCRIPTION("Radio driver for the Guillemot Maxi Radio FM2000 radio."); 52MODULE_DESCRIPTION("Radio driver for the Guillemot Maxi Radio FM2000 radio.");
51MODULE_LICENSE("GPL"); 53MODULE_LICENSE("GPL");
54MODULE_VERSION(DRIVER_VERSION);
52 55
53static int radio_nr = -1; 56static int radio_nr = -1;
54module_param(radio_nr, int, 0); 57module_param(radio_nr, int, 0);
@@ -58,10 +61,6 @@ static int debug;
58module_param(debug, int, 0644); 61module_param(debug, int, 0644);
59MODULE_PARM_DESC(debug, "activates debug info"); 62MODULE_PARM_DESC(debug, "activates debug info");
60 63
61#define DRIVER_VERSION "0.77"
62
63#define RADIO_VERSION KERNEL_VERSION(0, 7, 7)
64
65#define dprintk(dev, num, fmt, arg...) \ 64#define dprintk(dev, num, fmt, arg...) \
66 v4l2_dbg(num, debug, &dev->v4l2_dev, fmt, ## arg) 65 v4l2_dbg(num, debug, &dev->v4l2_dev, fmt, ## arg)
67 66
@@ -195,7 +194,6 @@ static int vidioc_querycap(struct file *file, void *priv,
195 strlcpy(v->driver, "radio-maxiradio", sizeof(v->driver)); 194 strlcpy(v->driver, "radio-maxiradio", sizeof(v->driver));
196 strlcpy(v->card, "Maxi Radio FM2000 radio", sizeof(v->card)); 195 strlcpy(v->card, "Maxi Radio FM2000 radio", sizeof(v->card));
197 snprintf(v->bus_info, sizeof(v->bus_info), "PCI:%s", pci_name(dev->pdev)); 196 snprintf(v->bus_info, sizeof(v->bus_info), "PCI:%s", pci_name(dev->pdev));
198 v->version = RADIO_VERSION;
199 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; 197 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
200 return 0; 198 return 0;
201} 199}
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index b3a635b95820..1742bd8110bd 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -63,18 +63,17 @@
63#include <media/v4l2-device.h> 63#include <media/v4l2-device.h>
64#include <media/v4l2-ioctl.h> 64#include <media/v4l2-ioctl.h>
65#include <linux/usb.h> 65#include <linux/usb.h>
66#include <linux/version.h> /* for KERNEL_VERSION MACRO */
67#include <linux/mutex.h> 66#include <linux/mutex.h>
68 67
69/* driver and module definitions */ 68/* driver and module definitions */
70#define DRIVER_AUTHOR "Alexey Klimov <klimov.linux@gmail.com>" 69#define DRIVER_AUTHOR "Alexey Klimov <klimov.linux@gmail.com>"
71#define DRIVER_DESC "AverMedia MR 800 USB FM radio driver" 70#define DRIVER_DESC "AverMedia MR 800 USB FM radio driver"
72#define DRIVER_VERSION "0.11" 71#define DRIVER_VERSION "0.1.2"
73#define RADIO_VERSION KERNEL_VERSION(0, 1, 1)
74 72
75MODULE_AUTHOR(DRIVER_AUTHOR); 73MODULE_AUTHOR(DRIVER_AUTHOR);
76MODULE_DESCRIPTION(DRIVER_DESC); 74MODULE_DESCRIPTION(DRIVER_DESC);
77MODULE_LICENSE("GPL"); 75MODULE_LICENSE("GPL");
76MODULE_VERSION(DRIVER_VERSION);
78 77
79#define USB_AMRADIO_VENDOR 0x07ca 78#define USB_AMRADIO_VENDOR 0x07ca
80#define USB_AMRADIO_PRODUCT 0xb800 79#define USB_AMRADIO_PRODUCT 0xb800
@@ -301,7 +300,6 @@ static int vidioc_querycap(struct file *file, void *priv,
301 strlcpy(v->driver, "radio-mr800", sizeof(v->driver)); 300 strlcpy(v->driver, "radio-mr800", sizeof(v->driver));
302 strlcpy(v->card, "AverMedia MR 800 USB FM Radio", sizeof(v->card)); 301 strlcpy(v->card, "AverMedia MR 800 USB FM Radio", sizeof(v->card));
303 usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info)); 302 usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
304 v->version = RADIO_VERSION;
305 v->capabilities = V4L2_CAP_TUNER; 303 v->capabilities = V4L2_CAP_TUNER;
306 return 0; 304 return 0;
307} 305}
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index 8d6ea591bd18..3628be617ee9 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -15,7 +15,6 @@
15#include <linux/delay.h> /* udelay */ 15#include <linux/delay.h> /* udelay */
16#include <linux/videodev2.h> /* kernel radio structs */ 16#include <linux/videodev2.h> /* kernel radio structs */
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/version.h> /* for KERNEL_VERSION MACRO */
19#include <linux/io.h> /* outb, outb_p */ 18#include <linux/io.h> /* outb, outb_p */
20#include <media/v4l2-device.h> 19#include <media/v4l2-device.h>
21#include <media/v4l2-ioctl.h> 20#include <media/v4l2-ioctl.h>
@@ -23,6 +22,7 @@
23MODULE_AUTHOR("Ben Pfaff"); 22MODULE_AUTHOR("Ben Pfaff");
24MODULE_DESCRIPTION("A driver for the RadioTrack II radio card."); 23MODULE_DESCRIPTION("A driver for the RadioTrack II radio card.");
25MODULE_LICENSE("GPL"); 24MODULE_LICENSE("GPL");
25MODULE_VERSION("0.0.3");
26 26
27#ifndef CONFIG_RADIO_RTRACK2_PORT 27#ifndef CONFIG_RADIO_RTRACK2_PORT
28#define CONFIG_RADIO_RTRACK2_PORT -1 28#define CONFIG_RADIO_RTRACK2_PORT -1
@@ -35,8 +35,6 @@ module_param(io, int, 0);
35MODULE_PARM_DESC(io, "I/O address of the RadioTrack card (0x20c or 0x30c)"); 35MODULE_PARM_DESC(io, "I/O address of the RadioTrack card (0x20c or 0x30c)");
36module_param(radio_nr, int, 0); 36module_param(radio_nr, int, 0);
37 37
38#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
39
40struct rtrack2 38struct rtrack2
41{ 39{
42 struct v4l2_device v4l2_dev; 40 struct v4l2_device v4l2_dev;
@@ -121,7 +119,6 @@ static int vidioc_querycap(struct file *file, void *priv,
121 strlcpy(v->driver, "radio-rtrack2", sizeof(v->driver)); 119 strlcpy(v->driver, "radio-rtrack2", sizeof(v->driver));
122 strlcpy(v->card, "RadioTrack II", sizeof(v->card)); 120 strlcpy(v->card, "RadioTrack II", sizeof(v->card));
123 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info)); 121 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
124 v->version = RADIO_VERSION;
125 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; 122 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
126 return 0; 123 return 0;
127} 124}
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index b5a5f89e238a..22c5743bf9db 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -16,7 +16,6 @@
16 * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> 16 * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
17 */ 17 */
18 18
19#include <linux/version.h>
20#include <linux/kernel.h> /* __setup */ 19#include <linux/kernel.h> /* __setup */
21#include <linux/module.h> /* Modules */ 20#include <linux/module.h> /* Modules */
22#include <linux/init.h> /* Initdata */ 21#include <linux/init.h> /* Initdata */
@@ -32,6 +31,7 @@
32MODULE_AUTHOR("Petr Vandrovec, vandrove@vc.cvut.cz and M. Kirkwood"); 31MODULE_AUTHOR("Petr Vandrovec, vandrove@vc.cvut.cz and M. Kirkwood");
33MODULE_DESCRIPTION("A driver for the SF16-FMI and SF16-FMP radio."); 32MODULE_DESCRIPTION("A driver for the SF16-FMI and SF16-FMP radio.");
34MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
34MODULE_VERSION("0.0.3");
35 35
36static int io = -1; 36static int io = -1;
37static int radio_nr = -1; 37static int radio_nr = -1;
@@ -40,8 +40,6 @@ module_param(io, int, 0);
40MODULE_PARM_DESC(io, "I/O address of the SF16-FMI or SF16-FMP card (0x284 or 0x384)"); 40MODULE_PARM_DESC(io, "I/O address of the SF16-FMI or SF16-FMP card (0x284 or 0x384)");
41module_param(radio_nr, int, 0); 41module_param(radio_nr, int, 0);
42 42
43#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
44
45struct fmi 43struct fmi
46{ 44{
47 struct v4l2_device v4l2_dev; 45 struct v4l2_device v4l2_dev;
@@ -134,7 +132,6 @@ static int vidioc_querycap(struct file *file, void *priv,
134 strlcpy(v->driver, "radio-sf16fmi", sizeof(v->driver)); 132 strlcpy(v->driver, "radio-sf16fmi", sizeof(v->driver));
135 strlcpy(v->card, "SF16-FMx radio", sizeof(v->card)); 133 strlcpy(v->card, "SF16-FMx radio", sizeof(v->card));
136 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info)); 134 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
137 v->version = RADIO_VERSION;
138 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; 135 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
139 return 0; 136 return 0;
140} 137}
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 87bad7678d92..2dd485996ba8 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -1,441 +1,209 @@
1/* SF16FMR2 radio driver for Linux radio support 1/* SF16-FMR2 radio driver for Linux
2 * heavily based on fmi driver... 2 * Copyright (c) 2011 Ondrej Zary
3 * (c) 2000-2002 Ziglio Frediano, freddy77@angelfire.com
4 * 3 *
5 * Notes on the hardware 4 * Original driver was (c) 2000-2002 Ziglio Frediano, freddy77@angelfire.com
6 * 5 * but almost nothing remained here after conversion to generic TEA575x
7 * Frequency control is done digitally -- ie out(port,encodefreq(95.8)); 6 * implementation
8 * No volume control - only mute/unmute - you have to use line volume
9 *
10 * For read stereo/mono you must wait 0.1 sec after set frequency and
11 * card unmuted so I set frequency on unmute
12 * Signal handling seem to work only on autoscanning (not implemented)
13 *
14 * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
15 */ 7 */
16 8
9#include <linux/delay.h>
17#include <linux/module.h> /* Modules */ 10#include <linux/module.h> /* Modules */
18#include <linux/init.h> /* Initdata */ 11#include <linux/init.h> /* Initdata */
19#include <linux/ioport.h> /* request_region */ 12#include <linux/ioport.h> /* request_region */
20#include <linux/delay.h> /* udelay */
21#include <linux/videodev2.h> /* kernel radio structs */
22#include <linux/mutex.h>
23#include <linux/version.h> /* for KERNEL_VERSION MACRO */
24#include <linux/io.h> /* outb, outb_p */ 13#include <linux/io.h> /* outb, outb_p */
25#include <media/v4l2-device.h> 14#include <sound/tea575x-tuner.h>
26#include <media/v4l2-ioctl.h>
27 15
28MODULE_AUTHOR("Ziglio Frediano, freddy77@angelfire.com"); 16MODULE_AUTHOR("Ondrej Zary");
29MODULE_DESCRIPTION("A driver for the SF16FMR2 radio."); 17MODULE_DESCRIPTION("MediaForte SF16-FMR2 FM radio card driver");
30MODULE_LICENSE("GPL"); 18MODULE_LICENSE("GPL");
31 19
32static int io = 0x384; 20struct fmr2 {
33static int radio_nr = -1;
34
35module_param(io, int, 0);
36MODULE_PARM_DESC(io, "I/O address of the SF16FMR2 card (should be 0x384, if do not work try 0x284)");
37module_param(radio_nr, int, 0);
38
39#define RADIO_VERSION KERNEL_VERSION(0,0,2)
40
41#define AUD_VOL_INDEX 1
42
43#undef DEBUG
44//#define DEBUG 1
45
46#ifdef DEBUG
47# define debug_print(s) printk s
48#else
49# define debug_print(s)
50#endif
51
52/* this should be static vars for module size */
53struct fmr2
54{
55 struct v4l2_device v4l2_dev;
56 struct video_device vdev;
57 struct mutex lock;
58 int io; 21 int io;
59 int curvol; /* 0-15 */ 22 struct snd_tea575x tea;
60 int mute; 23 struct v4l2_ctrl *volume;
61 int stereo; /* card is producing stereo audio */ 24 struct v4l2_ctrl *balance;
62 unsigned long curfreq; /* freq in kHz */
63 int card_type;
64}; 25};
65 26
27/* the port is hardwired so no need to support multiple cards */
28#define FMR2_PORT 0x384
66static struct fmr2 fmr2_card; 29static struct fmr2 fmr2_card;
67 30
68/* hw precision is 12.5 kHz 31/* TEA575x tuner pins */
69 * It is only useful to give freq in interval of 200 (=0.0125Mhz), 32#define STR_DATA (1 << 0)
70 * other bits will be truncated 33#define STR_CLK (1 << 1)
71 */ 34#define STR_WREN (1 << 2)
72#define RSF16_ENCODE(x) ((x) / 200 + 856) 35#define STR_MOST (1 << 3)
73#define RSF16_MINFREQ (87 * 16000) 36/* PT2254A/TC9154A volume control pins */
74#define RSF16_MAXFREQ (108 * 16000) 37#define PT_ST (1 << 4)
75 38#define PT_CK (1 << 5)
76static inline void wait(int n, int io) 39#define PT_DATA (1 << 6)
77{ 40/* volume control presence pin */
78 for (; n; --n) 41#define FMR2_HASVOL (1 << 7)
79 inb(io);
80}
81
82static void outbits(int bits, unsigned int data, int nWait, int io)
83{
84 int bit;
85
86 for (; --bits >= 0;) {
87 bit = (data >> bits) & 1;
88 outb(bit, io);
89 wait(nWait, io);
90 outb(bit | 2, io);
91 wait(nWait, io);
92 outb(bit, io);
93 wait(nWait, io);
94 }
95}
96
97static inline void fmr2_mute(int io)
98{
99 outb(0x00, io);
100 wait(4, io);
101}
102
103static inline void fmr2_unmute(int io)
104{
105 outb(0x04, io);
106 wait(4, io);
107}
108
109static inline int fmr2_stereo_mode(int io)
110{
111 int n = inb(io);
112
113 outb(6, io);
114 inb(io);
115 n = ((n >> 3) & 1) ^ 1;
116 debug_print((KERN_DEBUG "stereo: %d\n", n));
117 return n;
118}
119
120static int fmr2_product_info(struct fmr2 *dev)
121{
122 int n = inb(dev->io);
123
124 n &= 0xC1;
125 if (n == 0) {
126 /* this should support volume set */
127 dev->card_type = 12;
128 return 0;
129 }
130 /* not volume (mine is 11) */
131 dev->card_type = (n == 128) ? 11 : 0;
132 return n;
133}
134 42
135static inline int fmr2_getsigstr(struct fmr2 *dev) 43static void fmr2_tea575x_set_pins(struct snd_tea575x *tea, u8 pins)
136{ 44{
137 /* !!! works only if scanning freq */ 45 struct fmr2 *fmr2 = tea->private_data;
138 int res = 0xffff; 46 u8 bits = 0;
139
140 outb(5, dev->io);
141 wait(4, dev->io);
142 if (!(inb(dev->io) & 1))
143 res = 0;
144 debug_print((KERN_DEBUG "signal: %d\n", res));
145 return res;
146}
147
148/* set frequency and unmute card */
149static int fmr2_setfreq(struct fmr2 *dev)
150{
151 unsigned long freq = dev->curfreq;
152
153 fmr2_mute(dev->io);
154
155 /* 0x42 for mono output
156 * 0x102 forward scanning
157 * 0x182 scansione avanti
158 */
159 outbits(9, 0x2, 3, dev->io);
160 outbits(16, RSF16_ENCODE(freq), 2, dev->io);
161
162 fmr2_unmute(dev->io);
163 47
164 /* wait 0.11 sec */ 48 bits |= (pins & TEA575X_DATA) ? STR_DATA : 0;
165 msleep(110); 49 bits |= (pins & TEA575X_CLK) ? STR_CLK : 0;
50 /* WRITE_ENABLE is inverted, DATA must be high during read */
51 bits |= (pins & TEA575X_WREN) ? 0 : STR_WREN | STR_DATA;
166 52
167 /* NOTE if mute this stop radio 53 outb(bits, fmr2->io);
168 you must set freq on unmute */
169 dev->stereo = fmr2_stereo_mode(dev->io);
170 return 0;
171}
172
173/* !!! not tested, in my card this doesn't work !!! */
174static int fmr2_setvolume(struct fmr2 *dev)
175{
176 int vol[16] = { 0x021, 0x084, 0x090, 0x104,
177 0x110, 0x204, 0x210, 0x402,
178 0x404, 0x408, 0x410, 0x801,
179 0x802, 0x804, 0x808, 0x810 };
180 int i, a;
181 int n = vol[dev->curvol & 0x0f];
182
183 if (dev->card_type != 11)
184 return 1;
185
186 for (i = 12; --i >= 0; ) {
187 a = ((n >> i) & 1) << 6; /* if (a==0) a = 0; else a = 0x40; */
188 outb(a | 4, dev->io);
189 wait(4, dev->io);
190 outb(a | 0x24, dev->io);
191 wait(4, dev->io);
192 outb(a | 4, dev->io);
193 wait(4, dev->io);
194 }
195 for (i = 6; --i >= 0; ) {
196 a = ((0x18 >> i) & 1) << 6;
197 outb(a | 4, dev->io);
198 wait(4, dev->io);
199 outb(a | 0x24, dev->io);
200 wait(4, dev->io);
201 outb(a | 4, dev->io);
202 wait(4, dev->io);
203 }
204 wait(4, dev->io);
205 outb(0x14, dev->io);
206 return 0;
207} 54}
208 55
209static int vidioc_querycap(struct file *file, void *priv, 56static u8 fmr2_tea575x_get_pins(struct snd_tea575x *tea)
210 struct v4l2_capability *v)
211{ 57{
212 strlcpy(v->driver, "radio-sf16fmr2", sizeof(v->driver)); 58 struct fmr2 *fmr2 = tea->private_data;
213 strlcpy(v->card, "SF16-FMR2 radio", sizeof(v->card)); 59 u8 bits = inb(fmr2->io);
214 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
215 v->version = RADIO_VERSION;
216 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
217 return 0;
218}
219
220static int vidioc_g_tuner(struct file *file, void *priv,
221 struct v4l2_tuner *v)
222{
223 struct fmr2 *fmr2 = video_drvdata(file);
224
225 if (v->index > 0)
226 return -EINVAL;
227 60
228 strlcpy(v->name, "FM", sizeof(v->name)); 61 return (bits & STR_DATA) ? TEA575X_DATA : 0 |
229 v->type = V4L2_TUNER_RADIO; 62 (bits & STR_MOST) ? TEA575X_MOST : 0;
230
231 v->rangelow = RSF16_MINFREQ;
232 v->rangehigh = RSF16_MAXFREQ;
233 v->rxsubchans = fmr2->stereo ? V4L2_TUNER_SUB_STEREO :
234 V4L2_TUNER_SUB_MONO;
235 v->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW;
236 v->audmode = V4L2_TUNER_MODE_STEREO;
237 mutex_lock(&fmr2->lock);
238 v->signal = fmr2_getsigstr(fmr2);
239 mutex_unlock(&fmr2->lock);
240 return 0;
241} 63}
242 64
243static int vidioc_s_tuner(struct file *file, void *priv, 65static void fmr2_tea575x_set_direction(struct snd_tea575x *tea, bool output)
244 struct v4l2_tuner *v)
245{ 66{
246 return v->index ? -EINVAL : 0;
247} 67}
248 68
249static int vidioc_s_frequency(struct file *file, void *priv, 69static struct snd_tea575x_ops fmr2_tea_ops = {
250 struct v4l2_frequency *f) 70 .set_pins = fmr2_tea575x_set_pins,
251{ 71 .get_pins = fmr2_tea575x_get_pins,
252 struct fmr2 *fmr2 = video_drvdata(file); 72 .set_direction = fmr2_tea575x_set_direction,
73};
253 74
254 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO) 75/* TC9154A/PT2254A volume control */
255 return -EINVAL; 76
256 if (f->frequency < RSF16_MINFREQ || 77/* 18-bit shift register bit definitions */
257 f->frequency > RSF16_MAXFREQ) 78#define TC9154A_ATT_MAJ_0DB (1 << 0)
258 return -EINVAL; 79#define TC9154A_ATT_MAJ_10DB (1 << 1)
259 /* rounding in steps of 200 to match the freq 80#define TC9154A_ATT_MAJ_20DB (1 << 2)
260 that will be used */ 81#define TC9154A_ATT_MAJ_30DB (1 << 3)
261 fmr2->curfreq = (f->frequency / 200) * 200; 82#define TC9154A_ATT_MAJ_40DB (1 << 4)
262 83#define TC9154A_ATT_MAJ_50DB (1 << 5)
263 /* set card freq (if not muted) */ 84#define TC9154A_ATT_MAJ_60DB (1 << 6)
264 if (fmr2->curvol && !fmr2->mute) { 85
265 mutex_lock(&fmr2->lock); 86#define TC9154A_ATT_MIN_0DB (1 << 7)
266 fmr2_setfreq(fmr2); 87#define TC9154A_ATT_MIN_2DB (1 << 8)
267 mutex_unlock(&fmr2->lock); 88#define TC9154A_ATT_MIN_4DB (1 << 9)
89#define TC9154A_ATT_MIN_6DB (1 << 10)
90#define TC9154A_ATT_MIN_8DB (1 << 11)
91/* bit 12 is ignored */
92#define TC9154A_CHANNEL_LEFT (1 << 13)
93#define TC9154A_CHANNEL_RIGHT (1 << 14)
94/* bits 15, 16, 17 must be 0 */
95
96#define TC9154A_ATT_MAJ(x) (1 << x)
97#define TC9154A_ATT_MIN(x) (1 << (7 + x))
98
99static void tc9154a_set_pins(struct fmr2 *fmr2, u8 pins)
100{
101 if (!fmr2->tea.mute)
102 pins |= STR_WREN;
103
104 outb(pins, fmr2->io);
105}
106
107static void tc9154a_set_attenuation(struct fmr2 *fmr2, int att, u32 channel)
108{
109 int i;
110 u32 reg;
111 u8 bit;
112
113 reg = TC9154A_ATT_MAJ(att / 10) | TC9154A_ATT_MIN((att % 10) / 2);
114 reg |= channel;
115 /* write 18-bit shift register, LSB first */
116 for (i = 0; i < 18; i++) {
117 bit = reg & (1 << i) ? PT_DATA : 0;
118 tc9154a_set_pins(fmr2, bit);
119 udelay(5);
120 tc9154a_set_pins(fmr2, bit | PT_CK);
121 udelay(5);
122 tc9154a_set_pins(fmr2, bit);
268 } 123 }
269 return 0;
270}
271
272static int vidioc_g_frequency(struct file *file, void *priv,
273 struct v4l2_frequency *f)
274{
275 struct fmr2 *fmr2 = video_drvdata(file);
276
277 if (f->tuner != 0)
278 return -EINVAL;
279 f->type = V4L2_TUNER_RADIO;
280 f->frequency = fmr2->curfreq;
281 return 0;
282}
283 124
284static int vidioc_queryctrl(struct file *file, void *priv, 125 /* latch register data */
285 struct v4l2_queryctrl *qc) 126 udelay(5);
286{ 127 tc9154a_set_pins(fmr2, PT_ST);
287 struct fmr2 *fmr2 = video_drvdata(file); 128 udelay(5);
288 129 tc9154a_set_pins(fmr2, 0);
289 switch (qc->id) {
290 case V4L2_CID_AUDIO_MUTE:
291 return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
292 case V4L2_CID_AUDIO_VOLUME:
293 /* Only card_type == 11 implements volume */
294 if (fmr2->card_type == 11)
295 return v4l2_ctrl_query_fill(qc, 0, 15, 1, 0);
296 return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
297 }
298 return -EINVAL;
299} 130}
300 131
301static int vidioc_g_ctrl(struct file *file, void *priv, 132static int fmr2_s_ctrl(struct v4l2_ctrl *ctrl)
302 struct v4l2_control *ctrl)
303{ 133{
304 struct fmr2 *fmr2 = video_drvdata(file); 134 struct snd_tea575x *tea = container_of(ctrl->handler, struct snd_tea575x, ctrl_handler);
135 struct fmr2 *fmr2 = tea->private_data;
136 int volume, balance, left, right;
305 137
306 switch (ctrl->id) { 138 switch (ctrl->id) {
307 case V4L2_CID_AUDIO_MUTE:
308 ctrl->value = fmr2->mute;
309 return 0;
310 case V4L2_CID_AUDIO_VOLUME: 139 case V4L2_CID_AUDIO_VOLUME:
311 ctrl->value = fmr2->curvol; 140 volume = ctrl->val;
312 return 0; 141 balance = fmr2->balance->cur.val;
313 }
314 return -EINVAL;
315}
316
317static int vidioc_s_ctrl(struct file *file, void *priv,
318 struct v4l2_control *ctrl)
319{
320 struct fmr2 *fmr2 = video_drvdata(file);
321
322 switch (ctrl->id) {
323 case V4L2_CID_AUDIO_MUTE:
324 fmr2->mute = ctrl->value;
325 break; 142 break;
326 case V4L2_CID_AUDIO_VOLUME: 143 case V4L2_CID_AUDIO_BALANCE:
327 fmr2->curvol = ctrl->value; 144 balance = ctrl->val;
145 volume = fmr2->volume->cur.val;
328 break; 146 break;
329 default: 147 default:
330 return -EINVAL; 148 return -EINVAL;
331 } 149 }
332 150
333#ifdef DEBUG 151 left = right = volume;
334 if (fmr2->curvol && !fmr2->mute) 152 if (balance < 0)
335 printk(KERN_DEBUG "unmute\n"); 153 right = max(0, right + balance);
336 else 154 if (balance > 0)
337 printk(KERN_DEBUG "mute\n"); 155 left = max(0, left - balance);
338#endif
339
340 mutex_lock(&fmr2->lock);
341 if (fmr2->curvol && !fmr2->mute) {
342 fmr2_setvolume(fmr2);
343 /* Set frequency and unmute card */
344 fmr2_setfreq(fmr2);
345 } else
346 fmr2_mute(fmr2->io);
347 mutex_unlock(&fmr2->lock);
348 return 0;
349}
350
351static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
352{
353 *i = 0;
354 return 0;
355}
356 156
357static int vidioc_s_input(struct file *filp, void *priv, unsigned int i) 157 tc9154a_set_attenuation(fmr2, abs(left - 68), TC9154A_CHANNEL_LEFT);
358{ 158 tc9154a_set_attenuation(fmr2, abs(right - 68), TC9154A_CHANNEL_RIGHT);
359 return i ? -EINVAL : 0;
360}
361 159
362static int vidioc_g_audio(struct file *file, void *priv,
363 struct v4l2_audio *a)
364{
365 a->index = 0;
366 strlcpy(a->name, "Radio", sizeof(a->name));
367 a->capability = V4L2_AUDCAP_STEREO;
368 return 0; 160 return 0;
369} 161}
370 162
371static int vidioc_s_audio(struct file *file, void *priv, 163static const struct v4l2_ctrl_ops fmr2_ctrl_ops = {
372 struct v4l2_audio *a) 164 .s_ctrl = fmr2_s_ctrl,
165};
166
167static int fmr2_tea_ext_init(struct snd_tea575x *tea)
373{ 168{
374 return a->index ? -EINVAL : 0; 169 struct fmr2 *fmr2 = tea->private_data;
375}
376 170
377static const struct v4l2_file_operations fmr2_fops = { 171 if (inb(fmr2->io) & FMR2_HASVOL) {
378 .owner = THIS_MODULE, 172 fmr2->volume = v4l2_ctrl_new_std(&tea->ctrl_handler, &fmr2_ctrl_ops, V4L2_CID_AUDIO_VOLUME, 0, 68, 2, 56);
379 .unlocked_ioctl = video_ioctl2, 173 fmr2->balance = v4l2_ctrl_new_std(&tea->ctrl_handler, &fmr2_ctrl_ops, V4L2_CID_AUDIO_BALANCE, -68, 68, 2, 0);
380}; 174 if (tea->ctrl_handler.error) {
175 printk(KERN_ERR "radio-sf16fmr2: can't initialize contrls\n");
176 return tea->ctrl_handler.error;
177 }
178 }
381 179
382static const struct v4l2_ioctl_ops fmr2_ioctl_ops = { 180 return 0;
383 .vidioc_querycap = vidioc_querycap, 181}
384 .vidioc_g_tuner = vidioc_g_tuner,
385 .vidioc_s_tuner = vidioc_s_tuner,
386 .vidioc_g_audio = vidioc_g_audio,
387 .vidioc_s_audio = vidioc_s_audio,
388 .vidioc_g_input = vidioc_g_input,
389 .vidioc_s_input = vidioc_s_input,
390 .vidioc_g_frequency = vidioc_g_frequency,
391 .vidioc_s_frequency = vidioc_s_frequency,
392 .vidioc_queryctrl = vidioc_queryctrl,
393 .vidioc_g_ctrl = vidioc_g_ctrl,
394 .vidioc_s_ctrl = vidioc_s_ctrl,
395};
396 182
397static int __init fmr2_init(void) 183static int __init fmr2_init(void)
398{ 184{
399 struct fmr2 *fmr2 = &fmr2_card; 185 struct fmr2 *fmr2 = &fmr2_card;
400 struct v4l2_device *v4l2_dev = &fmr2->v4l2_dev;
401 int res;
402 186
403 strlcpy(v4l2_dev->name, "sf16fmr2", sizeof(v4l2_dev->name)); 187 fmr2->io = FMR2_PORT;
404 fmr2->io = io;
405 fmr2->stereo = 1;
406 mutex_init(&fmr2->lock);
407 188
408 if (!request_region(fmr2->io, 2, "sf16fmr2")) { 189 if (!request_region(fmr2->io, 2, "SF16-FMR2")) {
409 v4l2_err(v4l2_dev, "request_region failed!\n"); 190 printk(KERN_ERR "radio-sf16fmr2: I/O port 0x%x already in use\n", fmr2->io);
410 return -EBUSY; 191 return -EBUSY;
411 } 192 }
412 193
413 res = v4l2_device_register(NULL, v4l2_dev); 194 fmr2->tea.private_data = fmr2;
414 if (res < 0) { 195 fmr2->tea.ops = &fmr2_tea_ops;
415 release_region(fmr2->io, 2); 196 fmr2->tea.ext_init = fmr2_tea_ext_init;
416 v4l2_err(v4l2_dev, "Could not register v4l2_device\n"); 197 strlcpy(fmr2->tea.card, "SF16-FMR2", sizeof(fmr2->tea.card));
417 return res; 198 strcpy(fmr2->tea.bus_info, "ISA");
418 }
419 199
420 strlcpy(fmr2->vdev.name, v4l2_dev->name, sizeof(fmr2->vdev.name)); 200 if (snd_tea575x_init(&fmr2->tea)) {
421 fmr2->vdev.v4l2_dev = v4l2_dev; 201 printk(KERN_ERR "radio-sf16fmr2: Unable to detect TEA575x tuner\n");
422 fmr2->vdev.fops = &fmr2_fops;
423 fmr2->vdev.ioctl_ops = &fmr2_ioctl_ops;
424 fmr2->vdev.release = video_device_release_empty;
425 video_set_drvdata(&fmr2->vdev, fmr2);
426
427 /* mute card - prevents noisy bootups */
428 fmr2_mute(fmr2->io);
429 fmr2_product_info(fmr2);
430
431 if (video_register_device(&fmr2->vdev, VFL_TYPE_RADIO, radio_nr) < 0) {
432 v4l2_device_unregister(v4l2_dev);
433 release_region(fmr2->io, 2); 202 release_region(fmr2->io, 2);
434 return -EINVAL; 203 return -ENODEV;
435 } 204 }
436 205
437 v4l2_info(v4l2_dev, "SF16FMR2 radio card driver at 0x%x.\n", fmr2->io); 206 printk(KERN_INFO "radio-sf16fmr2: SF16-FMR2 radio card at 0x%x.\n", fmr2->io);
438 debug_print((KERN_DEBUG "card_type %d\n", fmr2->card_type));
439 return 0; 207 return 0;
440} 208}
441 209
@@ -443,22 +211,9 @@ static void __exit fmr2_exit(void)
443{ 211{
444 struct fmr2 *fmr2 = &fmr2_card; 212 struct fmr2 *fmr2 = &fmr2_card;
445 213
446 video_unregister_device(&fmr2->vdev); 214 snd_tea575x_exit(&fmr2->tea);
447 v4l2_device_unregister(&fmr2->v4l2_dev);
448 release_region(fmr2->io, 2); 215 release_region(fmr2->io, 2);
449} 216}
450 217
451module_init(fmr2_init); 218module_init(fmr2_init);
452module_exit(fmr2_exit); 219module_exit(fmr2_exit);
453
454#ifndef MODULE
455
456static int __init fmr2_setup_io(char *str)
457{
458 get_option(&str, &io);
459 return 1;
460}
461
462__setup("sf16fmr2=", fmr2_setup_io);
463
464#endif
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 0e71d816c725..95ddcc4845d3 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -39,10 +39,8 @@
39#include <linux/i2c.h> /* I2C */ 39#include <linux/i2c.h> /* I2C */
40#include <media/v4l2-common.h> 40#include <media/v4l2-common.h>
41#include <media/v4l2-ioctl.h> 41#include <media/v4l2-ioctl.h>
42#include <linux/version.h> /* for KERNEL_VERSION MACRO */
43 42
44#define DRIVER_VERSION "v0.01" 43#define DRIVER_VERSION "0.0.2"
45#define RADIO_VERSION KERNEL_VERSION(0, 0, 1)
46 44
47#define DRIVER_AUTHOR "Fabio Belavenuto <belavenuto@gmail.com>" 45#define DRIVER_AUTHOR "Fabio Belavenuto <belavenuto@gmail.com>"
48#define DRIVER_DESC "A driver for the TEA5764 radio chip for EZX Phones." 46#define DRIVER_DESC "A driver for the TEA5764 radio chip for EZX Phones."
@@ -300,7 +298,6 @@ static int vidioc_querycap(struct file *file, void *priv,
300 strlcpy(v->card, dev->name, sizeof(v->card)); 298 strlcpy(v->card, dev->name, sizeof(v->card));
301 snprintf(v->bus_info, sizeof(v->bus_info), 299 snprintf(v->bus_info, sizeof(v->bus_info),
302 "I2C:%s", dev_name(&dev->dev)); 300 "I2C:%s", dev_name(&dev->dev));
303 v->version = RADIO_VERSION;
304 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; 301 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
305 return 0; 302 return 0;
306} 303}
@@ -595,8 +592,9 @@ static void __exit tea5764_exit(void)
595MODULE_AUTHOR(DRIVER_AUTHOR); 592MODULE_AUTHOR(DRIVER_AUTHOR);
596MODULE_DESCRIPTION(DRIVER_DESC); 593MODULE_DESCRIPTION(DRIVER_DESC);
597MODULE_LICENSE("GPL"); 594MODULE_LICENSE("GPL");
595MODULE_VERSION(DRIVER_VERSION);
598 596
599module_param(use_xtal, int, 1); 597module_param(use_xtal, int, 0);
600MODULE_PARM_DESC(use_xtal, "Chip have a xtal connected in board"); 598MODULE_PARM_DESC(use_xtal, "Chip have a xtal connected in board");
601module_param(radio_nr, int, 0); 599module_param(radio_nr, int, 0);
602MODULE_PARM_DESC(radio_nr, "video4linux device number to use"); 600MODULE_PARM_DESC(radio_nr, "video4linux device number to use");
diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c
index a32663917059..f2ed9cc3cf3b 100644
--- a/drivers/media/radio/radio-terratec.c
+++ b/drivers/media/radio/radio-terratec.c
@@ -29,7 +29,6 @@
29#include <linux/ioport.h> /* request_region */ 29#include <linux/ioport.h> /* request_region */
30#include <linux/videodev2.h> /* kernel radio structs */ 30#include <linux/videodev2.h> /* kernel radio structs */
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/version.h> /* for KERNEL_VERSION MACRO */
33#include <linux/io.h> /* outb, outb_p */ 32#include <linux/io.h> /* outb, outb_p */
34#include <media/v4l2-device.h> 33#include <media/v4l2-device.h>
35#include <media/v4l2-ioctl.h> 34#include <media/v4l2-ioctl.h>
@@ -37,6 +36,7 @@
37MODULE_AUTHOR("R.OFFERMANNS & others"); 36MODULE_AUTHOR("R.OFFERMANNS & others");
38MODULE_DESCRIPTION("A driver for the TerraTec ActiveRadio Standalone radio card."); 37MODULE_DESCRIPTION("A driver for the TerraTec ActiveRadio Standalone radio card.");
39MODULE_LICENSE("GPL"); 38MODULE_LICENSE("GPL");
39MODULE_VERSION("0.0.3");
40 40
41#ifndef CONFIG_RADIO_TERRATEC_PORT 41#ifndef CONFIG_RADIO_TERRATEC_PORT
42#define CONFIG_RADIO_TERRATEC_PORT 0x590 42#define CONFIG_RADIO_TERRATEC_PORT 0x590
@@ -49,8 +49,6 @@ module_param(io, int, 0);
49MODULE_PARM_DESC(io, "I/O address of the TerraTec ActiveRadio card (0x590 or 0x591)"); 49MODULE_PARM_DESC(io, "I/O address of the TerraTec ActiveRadio card (0x590 or 0x591)");
50module_param(radio_nr, int, 0); 50module_param(radio_nr, int, 0);
51 51
52#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
53
54static struct v4l2_queryctrl radio_qctrl[] = { 52static struct v4l2_queryctrl radio_qctrl[] = {
55 { 53 {
56 .id = V4L2_CID_AUDIO_MUTE, 54 .id = V4L2_CID_AUDIO_MUTE,
@@ -205,7 +203,6 @@ static int vidioc_querycap(struct file *file, void *priv,
205 strlcpy(v->driver, "radio-terratec", sizeof(v->driver)); 203 strlcpy(v->driver, "radio-terratec", sizeof(v->driver));
206 strlcpy(v->card, "ActiveRadio", sizeof(v->card)); 204 strlcpy(v->card, "ActiveRadio", sizeof(v->card));
207 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info)); 205 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
208 v->version = RADIO_VERSION;
209 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; 206 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
210 return 0; 207 return 0;
211} 208}
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index a185610b376b..f17b540d68a5 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -16,7 +16,6 @@
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */ 17 */
18 18
19#include <linux/version.h>
20#include <linux/io.h> 19#include <linux/io.h>
21#include <media/v4l2-ioctl.h> 20#include <media/v4l2-ioctl.h>
22#include <media/v4l2-device.h> 21#include <media/v4l2-device.h>
@@ -44,7 +43,6 @@ static int timbradio_vidioc_querycap(struct file *file, void *priv,
44 strlcpy(v->driver, DRIVER_NAME, sizeof(v->driver)); 43 strlcpy(v->driver, DRIVER_NAME, sizeof(v->driver));
45 strlcpy(v->card, "Timberdale Radio", sizeof(v->card)); 44 strlcpy(v->card, "Timberdale Radio", sizeof(v->card));
46 snprintf(v->bus_info, sizeof(v->bus_info), "platform:"DRIVER_NAME); 45 snprintf(v->bus_info, sizeof(v->bus_info), "platform:"DRIVER_NAME);
47 v->version = KERNEL_VERSION(0, 0, 1);
48 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; 46 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
49 return 0; 47 return 0;
50} 48}
@@ -245,4 +243,5 @@ module_exit(timbradio_exit);
245MODULE_DESCRIPTION("Timberdale Radio driver"); 243MODULE_DESCRIPTION("Timberdale Radio driver");
246MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); 244MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
247MODULE_LICENSE("GPL v2"); 245MODULE_LICENSE("GPL v2");
246MODULE_VERSION("0.0.2");
248MODULE_ALIAS("platform:"DRIVER_NAME); 247MODULE_ALIAS("platform:"DRIVER_NAME);
diff --git a/drivers/media/radio/radio-trust.c b/drivers/media/radio/radio-trust.c
index 22fa9cc28abe..b3f45a019d82 100644
--- a/drivers/media/radio/radio-trust.c
+++ b/drivers/media/radio/radio-trust.c
@@ -19,7 +19,6 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/ioport.h> 21#include <linux/ioport.h>
22#include <linux/version.h> /* for KERNEL_VERSION MACRO */
23#include <linux/videodev2.h> 22#include <linux/videodev2.h>
24#include <linux/io.h> 23#include <linux/io.h>
25#include <media/v4l2-device.h> 24#include <media/v4l2-device.h>
@@ -28,6 +27,7 @@
28MODULE_AUTHOR("Eric Lammerts, Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath"); 27MODULE_AUTHOR("Eric Lammerts, Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath");
29MODULE_DESCRIPTION("A driver for the Trust FM Radio card."); 28MODULE_DESCRIPTION("A driver for the Trust FM Radio card.");
30MODULE_LICENSE("GPL"); 29MODULE_LICENSE("GPL");
30MODULE_VERSION("0.0.3");
31 31
32/* acceptable ports: 0x350 (JP3 shorted), 0x358 (JP3 open) */ 32/* acceptable ports: 0x350 (JP3 shorted), 0x358 (JP3 open) */
33 33
@@ -42,8 +42,6 @@ module_param(io, int, 0);
42MODULE_PARM_DESC(io, "I/O address of the Trust FM Radio card (0x350 or 0x358)"); 42MODULE_PARM_DESC(io, "I/O address of the Trust FM Radio card (0x350 or 0x358)");
43module_param(radio_nr, int, 0); 43module_param(radio_nr, int, 0);
44 44
45#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
46
47struct trust { 45struct trust {
48 struct v4l2_device v4l2_dev; 46 struct v4l2_device v4l2_dev;
49 struct video_device vdev; 47 struct video_device vdev;
@@ -196,7 +194,6 @@ static int vidioc_querycap(struct file *file, void *priv,
196 strlcpy(v->driver, "radio-trust", sizeof(v->driver)); 194 strlcpy(v->driver, "radio-trust", sizeof(v->driver));
197 strlcpy(v->card, "Trust FM Radio", sizeof(v->card)); 195 strlcpy(v->card, "Trust FM Radio", sizeof(v->card));
198 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info)); 196 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
199 v->version = RADIO_VERSION;
200 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; 197 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
201 return 0; 198 return 0;
202} 199}
diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c
index 8dbbf08f2207..398726abc0c8 100644
--- a/drivers/media/radio/radio-typhoon.c
+++ b/drivers/media/radio/radio-typhoon.c
@@ -31,15 +31,17 @@
31#include <linux/module.h> /* Modules */ 31#include <linux/module.h> /* Modules */
32#include <linux/init.h> /* Initdata */ 32#include <linux/init.h> /* Initdata */
33#include <linux/ioport.h> /* request_region */ 33#include <linux/ioport.h> /* request_region */
34#include <linux/version.h> /* for KERNEL_VERSION MACRO */
35#include <linux/videodev2.h> /* kernel radio structs */ 34#include <linux/videodev2.h> /* kernel radio structs */
36#include <linux/io.h> /* outb, outb_p */ 35#include <linux/io.h> /* outb, outb_p */
37#include <media/v4l2-device.h> 36#include <media/v4l2-device.h>
38#include <media/v4l2-ioctl.h> 37#include <media/v4l2-ioctl.h>
39 38
39#define DRIVER_VERSION "0.1.2"
40
40MODULE_AUTHOR("Dr. Henrik Seidel"); 41MODULE_AUTHOR("Dr. Henrik Seidel");
41MODULE_DESCRIPTION("A driver for the Typhoon radio card (a.k.a. EcoRadio)."); 42MODULE_DESCRIPTION("A driver for the Typhoon radio card (a.k.a. EcoRadio).");
42MODULE_LICENSE("GPL"); 43MODULE_LICENSE("GPL");
44MODULE_VERSION(DRIVER_VERSION);
43 45
44#ifndef CONFIG_RADIO_TYPHOON_PORT 46#ifndef CONFIG_RADIO_TYPHOON_PORT
45#define CONFIG_RADIO_TYPHOON_PORT -1 47#define CONFIG_RADIO_TYPHOON_PORT -1
@@ -61,9 +63,7 @@ static unsigned long mutefreq = CONFIG_RADIO_TYPHOON_MUTEFREQ;
61module_param(mutefreq, ulong, 0); 63module_param(mutefreq, ulong, 0);
62MODULE_PARM_DESC(mutefreq, "Frequency used when muting the card (in kHz)"); 64MODULE_PARM_DESC(mutefreq, "Frequency used when muting the card (in kHz)");
63 65
64#define RADIO_VERSION KERNEL_VERSION(0, 1, 1) 66#define BANNER "Typhoon Radio Card driver v" DRIVER_VERSION "\n"
65
66#define BANNER "Typhoon Radio Card driver v0.1.1\n"
67 67
68struct typhoon { 68struct typhoon {
69 struct v4l2_device v4l2_dev; 69 struct v4l2_device v4l2_dev;
@@ -171,7 +171,6 @@ static int vidioc_querycap(struct file *file, void *priv,
171 strlcpy(v->driver, "radio-typhoon", sizeof(v->driver)); 171 strlcpy(v->driver, "radio-typhoon", sizeof(v->driver));
172 strlcpy(v->card, "Typhoon Radio", sizeof(v->card)); 172 strlcpy(v->card, "Typhoon Radio", sizeof(v->card));
173 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info)); 173 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
174 v->version = RADIO_VERSION;
175 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; 174 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
176 return 0; 175 return 0;
177} 176}
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 459f7272d326..46cacf845049 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1382,7 +1382,7 @@ static int wl1273_fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
1382 1382
1383 switch (ctrl->id) { 1383 switch (ctrl->id) {
1384 case V4L2_CID_TUNE_ANTENNA_CAPACITOR: 1384 case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
1385 ctrl->cur.val = wl1273_fm_get_tx_ctune(radio); 1385 ctrl->val = wl1273_fm_get_tx_ctune(radio);
1386 break; 1386 break;
1387 1387
1388 default: 1388 default:
diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c
index af99c5bd88c1..f5613b948203 100644
--- a/drivers/media/radio/radio-zoltrix.c
+++ b/drivers/media/radio/radio-zoltrix.c
@@ -35,7 +35,6 @@
35#include <linux/delay.h> /* udelay, msleep */ 35#include <linux/delay.h> /* udelay, msleep */
36#include <linux/videodev2.h> /* kernel radio structs */ 36#include <linux/videodev2.h> /* kernel radio structs */
37#include <linux/mutex.h> 37#include <linux/mutex.h>
38#include <linux/version.h> /* for KERNEL_VERSION MACRO */
39#include <linux/io.h> /* outb, outb_p */ 38#include <linux/io.h> /* outb, outb_p */
40#include <media/v4l2-device.h> 39#include <media/v4l2-device.h>
41#include <media/v4l2-ioctl.h> 40#include <media/v4l2-ioctl.h>
@@ -43,6 +42,7 @@
43MODULE_AUTHOR("C.van Schaik"); 42MODULE_AUTHOR("C.van Schaik");
44MODULE_DESCRIPTION("A driver for the Zoltrix Radio Plus."); 43MODULE_DESCRIPTION("A driver for the Zoltrix Radio Plus.");
45MODULE_LICENSE("GPL"); 44MODULE_LICENSE("GPL");
45MODULE_VERSION("0.0.3");
46 46
47#ifndef CONFIG_RADIO_ZOLTRIX_PORT 47#ifndef CONFIG_RADIO_ZOLTRIX_PORT
48#define CONFIG_RADIO_ZOLTRIX_PORT -1 48#define CONFIG_RADIO_ZOLTRIX_PORT -1
@@ -55,8 +55,6 @@ module_param(io, int, 0);
55MODULE_PARM_DESC(io, "I/O address of the Zoltrix Radio Plus (0x20c or 0x30c)"); 55MODULE_PARM_DESC(io, "I/O address of the Zoltrix Radio Plus (0x20c or 0x30c)");
56module_param(radio_nr, int, 0); 56module_param(radio_nr, int, 0);
57 57
58#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
59
60struct zoltrix { 58struct zoltrix {
61 struct v4l2_device v4l2_dev; 59 struct v4l2_device v4l2_dev;
62 struct video_device vdev; 60 struct video_device vdev;
@@ -228,7 +226,6 @@ static int vidioc_querycap(struct file *file, void *priv,
228 strlcpy(v->driver, "radio-zoltrix", sizeof(v->driver)); 226 strlcpy(v->driver, "radio-zoltrix", sizeof(v->driver));
229 strlcpy(v->card, "Zoltrix Radio", sizeof(v->card)); 227 strlcpy(v->card, "Zoltrix Radio", sizeof(v->card));
230 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info)); 228 strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
231 v->version = RADIO_VERSION;
232 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; 229 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
233 return 0; 230 return 0;
234} 231}
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index a2a67772c42c..fd3541b0e91c 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -24,10 +24,9 @@
24 24
25/* driver definitions */ 25/* driver definitions */
26#define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>"; 26#define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>";
27#define DRIVER_KERNEL_VERSION KERNEL_VERSION(1, 0, 1)
28#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver" 27#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
29#define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers" 28#define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers"
30#define DRIVER_VERSION "1.0.1" 29#define DRIVER_VERSION "1.0.2"
31 30
32/* kernel includes */ 31/* kernel includes */
33#include <linux/i2c.h> 32#include <linux/i2c.h>
@@ -248,7 +247,6 @@ int si470x_vidioc_querycap(struct file *file, void *priv,
248{ 247{
249 strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver)); 248 strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver));
250 strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card)); 249 strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
251 capability->version = DRIVER_KERNEL_VERSION;
252 capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | 250 capability->capabilities = V4L2_CAP_HW_FREQ_SEEK |
253 V4L2_CAP_TUNER | V4L2_CAP_RADIO; 251 V4L2_CAP_TUNER | V4L2_CAP_RADIO;
254 252
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 392e84fe90ef..4cf537043f99 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -29,7 +29,6 @@
29 29
30/* driver definitions */ 30/* driver definitions */
31#define DRIVER_AUTHOR "Tobias Lorenz <tobias.lorenz@gmx.net>" 31#define DRIVER_AUTHOR "Tobias Lorenz <tobias.lorenz@gmx.net>"
32#define DRIVER_KERNEL_VERSION KERNEL_VERSION(1, 0, 10)
33#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver" 32#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
34#define DRIVER_DESC "USB radio driver for Si470x FM Radio Receivers" 33#define DRIVER_DESC "USB radio driver for Si470x FM Radio Receivers"
35#define DRIVER_VERSION "1.0.10" 34#define DRIVER_VERSION "1.0.10"
@@ -626,7 +625,6 @@ int si470x_vidioc_querycap(struct file *file, void *priv,
626 strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card)); 625 strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
627 usb_make_path(radio->usbdev, capability->bus_info, 626 usb_make_path(radio->usbdev, capability->bus_info,
628 sizeof(capability->bus_info)); 627 sizeof(capability->bus_info));
629 capability->version = DRIVER_KERNEL_VERSION;
630 capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | 628 capability->capabilities = V4L2_CAP_HW_FREQ_SEEK |
631 V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE; 629 V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
632 630
@@ -699,7 +697,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
699 radio->videodev = video_device_alloc(); 697 radio->videodev = video_device_alloc();
700 if (!radio->videodev) { 698 if (!radio->videodev) {
701 retval = -ENOMEM; 699 retval = -ENOMEM;
702 goto err_intbuffer; 700 goto err_urb;
703 } 701 }
704 memcpy(radio->videodev, &si470x_viddev_template, 702 memcpy(radio->videodev, &si470x_viddev_template,
705 sizeof(si470x_viddev_template)); 703 sizeof(si470x_viddev_template));
@@ -790,6 +788,8 @@ err_all:
790 kfree(radio->buffer); 788 kfree(radio->buffer);
791err_video: 789err_video:
792 video_device_release(radio->videodev); 790 video_device_release(radio->videodev);
791err_urb:
792 usb_free_urb(radio->int_in_urb);
793err_intbuffer: 793err_intbuffer:
794 kfree(radio->int_in_buffer); 794 kfree(radio->int_in_buffer);
795err_radio: 795err_radio:
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 68da001b09dc..f300a55ed85c 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -32,7 +32,6 @@
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/input.h> 34#include <linux/input.h>
35#include <linux/version.h>
36#include <linux/videodev2.h> 35#include <linux/videodev2.h>
37#include <linux/mutex.h> 36#include <linux/mutex.h>
38#include <media/v4l2-common.h> 37#include <media/v4l2-common.h>
diff --git a/drivers/media/radio/wl128x/fmdrv.h b/drivers/media/radio/wl128x/fmdrv.h
index 1a45a5d847b0..d84ad9dad323 100644
--- a/drivers/media/radio/wl128x/fmdrv.h
+++ b/drivers/media/radio/wl128x/fmdrv.h
@@ -28,14 +28,11 @@
28#include <sound/core.h> 28#include <sound/core.h>
29#include <sound/initval.h> 29#include <sound/initval.h>
30#include <linux/timer.h> 30#include <linux/timer.h>
31#include <linux/version.h>
32#include <media/v4l2-ioctl.h> 31#include <media/v4l2-ioctl.h>
33#include <media/v4l2-common.h> 32#include <media/v4l2-common.h>
34#include <media/v4l2-ctrls.h> 33#include <media/v4l2-ctrls.h>
35 34
36#define FM_DRV_VERSION "0.10" 35#define FM_DRV_VERSION "0.1.1"
37/* Should match with FM_DRV_VERSION */
38#define FM_DRV_RADIO_VERSION KERNEL_VERSION(0, 0, 1)
39#define FM_DRV_NAME "ti_fmdrv" 36#define FM_DRV_NAME "ti_fmdrv"
40#define FM_DRV_CARD_SHORT_NAME "TI FM Radio" 37#define FM_DRV_CARD_SHORT_NAME "TI FM Radio"
41#define FM_DRV_CARD_LONG_NAME "Texas Instruments FM Radio" 38#define FM_DRV_CARD_LONG_NAME "Texas Instruments FM Radio"
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index 87010724f914..8c0e19276970 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -175,7 +175,6 @@ static int fm_v4l2_vidioc_querycap(struct file *file, void *priv,
175 strlcpy(capability->card, FM_DRV_CARD_SHORT_NAME, 175 strlcpy(capability->card, FM_DRV_CARD_SHORT_NAME,
176 sizeof(capability->card)); 176 sizeof(capability->card));
177 sprintf(capability->bus_info, "UART"); 177 sprintf(capability->bus_info, "UART");
178 capability->version = FM_DRV_RADIO_VERSION;
179 capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER | 178 capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER |
180 V4L2_CAP_RADIO | V4L2_CAP_MODULATOR | 179 V4L2_CAP_RADIO | V4L2_CAP_MODULATOR |
181 V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | 180 V4L2_CAP_AUDIO | V4L2_CAP_READWRITE |
@@ -191,7 +190,7 @@ static int fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
191 190
192 switch (ctrl->id) { 191 switch (ctrl->id) {
193 case V4L2_CID_TUNE_ANTENNA_CAPACITOR: 192 case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
194 ctrl->cur.val = fm_tx_get_tune_cap_val(fmdev); 193 ctrl->val = fm_tx_get_tune_cap_val(fmdev);
195 break; 194 break;
196 default: 195 default:
197 fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id); 196 fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id);
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 7d4bbc226d06..899f783d92fb 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -87,6 +87,17 @@ config IR_RC5_SZ_DECODER
87 uses an IR protocol that is almost standard RC-5, but not quite, 87 uses an IR protocol that is almost standard RC-5, but not quite,
88 as it uses an additional bit). 88 as it uses an additional bit).
89 89
90config IR_MCE_KBD_DECODER
91 tristate "Enable IR raw decoder for the MCE keyboard/mouse protocol"
92 depends on RC_CORE
93 select BITREVERSE
94 default y
95
96 ---help---
97 Enable this option if you have a Microsoft Remote Keyboard for
98 Windows Media Center Edition, which you would like to use with
99 a raw IR receiver in your system.
100
90config IR_LIRC_CODEC 101config IR_LIRC_CODEC
91 tristate "Enable IR to LIRC bridge" 102 tristate "Enable IR to LIRC bridge"
92 depends on RC_CORE 103 depends on RC_CORE
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 52830e5f4eaa..f224db027c41 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_IR_RC6_DECODER) += ir-rc6-decoder.o
10obj-$(CONFIG_IR_JVC_DECODER) += ir-jvc-decoder.o 10obj-$(CONFIG_IR_JVC_DECODER) += ir-jvc-decoder.o
11obj-$(CONFIG_IR_SONY_DECODER) += ir-sony-decoder.o 11obj-$(CONFIG_IR_SONY_DECODER) += ir-sony-decoder.o
12obj-$(CONFIG_IR_RC5_SZ_DECODER) += ir-rc5-sz-decoder.o 12obj-$(CONFIG_IR_RC5_SZ_DECODER) += ir-rc5-sz-decoder.o
13obj-$(CONFIG_IR_MCE_KBD_DECODER) += ir-mce_kbd-decoder.o
13obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o 14obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o
14 15
15# stand-alone IR receivers/transmitters 16# stand-alone IR receivers/transmitters
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index a43ed6c41bfc..2b9c2569d74a 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -953,13 +953,13 @@ static void ene_set_idle(struct rc_dev *rdev, bool idle)
953} 953}
954 954
955/* outside interface: transmit */ 955/* outside interface: transmit */
956static int ene_transmit(struct rc_dev *rdev, int *buf, u32 n) 956static int ene_transmit(struct rc_dev *rdev, unsigned *buf, unsigned n)
957{ 957{
958 struct ene_device *dev = rdev->priv; 958 struct ene_device *dev = rdev->priv;
959 unsigned long flags; 959 unsigned long flags;
960 960
961 dev->tx_buffer = buf; 961 dev->tx_buffer = buf;
962 dev->tx_len = n / sizeof(int); 962 dev->tx_len = n;
963 dev->tx_pos = 0; 963 dev->tx_pos = 0;
964 dev->tx_reg = 0; 964 dev->tx_reg = 0;
965 dev->tx_done = 0; 965 dev->tx_done = 0;
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index 337a41d4450b..017c209cdf8a 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -235,7 +235,7 @@ struct ene_device {
235 bool tx_sample_pulse; /* current sample is pulse */ 235 bool tx_sample_pulse; /* current sample is pulse */
236 236
237 /* TX buffer */ 237 /* TX buffer */
238 int *tx_buffer; /* input samples buffer*/ 238 unsigned *tx_buffer; /* input samples buffer*/
239 int tx_pos; /* position in that buffer */ 239 int tx_pos; /* position in that buffer */
240 int tx_len; /* current len of tx buffer */ 240 int tx_len; /* current len of tx buffer */
241 int tx_done; /* done transmitting */ 241 int tx_done; /* done transmitting */
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index 1c5cc65ea1e1..e5eeec4da76e 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -103,19 +103,19 @@ static ssize_t ir_lirc_transmit_ir(struct file *file, const char *buf,
103{ 103{
104 struct lirc_codec *lirc; 104 struct lirc_codec *lirc;
105 struct rc_dev *dev; 105 struct rc_dev *dev;
106 int *txbuf; /* buffer with values to transmit */ 106 unsigned int *txbuf; /* buffer with values to transmit */
107 int ret = 0; 107 ssize_t ret = 0;
108 size_t count; 108 size_t count;
109 109
110 lirc = lirc_get_pdata(file); 110 lirc = lirc_get_pdata(file);
111 if (!lirc) 111 if (!lirc)
112 return -EFAULT; 112 return -EFAULT;
113 113
114 if (n % sizeof(int)) 114 if (n < sizeof(unsigned) || n % sizeof(unsigned))
115 return -EINVAL; 115 return -EINVAL;
116 116
117 count = n / sizeof(int); 117 count = n / sizeof(unsigned);
118 if (count > LIRCBUF_SIZE || count % 2 == 0 || n % sizeof(int) != 0) 118 if (count > LIRCBUF_SIZE || count % 2 == 0)
119 return -EINVAL; 119 return -EINVAL;
120 120
121 txbuf = memdup_user(buf, n); 121 txbuf = memdup_user(buf, n);
@@ -129,7 +129,10 @@ static ssize_t ir_lirc_transmit_ir(struct file *file, const char *buf,
129 } 129 }
130 130
131 if (dev->tx_ir) 131 if (dev->tx_ir)
132 ret = dev->tx_ir(dev, txbuf, (u32)n); 132 ret = dev->tx_ir(dev, txbuf, count);
133
134 if (ret > 0)
135 ret *= sizeof(unsigned);
133 136
134out: 137out:
135 kfree(txbuf); 138 kfree(txbuf);
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
new file mode 100644
index 000000000000..3784ebf80ec7
--- /dev/null
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -0,0 +1,449 @@
1/* ir-mce_kbd-decoder.c - A decoder for the RC6-ish keyboard/mouse IR protocol
2 * used by the Microsoft Remote Keyboard for Windows Media Center Edition,
3 * referred to by Microsoft's Windows Media Center remote specification docs
4 * as "an internal protocol called MCIR-2".
5 *
6 * Copyright (C) 2011 by Jarod Wilson <jarod@redhat.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17#include <linux/module.h>
18
19#include "rc-core-priv.h"
20
21/*
22 * This decoder currently supports:
23 * - MCIR-2 29-bit IR signals used for mouse movement and buttons
24 * - MCIR-2 32-bit IR signals used for standard keyboard keys
25 *
 26 * The media keys on the keyboard send RC-6 signals that are indistinguishable
27 * from the keys of the same name on the stock MCE remote, and will be handled
28 * by the standard RC-6 decoder, and be made available to the system via the
29 * input device for the remote, rather than the keyboard/mouse one.
30 */
31
32#define MCIR2_UNIT 333333 /* ns */
33#define MCIR2_HEADER_NBITS 5
34#define MCIR2_MOUSE_NBITS 29
35#define MCIR2_KEYBOARD_NBITS 32
36#define MCIR2_PREFIX_PULSE (8 * MCIR2_UNIT)
37#define MCIR2_PREFIX_SPACE (1 * MCIR2_UNIT)
38#define MCIR2_MAX_LEN (3 * MCIR2_UNIT)
39#define MCIR2_BIT_START (1 * MCIR2_UNIT)
40#define MCIR2_BIT_END (1 * MCIR2_UNIT)
41#define MCIR2_BIT_0 (1 * MCIR2_UNIT)
42#define MCIR2_BIT_SET (2 * MCIR2_UNIT)
43#define MCIR2_MODE_MASK 0xf /* for the header bits */
44#define MCIR2_KEYBOARD_HEADER 0x4
45#define MCIR2_MOUSE_HEADER 0x1
46#define MCIR2_MASK_KEYS_START 0xe0
47
48enum mce_kbd_mode {
49 MCIR2_MODE_KEYBOARD,
50 MCIR2_MODE_MOUSE,
51 MCIR2_MODE_UNKNOWN,
52};
53
54enum mce_kbd_state {
55 STATE_INACTIVE,
56 STATE_HEADER_BIT_START,
57 STATE_HEADER_BIT_END,
58 STATE_BODY_BIT_START,
59 STATE_BODY_BIT_END,
60 STATE_FINISHED,
61};
62
63static unsigned char kbd_keycodes[256] = {
64 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_A,
65 KEY_B, KEY_C, KEY_D, KEY_E, KEY_F,
66 KEY_G, KEY_H, KEY_I, KEY_J, KEY_K,
67 KEY_L, KEY_M, KEY_N, KEY_O, KEY_P,
68 KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U,
69 KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z,
70 KEY_1, KEY_2, KEY_3, KEY_4, KEY_5,
71 KEY_6, KEY_7, KEY_8, KEY_9, KEY_0,
72 KEY_ENTER, KEY_ESC, KEY_BACKSPACE, KEY_TAB, KEY_SPACE,
73 KEY_MINUS, KEY_EQUAL, KEY_LEFTBRACE, KEY_RIGHTBRACE, KEY_BACKSLASH,
74 KEY_RESERVED, KEY_SEMICOLON, KEY_APOSTROPHE, KEY_GRAVE, KEY_COMMA,
75 KEY_DOT, KEY_SLASH, KEY_CAPSLOCK, KEY_F1, KEY_F2,
76 KEY_F3, KEY_F4, KEY_F5, KEY_F6, KEY_F7,
77 KEY_F8, KEY_F9, KEY_F10, KEY_F11, KEY_F12,
78 KEY_SYSRQ, KEY_SCROLLLOCK, KEY_PAUSE, KEY_INSERT, KEY_HOME,
79 KEY_PAGEUP, KEY_DELETE, KEY_END, KEY_PAGEDOWN, KEY_RIGHT,
80 KEY_LEFT, KEY_DOWN, KEY_UP, KEY_NUMLOCK, KEY_KPSLASH,
81 KEY_KPASTERISK, KEY_KPMINUS, KEY_KPPLUS, KEY_KPENTER, KEY_KP1,
82 KEY_KP2, KEY_KP3, KEY_KP4, KEY_KP5, KEY_KP6,
83 KEY_KP7, KEY_KP8, KEY_KP9, KEY_KP0, KEY_KPDOT,
84 KEY_102ND, KEY_COMPOSE, KEY_POWER, KEY_KPEQUAL, KEY_F13,
85 KEY_F14, KEY_F15, KEY_F16, KEY_F17, KEY_F18,
86 KEY_F19, KEY_F20, KEY_F21, KEY_F22, KEY_F23,
87 KEY_F24, KEY_OPEN, KEY_HELP, KEY_PROPS, KEY_FRONT,
88 KEY_STOP, KEY_AGAIN, KEY_UNDO, KEY_CUT, KEY_COPY,
89 KEY_PASTE, KEY_FIND, KEY_MUTE, KEY_VOLUMEUP, KEY_VOLUMEDOWN,
90 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_KPCOMMA, KEY_RESERVED,
91 KEY_RO, KEY_KATAKANAHIRAGANA, KEY_YEN, KEY_HENKAN, KEY_MUHENKAN,
92 KEY_KPJPCOMMA, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_HANGUEL,
93 KEY_HANJA, KEY_KATAKANA, KEY_HIRAGANA, KEY_ZENKAKUHANKAKU, KEY_RESERVED,
94 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
95 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
96 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
97 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
98 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
99 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
100 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
101 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
102 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
103 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
104 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
105 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
106 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
107 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
108 KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_LEFTCTRL,
109 KEY_LEFTSHIFT, KEY_LEFTALT, KEY_LEFTMETA, KEY_RIGHTCTRL, KEY_RIGHTSHIFT,
110 KEY_RIGHTALT, KEY_RIGHTMETA, KEY_PLAYPAUSE, KEY_STOPCD, KEY_PREVIOUSSONG,
111 KEY_NEXTSONG, KEY_EJECTCD, KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_MUTE,
112 KEY_WWW, KEY_BACK, KEY_FORWARD, KEY_STOP, KEY_FIND,
113 KEY_SCROLLUP, KEY_SCROLLDOWN, KEY_EDIT, KEY_SLEEP, KEY_COFFEE,
114 KEY_REFRESH, KEY_CALC, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
115 KEY_RESERVED
116};
117
118static void mce_kbd_rx_timeout(unsigned long data)
119{
120 struct mce_kbd_dec *mce_kbd = (struct mce_kbd_dec *)data;
121 int i;
122 unsigned char maskcode;
123
124 IR_dprintk(2, "timer callback clearing all keys\n");
125
126 for (i = 0; i < 7; i++) {
127 maskcode = kbd_keycodes[MCIR2_MASK_KEYS_START + i];
128 input_report_key(mce_kbd->idev, maskcode, 0);
129 }
130
131 for (i = 0; i < MCIR2_MASK_KEYS_START; i++)
132 input_report_key(mce_kbd->idev, kbd_keycodes[i], 0);
133}
134
135static enum mce_kbd_mode mce_kbd_mode(struct mce_kbd_dec *data)
136{
137 switch (data->header & MCIR2_MODE_MASK) {
138 case MCIR2_KEYBOARD_HEADER:
139 return MCIR2_MODE_KEYBOARD;
140 case MCIR2_MOUSE_HEADER:
141 return MCIR2_MODE_MOUSE;
142 default:
143 return MCIR2_MODE_UNKNOWN;
144 }
145}
146
147static void ir_mce_kbd_process_keyboard_data(struct input_dev *idev,
148 u32 scancode)
149{
150 u8 keydata = (scancode >> 8) & 0xff;
151 u8 shiftmask = scancode & 0xff;
152 unsigned char keycode, maskcode;
153 int i, keystate;
154
155 IR_dprintk(1, "keyboard: keydata = 0x%02x, shiftmask = 0x%02x\n",
156 keydata, shiftmask);
157
158 for (i = 0; i < 7; i++) {
159 maskcode = kbd_keycodes[MCIR2_MASK_KEYS_START + i];
160 if (shiftmask & (1 << i))
161 keystate = 1;
162 else
163 keystate = 0;
164 input_report_key(idev, maskcode, keystate);
165 }
166
167 if (keydata) {
168 keycode = kbd_keycodes[keydata];
169 input_report_key(idev, keycode, 1);
170 } else {
171 for (i = 0; i < MCIR2_MASK_KEYS_START; i++)
172 input_report_key(idev, kbd_keycodes[i], 0);
173 }
174}
175
176static void ir_mce_kbd_process_mouse_data(struct input_dev *idev, u32 scancode)
177{
178 /* raw mouse coordinates */
179 u8 xdata = (scancode >> 7) & 0x7f;
180 u8 ydata = (scancode >> 14) & 0x7f;
181 int x, y;
182 /* mouse buttons */
183 bool right = scancode & 0x40;
184 bool left = scancode & 0x20;
185
186 if (xdata & 0x40)
187 x = -((~xdata & 0x7f) + 1);
188 else
189 x = xdata;
190
191 if (ydata & 0x40)
192 y = -((~ydata & 0x7f) + 1);
193 else
194 y = ydata;
195
196 IR_dprintk(1, "mouse: x = %d, y = %d, btns = %s%s\n",
197 x, y, left ? "L" : "", right ? "R" : "");
198
199 input_report_rel(idev, REL_X, x);
200 input_report_rel(idev, REL_Y, y);
201
202 input_report_key(idev, BTN_LEFT, left);
203 input_report_key(idev, BTN_RIGHT, right);
204}
205
206/**
207 * ir_mce_kbd_decode() - Decode one mce_kbd pulse or space
208 * @dev: the struct rc_dev descriptor of the device
209 * @ev: the struct ir_raw_event descriptor of the pulse/space
210 *
211 * This function returns -EINVAL if the pulse violates the state machine
212 */
213static int ir_mce_kbd_decode(struct rc_dev *dev, struct ir_raw_event ev)
214{
215 struct mce_kbd_dec *data = &dev->raw->mce_kbd;
216 u32 scancode;
217 unsigned long delay;
218
219 if (!(dev->raw->enabled_protocols & RC_TYPE_MCE_KBD))
220 return 0;
221
222 if (!is_timing_event(ev)) {
223 if (ev.reset)
224 data->state = STATE_INACTIVE;
225 return 0;
226 }
227
228 if (!geq_margin(ev.duration, MCIR2_UNIT, MCIR2_UNIT / 2))
229 goto out;
230
231again:
232 IR_dprintk(2, "started at state %i (%uus %s)\n",
233 data->state, TO_US(ev.duration), TO_STR(ev.pulse));
234
235 if (!geq_margin(ev.duration, MCIR2_UNIT, MCIR2_UNIT / 2))
236 return 0;
237
238 switch (data->state) {
239
240 case STATE_INACTIVE:
241 if (!ev.pulse)
242 break;
243
244 /* Note: larger margin on first pulse since each MCIR2_UNIT
245 is quite short and some hardware takes some time to
246 adjust to the signal */
247 if (!eq_margin(ev.duration, MCIR2_PREFIX_PULSE, MCIR2_UNIT))
248 break;
249
250 data->state = STATE_HEADER_BIT_START;
251 data->count = 0;
252 data->header = 0;
253 return 0;
254
255 case STATE_HEADER_BIT_START:
256 if (geq_margin(ev.duration, MCIR2_MAX_LEN, MCIR2_UNIT / 2))
257 break;
258
259 data->header <<= 1;
260 if (ev.pulse)
261 data->header |= 1;
262 data->count++;
263 data->state = STATE_HEADER_BIT_END;
264 return 0;
265
266 case STATE_HEADER_BIT_END:
267 if (!is_transition(&ev, &dev->raw->prev_ev))
268 break;
269
270 decrease_duration(&ev, MCIR2_BIT_END);
271
272 if (data->count != MCIR2_HEADER_NBITS) {
273 data->state = STATE_HEADER_BIT_START;
274 goto again;
275 }
276
277 switch (mce_kbd_mode(data)) {
278 case MCIR2_MODE_KEYBOARD:
279 data->wanted_bits = MCIR2_KEYBOARD_NBITS;
280 break;
281 case MCIR2_MODE_MOUSE:
282 data->wanted_bits = MCIR2_MOUSE_NBITS;
283 break;
284 default:
285 IR_dprintk(1, "not keyboard or mouse data\n");
286 goto out;
287 }
288
289 data->count = 0;
290 data->body = 0;
291 data->state = STATE_BODY_BIT_START;
292 goto again;
293
294 case STATE_BODY_BIT_START:
295 if (geq_margin(ev.duration, MCIR2_MAX_LEN, MCIR2_UNIT / 2))
296 break;
297
298 data->body <<= 1;
299 if (ev.pulse)
300 data->body |= 1;
301 data->count++;
302 data->state = STATE_BODY_BIT_END;
303 return 0;
304
305 case STATE_BODY_BIT_END:
306 if (!is_transition(&ev, &dev->raw->prev_ev))
307 break;
308
309 if (data->count == data->wanted_bits)
310 data->state = STATE_FINISHED;
311 else
312 data->state = STATE_BODY_BIT_START;
313
314 decrease_duration(&ev, MCIR2_BIT_END);
315 goto again;
316
317 case STATE_FINISHED:
318 if (ev.pulse)
319 break;
320
321 switch (data->wanted_bits) {
322 case MCIR2_KEYBOARD_NBITS:
323 scancode = data->body & 0xffff;
324 IR_dprintk(1, "keyboard data 0x%08x\n", data->body);
325 if (dev->timeout)
326 delay = usecs_to_jiffies(dev->timeout / 1000);
327 else
328 delay = msecs_to_jiffies(100);
329 mod_timer(&data->rx_timeout, jiffies + delay);
330 /* Pass data to keyboard buffer parser */
331 ir_mce_kbd_process_keyboard_data(data->idev, scancode);
332 break;
333 case MCIR2_MOUSE_NBITS:
334 scancode = data->body & 0x1fffff;
335 IR_dprintk(1, "mouse data 0x%06x\n", scancode);
336 /* Pass data to mouse buffer parser */
337 ir_mce_kbd_process_mouse_data(data->idev, scancode);
338 break;
339 default:
340 IR_dprintk(1, "not keyboard or mouse data\n");
341 goto out;
342 }
343
344 data->state = STATE_INACTIVE;
345 input_sync(data->idev);
346 return 0;
347 }
348
349out:
350 IR_dprintk(1, "failed at state %i (%uus %s)\n",
351 data->state, TO_US(ev.duration), TO_STR(ev.pulse));
352 data->state = STATE_INACTIVE;
353 input_sync(data->idev);
354 return -EINVAL;
355}
356
357static int ir_mce_kbd_register(struct rc_dev *dev)
358{
359 struct mce_kbd_dec *mce_kbd = &dev->raw->mce_kbd;
360 struct input_dev *idev;
361 int i, ret;
362
363 idev = input_allocate_device();
364 if (!idev)
365 return -ENOMEM;
366
367 snprintf(mce_kbd->name, sizeof(mce_kbd->name),
368 "MCE IR Keyboard/Mouse (%s)", dev->driver_name);
369 strlcat(mce_kbd->phys, "/input0", sizeof(mce_kbd->phys));
370
371 idev->name = mce_kbd->name;
372 idev->phys = mce_kbd->phys;
373
374 /* Keyboard bits */
375 set_bit(EV_KEY, idev->evbit);
376 set_bit(EV_REP, idev->evbit);
377 for (i = 0; i < sizeof(kbd_keycodes); i++)
378 set_bit(kbd_keycodes[i], idev->keybit);
379
380 /* Mouse bits */
381 set_bit(EV_REL, idev->evbit);
382 set_bit(REL_X, idev->relbit);
383 set_bit(REL_Y, idev->relbit);
384 set_bit(BTN_LEFT, idev->keybit);
385 set_bit(BTN_RIGHT, idev->keybit);
386
387 /* Report scancodes too */
388 set_bit(EV_MSC, idev->evbit);
389 set_bit(MSC_SCAN, idev->mscbit);
390
391 setup_timer(&mce_kbd->rx_timeout, mce_kbd_rx_timeout,
392 (unsigned long)mce_kbd);
393
394 input_set_drvdata(idev, mce_kbd);
395
396#if 0
397 /* Adding this reference means two input devices are associated with
398 * this rc-core device, which ir-keytable doesn't cope with yet */
399 idev->dev.parent = &dev->dev;
400#endif
401
402 ret = input_register_device(idev);
403 if (ret < 0) {
404 input_free_device(idev);
405 return -EIO;
406 }
407
408 mce_kbd->idev = idev;
409
410 return 0;
411}
412
413static int ir_mce_kbd_unregister(struct rc_dev *dev)
414{
415 struct mce_kbd_dec *mce_kbd = &dev->raw->mce_kbd;
416 struct input_dev *idev = mce_kbd->idev;
417
418 del_timer_sync(&mce_kbd->rx_timeout);
419 input_unregister_device(idev);
420
421 return 0;
422}
423
424static struct ir_raw_handler mce_kbd_handler = {
425 .protocols = RC_TYPE_MCE_KBD,
426 .decode = ir_mce_kbd_decode,
427 .raw_register = ir_mce_kbd_register,
428 .raw_unregister = ir_mce_kbd_unregister,
429};
430
431static int __init ir_mce_kbd_decode_init(void)
432{
433 ir_raw_handler_register(&mce_kbd_handler);
434
435 printk(KERN_INFO "IR MCE Keyboard/mouse protocol handler initialized\n");
436 return 0;
437}
438
439static void __exit ir_mce_kbd_decode_exit(void)
440{
441 ir_raw_handler_unregister(&mce_kbd_handler);
442}
443
444module_init(ir_mce_kbd_decode_init);
445module_exit(ir_mce_kbd_decode_exit);
446
447MODULE_LICENSE("GPL");
448MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
449MODULE_DESCRIPTION("MCE Keyboard/mouse IR protocol decoder");
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 423ed45d6c55..27808bb59eba 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -355,6 +355,7 @@ static void init_decoders(struct work_struct *work)
355 load_rc6_decode(); 355 load_rc6_decode();
356 load_jvc_decode(); 356 load_jvc_decode();
357 load_sony_decode(); 357 load_sony_decode();
358 load_mce_kbd_decode();
358 load_lirc_codec(); 359 load_lirc_codec();
359 360
360 /* If needed, we may later add some init code. In this case, 361 /* If needed, we may later add some init code. In this case,
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index d20168fe4c40..682009d76cdf 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -382,7 +382,7 @@ static int ite_set_tx_duty_cycle(struct rc_dev *rcdev, u32 duty_cycle)
382/* transmit out IR pulses; what you get here is a batch of alternating 382/* transmit out IR pulses; what you get here is a batch of alternating
383 * pulse/space/pulse/space lengths that we should write out completely through 383 * pulse/space/pulse/space lengths that we should write out completely through
384 * the FIFO, blocking on a full FIFO */ 384 * the FIFO, blocking on a full FIFO */
385static int ite_tx_ir(struct rc_dev *rcdev, int *txbuf, u32 n) 385static int ite_tx_ir(struct rc_dev *rcdev, unsigned *txbuf, unsigned n)
386{ 386{
387 unsigned long flags; 387 unsigned long flags;
388 struct ite_dev *dev = rcdev->priv; 388 struct ite_dev *dev = rcdev->priv;
@@ -398,9 +398,6 @@ static int ite_tx_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
398 /* clear the array just in case */ 398 /* clear the array just in case */
399 memset(last_sent, 0, ARRAY_SIZE(last_sent)); 399 memset(last_sent, 0, ARRAY_SIZE(last_sent));
400 400
401 /* n comes in bytes; convert to ints */
402 n /= sizeof(int);
403
404 spin_lock_irqsave(&dev->lock, flags); 401 spin_lock_irqsave(&dev->lock, flags);
405 402
406 /* let everybody know we're now transmitting */ 403 /* let everybody know we're now transmitting */
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index 01b69bcc8666..c3907e211d39 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -29,7 +29,7 @@ static struct rc_map_table rc6_mce[] = {
29 29
30 { 0x800f040a, KEY_DELETE }, 30 { 0x800f040a, KEY_DELETE },
31 { 0x800f040b, KEY_ENTER }, 31 { 0x800f040b, KEY_ENTER },
32 { 0x800f040c, KEY_POWER }, /* PC Power */ 32 { 0x800f040c, KEY_SLEEP }, /* Formerly PC Power */
33 { 0x800f040d, KEY_MEDIA }, /* Windows MCE button */ 33 { 0x800f040d, KEY_MEDIA }, /* Windows MCE button */
34 { 0x800f040e, KEY_MUTE }, 34 { 0x800f040e, KEY_MUTE },
35 { 0x800f040f, KEY_INFO }, 35 { 0x800f040f, KEY_INFO },
@@ -44,7 +44,6 @@ static struct rc_map_table rc6_mce[] = {
44 { 0x800f0416, KEY_PLAY }, 44 { 0x800f0416, KEY_PLAY },
45 { 0x800f0417, KEY_RECORD }, 45 { 0x800f0417, KEY_RECORD },
46 { 0x800f0418, KEY_PAUSE }, 46 { 0x800f0418, KEY_PAUSE },
47 { 0x800f046e, KEY_PLAYPAUSE },
48 { 0x800f0419, KEY_STOP }, 47 { 0x800f0419, KEY_STOP },
49 { 0x800f041a, KEY_NEXT }, 48 { 0x800f041a, KEY_NEXT },
50 { 0x800f041b, KEY_PREVIOUS }, 49 { 0x800f041b, KEY_PREVIOUS },
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index ec972dc25790..85ff9a1ffb39 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -692,20 +692,18 @@ static void mce_flush_rx_buffer(struct mceusb_dev *ir, int size)
692} 692}
693 693
694/* Send data out the IR blaster port(s) */ 694/* Send data out the IR blaster port(s) */
695static int mceusb_tx_ir(struct rc_dev *dev, int *txbuf, u32 n) 695static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
696{ 696{
697 struct mceusb_dev *ir = dev->priv; 697 struct mceusb_dev *ir = dev->priv;
698 int i, ret = 0; 698 int i, ret = 0;
699 int count, cmdcount = 0; 699 int cmdcount = 0;
700 unsigned char *cmdbuf; /* MCE command buffer */ 700 unsigned char *cmdbuf; /* MCE command buffer */
701 long signal_duration = 0; /* Singnal length in us */ 701 long signal_duration = 0; /* Singnal length in us */
702 struct timeval start_time, end_time; 702 struct timeval start_time, end_time;
703 703
704 do_gettimeofday(&start_time); 704 do_gettimeofday(&start_time);
705 705
706 count = n / sizeof(int); 706 cmdbuf = kzalloc(sizeof(unsigned) * MCE_CMDBUF_SIZE, GFP_KERNEL);
707
708 cmdbuf = kzalloc(sizeof(int) * MCE_CMDBUF_SIZE, GFP_KERNEL);
709 if (!cmdbuf) 707 if (!cmdbuf)
710 return -ENOMEM; 708 return -ENOMEM;
711 709
@@ -774,7 +772,7 @@ static int mceusb_tx_ir(struct rc_dev *dev, int *txbuf, u32 n)
774 772
775out: 773out:
776 kfree(cmdbuf); 774 kfree(cmdbuf);
777 return ret ? ret : n; 775 return ret ? ret : count;
778} 776}
779 777
780/* Sets active IR outputs -- mce devices typically have two */ 778/* Sets active IR outputs -- mce devices typically have two */
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index ce595f9ab4c7..eae05b500476 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -546,24 +546,18 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
546 * number may larger than TXFCONT (0xff). So in interrupt_handler, it has to 546 * number may larger than TXFCONT (0xff). So in interrupt_handler, it has to
547 * set TXFCONT as 0xff, until buf_count less than 0xff. 547 * set TXFCONT as 0xff, until buf_count less than 0xff.
548 */ 548 */
549static int nvt_tx_ir(struct rc_dev *dev, int *txbuf, u32 n) 549static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
550{ 550{
551 struct nvt_dev *nvt = dev->priv; 551 struct nvt_dev *nvt = dev->priv;
552 unsigned long flags; 552 unsigned long flags;
553 size_t cur_count;
554 unsigned int i; 553 unsigned int i;
555 u8 iren; 554 u8 iren;
556 int ret; 555 int ret;
557 556
558 spin_lock_irqsave(&nvt->tx.lock, flags); 557 spin_lock_irqsave(&nvt->tx.lock, flags);
559 558
560 if (n >= TX_BUF_LEN) { 559 ret = min((unsigned)(TX_BUF_LEN / sizeof(unsigned)), n);
561 nvt->tx.buf_count = cur_count = TX_BUF_LEN; 560 nvt->tx.buf_count = (ret * sizeof(unsigned));
562 ret = TX_BUF_LEN;
563 } else {
564 nvt->tx.buf_count = cur_count = n;
565 ret = n;
566 }
567 561
568 memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count); 562 memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);
569 563
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 873b38789751..04c2c722b6ec 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -84,6 +84,17 @@ struct ir_raw_event_ctrl {
84 unsigned count; 84 unsigned count;
85 unsigned wanted_bits; 85 unsigned wanted_bits;
86 } rc5_sz; 86 } rc5_sz;
87 struct mce_kbd_dec {
88 struct input_dev *idev;
89 struct timer_list rx_timeout;
90 char name[64];
91 char phys[64];
92 int state;
93 u8 header;
94 u32 body;
95 unsigned count;
96 unsigned wanted_bits;
97 } mce_kbd;
87 struct lirc_codec { 98 struct lirc_codec {
88 struct rc_dev *dev; 99 struct rc_dev *dev;
89 struct lirc_driver *drv; 100 struct lirc_driver *drv;
@@ -182,6 +193,13 @@ void ir_raw_init(void);
182#define load_sony_decode() 0 193#define load_sony_decode() 0
183#endif 194#endif
184 195
196/* from ir-mce_kbd-decoder.c */
197#ifdef CONFIG_IR_MCE_KBD_DECODER_MODULE
198#define load_mce_kbd_decode() request_module("ir-mce_kbd-decoder")
199#else
200#define load_mce_kbd_decode() 0
201#endif
202
185/* from ir-lirc-codec.c */ 203/* from ir-lirc-codec.c */
186#ifdef CONFIG_IR_LIRC_CODEC_MODULE 204#ifdef CONFIG_IR_LIRC_CODEC_MODULE
187#define load_lirc_codec() request_module("ir-lirc-codec") 205#define load_lirc_codec() request_module("ir-lirc-codec")
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index cc846b2619cf..efc6a514348a 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -101,21 +101,14 @@ static int loop_set_rx_carrier_range(struct rc_dev *dev, u32 min, u32 max)
101 return 0; 101 return 0;
102} 102}
103 103
104static int loop_tx_ir(struct rc_dev *dev, int *txbuf, u32 n) 104static int loop_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
105{ 105{
106 struct loopback_dev *lodev = dev->priv; 106 struct loopback_dev *lodev = dev->priv;
107 u32 rxmask; 107 u32 rxmask;
108 unsigned count;
109 unsigned total_duration = 0; 108 unsigned total_duration = 0;
110 unsigned i; 109 unsigned i;
111 DEFINE_IR_RAW_EVENT(rawir); 110 DEFINE_IR_RAW_EVENT(rawir);
112 111
113 if (n == 0 || n % sizeof(int)) {
114 dprintk("invalid tx buffer size\n");
115 return -EINVAL;
116 }
117
118 count = n / sizeof(int);
119 for (i = 0; i < count; i++) 112 for (i = 0; i < count; i++)
120 total_duration += abs(txbuf[i]); 113 total_duration += abs(txbuf[i]);
121 114
@@ -142,7 +135,7 @@ static int loop_tx_ir(struct rc_dev *dev, int *txbuf, u32 n)
142 135
143 for (i = 0; i < count; i++) { 136 for (i = 0; i < count; i++) {
144 rawir.pulse = i % 2 ? false : true; 137 rawir.pulse = i % 2 ? false : true;
145 rawir.duration = abs(txbuf[i]) * 1000; 138 rawir.duration = txbuf[i] * 1000;
146 if (rawir.duration) 139 if (rawir.duration)
147 ir_raw_event_store_with_filter(dev, &rawir); 140 ir_raw_event_store_with_filter(dev, &rawir);
148 } 141 }
@@ -158,7 +151,7 @@ out:
158 /* Lirc expects this function to take as long as the total duration */ 151 /* Lirc expects this function to take as long as the total duration */
159 set_current_state(TASK_INTERRUPTIBLE); 152 set_current_state(TASK_INTERRUPTIBLE);
160 schedule_timeout(usecs_to_jiffies(total_duration)); 153 schedule_timeout(usecs_to_jiffies(total_duration));
161 return n; 154 return count;
162} 155}
163 156
164static void loop_set_idle(struct rc_dev *dev, bool enable) 157static void loop_set_idle(struct rc_dev *dev, bool enable)
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 3186ac7c2c10..51a23f48bc7d 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -735,6 +735,7 @@ static struct {
735 { RC_TYPE_JVC, "jvc" }, 735 { RC_TYPE_JVC, "jvc" },
736 { RC_TYPE_SONY, "sony" }, 736 { RC_TYPE_SONY, "sony" },
737 { RC_TYPE_RC5_SZ, "rc-5-sz" }, 737 { RC_TYPE_RC5_SZ, "rc-5-sz" },
738 { RC_TYPE_MCE_KBD, "mce_kbd" },
738 { RC_TYPE_LIRC, "lirc" }, 739 { RC_TYPE_LIRC, "lirc" },
739 { RC_TYPE_OTHER, "other" }, 740 { RC_TYPE_OTHER, "other" },
740}; 741};
@@ -1099,7 +1100,6 @@ int rc_register_device(struct rc_dev *dev)
1099 if (rc < 0) 1100 if (rc < 0)
1100 goto out_input; 1101 goto out_input;
1101 } 1102 }
1102 mutex_unlock(&dev->lock);
1103 1103
1104 if (dev->change_protocol) { 1104 if (dev->change_protocol) {
1105 rc = dev->change_protocol(dev, rc_map->rc_type); 1105 rc = dev->change_protocol(dev, rc_map->rc_type);
@@ -1107,6 +1107,8 @@ int rc_register_device(struct rc_dev *dev)
1107 goto out_raw; 1107 goto out_raw;
1108 } 1108 }
1109 1109
1110 mutex_unlock(&dev->lock);
1111
1110 IR_dprintk(1, "Registered rc%ld (driver: %s, remote: %s, mode %s)\n", 1112 IR_dprintk(1, "Registered rc%ld (driver: %s, remote: %s, mode %s)\n",
1111 dev->devno, 1113 dev->devno,
1112 dev->driver_name ? dev->driver_name : "unknown", 1114 dev->driver_name ? dev->driver_name : "unknown",
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 5147767ccb78..a16604477917 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -205,6 +205,7 @@ struct redrat3_dev {
205 205
206 /* rx signal timeout timer */ 206 /* rx signal timeout timer */
207 struct timer_list rx_timeout; 207 struct timer_list rx_timeout;
208 u32 hw_timeout;
208 209
209 /* Is the device currently receiving? */ 210 /* Is the device currently receiving? */
210 bool recv_in_progress; 211 bool recv_in_progress;
@@ -414,20 +415,10 @@ static u32 redrat3_us_to_len(u32 microsec)
414 415
415} 416}
416 417
417/* timer callback to send long trailing space on receive timeout */ 418/* timer callback to send reset event */
418static void redrat3_rx_timeout(unsigned long data) 419static void redrat3_rx_timeout(unsigned long data)
419{ 420{
420 struct redrat3_dev *rr3 = (struct redrat3_dev *)data; 421 struct redrat3_dev *rr3 = (struct redrat3_dev *)data;
421 DEFINE_IR_RAW_EVENT(rawir);
422
423 rawir.pulse = false;
424 rawir.duration = rr3->rc->timeout;
425 rr3_dbg(rr3->dev, "storing trailing space with duration %d\n",
426 rawir.duration);
427 ir_raw_event_store_with_filter(rr3->rc, &rawir);
428
429 rr3_dbg(rr3->dev, "calling ir_raw_event_handle\n");
430 ir_raw_event_handle(rr3->rc);
431 422
432 rr3_dbg(rr3->dev, "calling ir_raw_event_reset\n"); 423 rr3_dbg(rr3->dev, "calling ir_raw_event_reset\n");
433 ir_raw_event_reset(rr3->rc); 424 ir_raw_event_reset(rr3->rc);
@@ -438,7 +429,7 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
438 DEFINE_IR_RAW_EVENT(rawir); 429 DEFINE_IR_RAW_EVENT(rawir);
439 struct redrat3_signal_header header; 430 struct redrat3_signal_header header;
440 struct device *dev; 431 struct device *dev;
441 int i; 432 int i, trailer = 0;
442 unsigned long delay; 433 unsigned long delay;
443 u32 mod_freq, single_len; 434 u32 mod_freq, single_len;
444 u16 *len_vals; 435 u16 *len_vals;
@@ -464,7 +455,8 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
464 if (!(header.length >= RR3_HEADER_LENGTH)) 455 if (!(header.length >= RR3_HEADER_LENGTH))
465 dev_warn(dev, "read returned less than rr3 header len\n"); 456 dev_warn(dev, "read returned less than rr3 header len\n");
466 457
467 delay = usecs_to_jiffies(rr3->rc->timeout / 1000); 458 /* Make sure we reset the IR kfifo after a bit of inactivity */
459 delay = usecs_to_jiffies(rr3->hw_timeout);
468 mod_timer(&rr3->rx_timeout, jiffies + delay); 460 mod_timer(&rr3->rx_timeout, jiffies + delay);
469 461
470 memcpy(&tmp32, sig_data + RR3_PAUSE_OFFSET, sizeof(tmp32)); 462 memcpy(&tmp32, sig_data + RR3_PAUSE_OFFSET, sizeof(tmp32));
@@ -506,9 +498,6 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
506 u16 val = len_vals[data_vals[i]]; 498 u16 val = len_vals[data_vals[i]];
507 single_len = redrat3_len_to_us((u32)be16_to_cpu(val)); 499 single_len = redrat3_len_to_us((u32)be16_to_cpu(val));
508 500
509 /* cap the value to IR_MAX_DURATION */
510 single_len &= IR_MAX_DURATION;
511
512 /* we should always get pulse/space/pulse/space samples */ 501 /* we should always get pulse/space/pulse/space samples */
513 if (i % 2) 502 if (i % 2)
514 rawir.pulse = false; 503 rawir.pulse = false;
@@ -516,6 +505,12 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
516 rawir.pulse = true; 505 rawir.pulse = true;
517 506
518 rawir.duration = US_TO_NS(single_len); 507 rawir.duration = US_TO_NS(single_len);
508 /* Save initial pulse length to fudge trailer */
509 if (i == 0)
510 trailer = rawir.duration;
511 /* cap the value to IR_MAX_DURATION */
512 rawir.duration &= IR_MAX_DURATION;
513
519 rr3_dbg(dev, "storing %s with duration %d (i: %d)\n", 514 rr3_dbg(dev, "storing %s with duration %d (i: %d)\n",
520 rawir.pulse ? "pulse" : "space", rawir.duration, i); 515 rawir.pulse ? "pulse" : "space", rawir.duration, i);
521 ir_raw_event_store_with_filter(rr3->rc, &rawir); 516 ir_raw_event_store_with_filter(rr3->rc, &rawir);
@@ -525,7 +520,10 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
525 if (i % 2) { 520 if (i % 2) {
526 rawir.pulse = false; 521 rawir.pulse = false;
527 /* this duration is made up, and may not be ideal... */ 522 /* this duration is made up, and may not be ideal... */
528 rawir.duration = rr3->rc->timeout / 2; 523 if (trailer < US_TO_NS(1000))
524 rawir.duration = US_TO_NS(2800);
525 else
526 rawir.duration = trailer;
529 rr3_dbg(dev, "storing trailing space with duration %d\n", 527 rr3_dbg(dev, "storing trailing space with duration %d\n",
530 rawir.duration); 528 rawir.duration);
531 ir_raw_event_store_with_filter(rr3->rc, &rawir); 529 ir_raw_event_store_with_filter(rr3->rc, &rawir);
@@ -629,36 +627,31 @@ static inline void redrat3_delete(struct redrat3_dev *rr3,
629 kfree(rr3); 627 kfree(rr3);
630} 628}
631 629
632static u32 redrat3_get_timeout(struct device *dev, 630static u32 redrat3_get_timeout(struct redrat3_dev *rr3)
633 struct rc_dev *rc, struct usb_device *udev)
634{ 631{
635 u32 *tmp; 632 u32 *tmp;
636 u32 timeout = MS_TO_NS(150); /* a sane default, if things go haywire */ 633 u32 timeout = MS_TO_US(150); /* a sane default, if things go haywire */
637 int len, ret, pipe; 634 int len, ret, pipe;
638 635
639 len = sizeof(*tmp); 636 len = sizeof(*tmp);
640 tmp = kzalloc(len, GFP_KERNEL); 637 tmp = kzalloc(len, GFP_KERNEL);
641 if (!tmp) { 638 if (!tmp) {
642 dev_warn(dev, "Memory allocation faillure\n"); 639 dev_warn(rr3->dev, "Memory allocation faillure\n");
643 return timeout; 640 return timeout;
644 } 641 }
645 642
646 pipe = usb_rcvctrlpipe(udev, 0); 643 pipe = usb_rcvctrlpipe(rr3->udev, 0);
647 ret = usb_control_msg(udev, pipe, RR3_GET_IR_PARAM, 644 ret = usb_control_msg(rr3->udev, pipe, RR3_GET_IR_PARAM,
648 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 645 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
649 RR3_IR_IO_SIG_TIMEOUT, 0, tmp, len, HZ * 5); 646 RR3_IR_IO_SIG_TIMEOUT, 0, tmp, len, HZ * 5);
650 if (ret != len) { 647 if (ret != len) {
651 dev_warn(dev, "Failed to read timeout from hardware\n"); 648 dev_warn(rr3->dev, "Failed to read timeout from hardware\n");
652 return timeout; 649 return timeout;
653 } 650 }
654 651
655 timeout = US_TO_NS(redrat3_len_to_us(be32_to_cpu(*tmp))); 652 timeout = redrat3_len_to_us(be32_to_cpu(*tmp));
656 if (timeout < rc->min_timeout)
657 timeout = rc->min_timeout;
658 else if (timeout > rc->max_timeout)
659 timeout = rc->max_timeout;
660 653
661 rr3_dbg(dev, "Got timeout of %d ms\n", timeout / (1000 * 1000)); 654 rr3_dbg(rr3->dev, "Got timeout of %d ms\n", timeout / 1000);
662 return timeout; 655 return timeout;
663} 656}
664 657
@@ -1110,9 +1103,7 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
1110 rc->priv = rr3; 1103 rc->priv = rr3;
1111 rc->driver_type = RC_DRIVER_IR_RAW; 1104 rc->driver_type = RC_DRIVER_IR_RAW;
1112 rc->allowed_protos = RC_TYPE_ALL; 1105 rc->allowed_protos = RC_TYPE_ALL;
1113 rc->min_timeout = MS_TO_NS(RR3_RX_MIN_TIMEOUT); 1106 rc->timeout = US_TO_NS(2750);
1114 rc->max_timeout = MS_TO_NS(RR3_RX_MAX_TIMEOUT);
1115 rc->timeout = redrat3_get_timeout(dev, rc, rr3->udev);
1116 rc->tx_ir = redrat3_transmit_ir; 1107 rc->tx_ir = redrat3_transmit_ir;
1117 rc->s_tx_carrier = redrat3_set_tx_carrier; 1108 rc->s_tx_carrier = redrat3_set_tx_carrier;
1118 rc->driver_name = DRIVER_NAME; 1109 rc->driver_name = DRIVER_NAME;
@@ -1186,7 +1177,7 @@ static int __devinit redrat3_dev_probe(struct usb_interface *intf,
1186 rr3 = kzalloc(sizeof(*rr3), GFP_KERNEL); 1177 rr3 = kzalloc(sizeof(*rr3), GFP_KERNEL);
1187 if (rr3 == NULL) { 1178 if (rr3 == NULL) {
1188 dev_err(dev, "Memory allocation failure\n"); 1179 dev_err(dev, "Memory allocation failure\n");
1189 goto error; 1180 goto no_endpoints;
1190 } 1181 }
1191 1182
1192 rr3->dev = &intf->dev; 1183 rr3->dev = &intf->dev;
@@ -1242,6 +1233,9 @@ static int __devinit redrat3_dev_probe(struct usb_interface *intf,
1242 if (retval < 0) 1233 if (retval < 0)
1243 goto error; 1234 goto error;
1244 1235
1236 /* store current hardware timeout, in us, will use for kfifo resets */
1237 rr3->hw_timeout = redrat3_get_timeout(rr3);
1238
1245 /* default.. will get overridden by any sends with a freq defined */ 1239 /* default.. will get overridden by any sends with a freq defined */
1246 rr3->carrier = 38000; 1240 rr3->carrier = 38000;
1247 1241
@@ -1280,6 +1274,7 @@ static void __devexit redrat3_dev_disconnect(struct usb_interface *intf)
1280 1274
1281 usb_set_intfdata(intf, NULL); 1275 usb_set_intfdata(intf, NULL);
1282 rc_unregister_device(rr3->rc); 1276 rc_unregister_device(rr3->rc);
1277 del_timer_sync(&rr3->rx_timeout);
1283 redrat3_delete(rr3, udev); 1278 redrat3_delete(rr3, udev);
1284 1279
1285 rr3_ftr(&intf->dev, "RedRat3 IR Transceiver now disconnected\n"); 1280 rr3_ftr(&intf->dev, "RedRat3 IR Transceiver now disconnected\n");
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 5d06b899e859..bec8abc965f7 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -6,8 +6,8 @@
6 * could probably support others (Winbond WEC102X, NatSemi, etc) 6 * could probably support others (Winbond WEC102X, NatSemi, etc)
7 * with minor modifications. 7 * with minor modifications.
8 * 8 *
9 * Original Author: David Härdeman <david@hardeman.nu> 9 * Original Author: David Härdeman <david@hardeman.nu>
10 * Copyright (C) 2009 - 2010 David Härdeman <david@hardeman.nu> 10 * Copyright (C) 2009 - 2011 David Härdeman <david@hardeman.nu>
11 * 11 *
12 * Dedicated to my daughter Matilda, without whose loving attention this 12 * Dedicated to my daughter Matilda, without whose loving attention this
13 * driver would have been finished in half the time and with a fraction 13 * driver would have been finished in half the time and with a fraction
@@ -577,16 +577,12 @@ wbcir_txmask(struct rc_dev *dev, u32 mask)
577} 577}
578 578
579static int 579static int
580wbcir_tx(struct rc_dev *dev, int *buf, u32 bufsize) 580wbcir_tx(struct rc_dev *dev, unsigned *buf, unsigned count)
581{ 581{
582 struct wbcir_data *data = dev->priv; 582 struct wbcir_data *data = dev->priv;
583 u32 count;
584 unsigned i; 583 unsigned i;
585 unsigned long flags; 584 unsigned long flags;
586 585
587 /* bufsize has been sanity checked by the caller */
588 count = bufsize / sizeof(int);
589
590 /* Not sure if this is possible, but better safe than sorry */ 586 /* Not sure if this is possible, but better safe than sorry */
591 spin_lock_irqsave(&data->spinlock, flags); 587 spin_lock_irqsave(&data->spinlock, flags);
592 if (data->txstate != WBCIR_TXSTATE_INACTIVE) { 588 if (data->txstate != WBCIR_TXSTATE_INACTIVE) {
@@ -876,18 +872,8 @@ wbcir_init_hw(struct wbcir_data *data)
876 /* prescaler 1.0, tx/rx fifo lvl 16 */ 872 /* prescaler 1.0, tx/rx fifo lvl 16 */
877 outb(0x30, data->sbase + WBCIR_REG_SP3_EXCR2); 873 outb(0x30, data->sbase + WBCIR_REG_SP3_EXCR2);
878 874
879 /* Set baud divisor to generate one byte per bit/cell */ 875 /* Set baud divisor to sample every 10 us */
880 switch (protocol) { 876 outb(0x0F, data->sbase + WBCIR_REG_SP3_BGDL);
881 case IR_PROTOCOL_RC5:
882 outb(0xA7, data->sbase + WBCIR_REG_SP3_BGDL);
883 break;
884 case IR_PROTOCOL_RC6:
885 outb(0x53, data->sbase + WBCIR_REG_SP3_BGDL);
886 break;
887 case IR_PROTOCOL_NEC:
888 outb(0x69, data->sbase + WBCIR_REG_SP3_BGDL);
889 break;
890 }
891 outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH); 877 outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH);
892 878
893 /* Set CEIR mode */ 879 /* Set CEIR mode */
@@ -896,9 +882,9 @@ wbcir_init_hw(struct wbcir_data *data)
896 inb(data->sbase + WBCIR_REG_SP3_LSR); /* Clear LSR */ 882 inb(data->sbase + WBCIR_REG_SP3_LSR); /* Clear LSR */
897 inb(data->sbase + WBCIR_REG_SP3_MSR); /* Clear MSR */ 883 inb(data->sbase + WBCIR_REG_SP3_MSR); /* Clear MSR */
898 884
899 /* Disable RX demod, run-length encoding/decoding, set freq span */ 885 /* Disable RX demod, enable run-length enc/dec, set freq span */
900 wbcir_select_bank(data, WBCIR_BANK_7); 886 wbcir_select_bank(data, WBCIR_BANK_7);
901 outb(0x10, data->sbase + WBCIR_REG_SP3_RCCFG); 887 outb(0x90, data->sbase + WBCIR_REG_SP3_RCCFG);
902 888
903 /* Disable timer */ 889 /* Disable timer */
904 wbcir_select_bank(data, WBCIR_BANK_4); 890 wbcir_select_bank(data, WBCIR_BANK_4);
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index bb53de7fe408..f574dc012cad 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -489,6 +489,15 @@ config VIDEO_TCM825X
489 This is a driver for the Toshiba TCM825x VGA camera sensor. 489 This is a driver for the Toshiba TCM825x VGA camera sensor.
490 It is used for example in Nokia N800. 490 It is used for example in Nokia N800.
491 491
492comment "Flash devices"
493
494config VIDEO_ADP1653
495 tristate "ADP1653 flash support"
496 depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
497 ---help---
498 This is a driver for the ADP1653 flash controller. It is used for
499 example in Nokia N900.
500
492comment "Video improvement chips" 501comment "Video improvement chips"
493 502
494config VIDEO_UPD64031A 503config VIDEO_UPD64031A
@@ -707,6 +716,8 @@ source "drivers/media/video/cx18/Kconfig"
707 716
708source "drivers/media/video/saa7164/Kconfig" 717source "drivers/media/video/saa7164/Kconfig"
709 718
719source "drivers/media/video/marvell-ccic/Kconfig"
720
710config VIDEO_M32R_AR 721config VIDEO_M32R_AR
711 tristate "AR devices" 722 tristate "AR devices"
712 depends on M32R && VIDEO_V4L2 723 depends on M32R && VIDEO_V4L2
@@ -726,15 +737,6 @@ config VIDEO_M32R_AR_M64278
726 To compile this driver as a module, choose M here: the 737 To compile this driver as a module, choose M here: the
727 module will be called arv. 738 module will be called arv.
728 739
729config VIDEO_CAFE_CCIC
730 tristate "Marvell 88ALP01 (Cafe) CMOS Camera Controller support"
731 depends on PCI && I2C && VIDEO_V4L2
732 select VIDEO_OV7670
733 ---help---
734 This is a video4linux2 driver for the Marvell 88ALP01 integrated
735 CMOS camera controller. This is the controller found on first-
736 generation OLPC systems.
737
738config VIDEO_SR030PC30 740config VIDEO_SR030PC30
739 tristate "SR030PC30 VGA camera sensor support" 741 tristate "SR030PC30 VGA camera sensor support"
740 depends on I2C && VIDEO_V4L2 742 depends on I2C && VIDEO_V4L2
@@ -846,6 +848,12 @@ config SOC_CAMERA_OV2640
846 help 848 help
847 This is a ov2640 camera driver 849 This is a ov2640 camera driver
848 850
851config SOC_CAMERA_OV5642
852 tristate "ov5642 camera support"
853 depends on SOC_CAMERA && I2C
854 help
855 This is a V4L2 camera driver for the OmniVision OV5642 sensor
856
849config SOC_CAMERA_OV6650 857config SOC_CAMERA_OV6650
850 tristate "ov6650 sensor support" 858 tristate "ov6650 sensor support"
851 depends on SOC_CAMERA && I2C 859 depends on SOC_CAMERA && I2C
@@ -952,6 +960,14 @@ config VIDEO_SAMSUNG_S5P_FIMC
952 To compile this driver as a module, choose M here: the 960 To compile this driver as a module, choose M here: the
953 module will be called s5p-fimc. 961 module will be called s5p-fimc.
954 962
963config VIDEO_ATMEL_ISI
964 tristate "ATMEL Image Sensor Interface (ISI) support"
965 depends on VIDEO_DEV && SOC_CAMERA && ARCH_AT91
966 select VIDEOBUF2_DMA_CONTIG
967 ---help---
968 This module makes the ATMEL Image Sensor Interface available
969 as a v4l2 device.
970
955config VIDEO_S5P_MIPI_CSIS 971config VIDEO_S5P_MIPI_CSIS
956 tristate "Samsung S5P and EXYNOS4 MIPI CSI receiver driver" 972 tristate "Samsung S5P and EXYNOS4 MIPI CSI receiver driver"
957 depends on VIDEO_V4L2 && PM_RUNTIME && PLAT_S5P && VIDEO_V4L2_SUBDEV_API 973 depends on VIDEO_V4L2 && PM_RUNTIME && PLAT_S5P && VIDEO_V4L2_SUBDEV_API
@@ -961,6 +977,8 @@ config VIDEO_S5P_MIPI_CSIS
961 To compile this driver as a module, choose M here: the 977 To compile this driver as a module, choose M here: the
962 module will be called s5p-csis. 978 module will be called s5p-csis.
963 979
980source "drivers/media/video/s5p-tv/Kconfig"
981
964# 982#
965# USB Multimedia device configuration 983# USB Multimedia device configuration
966# 984#
@@ -1056,4 +1074,12 @@ config VIDEO_MEM2MEM_TESTDEV
1056 framework. 1074 framework.
1057 1075
1058 1076
1077config VIDEO_SAMSUNG_S5P_MFC
1078 tristate "Samsung S5P MFC 5.1 Video Codec"
1079 depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P
1080 select VIDEOBUF2_DMA_CONTIG
1081 default n
1082 help
1083 MFC 5.1 driver for V4L2.
1084
1059endif # V4L_MEM2MEM_DRIVERS 1085endif # V4L_MEM2MEM_DRIVERS
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index f0fecd6f6a33..272390072aef 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
70obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o 70obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
71obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o 71obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
72obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/ 72obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/
73obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
73 74
74obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o 75obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
75obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o 76obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
@@ -78,6 +79,7 @@ obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
78obj-$(CONFIG_SOC_CAMERA_MT9T112) += mt9t112.o 79obj-$(CONFIG_SOC_CAMERA_MT9T112) += mt9t112.o
79obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o 80obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o
80obj-$(CONFIG_SOC_CAMERA_OV2640) += ov2640.o 81obj-$(CONFIG_SOC_CAMERA_OV2640) += ov2640.o
82obj-$(CONFIG_SOC_CAMERA_OV5642) += ov5642.o
81obj-$(CONFIG_SOC_CAMERA_OV6650) += ov6650.o 83obj-$(CONFIG_SOC_CAMERA_OV6650) += ov6650.o
82obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o 84obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o
83obj-$(CONFIG_SOC_CAMERA_OV9640) += ov9640.o 85obj-$(CONFIG_SOC_CAMERA_OV9640) += ov9640.o
@@ -127,7 +129,8 @@ obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
127 129
128obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o 130obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
129 131
130obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o 132obj-$(CONFIG_VIDEO_CAFE_CCIC) += marvell-ccic/
133obj-$(CONFIG_VIDEO_MMP_CAMERA) += marvell-ccic/
131 134
132obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o 135obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
133 136
@@ -166,8 +169,11 @@ obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
166obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2) += sh_mobile_csi2.o 169obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2) += sh_mobile_csi2.o
167obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o 170obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
168obj-$(CONFIG_VIDEO_OMAP1) += omap1_camera.o 171obj-$(CONFIG_VIDEO_OMAP1) += omap1_camera.o
172obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
169 173
170obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) += s5p-fimc/ 174obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) += s5p-fimc/
175obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc/
176obj-$(CONFIG_VIDEO_SAMSUNG_S5P_TV) += s5p-tv/
171 177
172obj-$(CONFIG_ARCH_DAVINCI) += davinci/ 178obj-$(CONFIG_ARCH_DAVINCI) += davinci/
173 179
diff --git a/drivers/media/video/adp1653.c b/drivers/media/video/adp1653.c
new file mode 100644
index 000000000000..be7befd60947
--- /dev/null
+++ b/drivers/media/video/adp1653.c
@@ -0,0 +1,491 @@
1/*
2 * drivers/media/video/adp1653.c
3 *
4 * Copyright (C) 2008--2011 Nokia Corporation
5 *
6 * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
7 *
8 * Contributors:
9 * Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
10 * Tuukka Toivonen <tuukkat76@gmail.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * version 2 as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 * 02110-1301 USA
25 *
26 * TODO:
27 * - fault interrupt handling
28 * - hardware strobe
29 * - power doesn't need to be ON if all lights are off
30 *
31 */
32
33#include <linux/delay.h>
34#include <linux/i2c.h>
35#include <linux/slab.h>
36#include <linux/version.h>
37#include <media/adp1653.h>
38#include <media/v4l2-device.h>
39
40#define TIMEOUT_MAX 820000
41#define TIMEOUT_STEP 54600
42#define TIMEOUT_MIN (TIMEOUT_MAX - ADP1653_REG_CONFIG_TMR_SET_MAX \
43 * TIMEOUT_STEP)
44#define TIMEOUT_US_TO_CODE(t) ((TIMEOUT_MAX + (TIMEOUT_STEP / 2) - (t)) \
45 / TIMEOUT_STEP)
46#define TIMEOUT_CODE_TO_US(c) (TIMEOUT_MAX - (c) * TIMEOUT_STEP)
47
48/* Write values into ADP1653 registers. */
/*
 * Push the current control values to the chip: indicator intensity and
 * LED mode go into OUT_SEL, flash-timeout code into CONFIG.
 * Returns 0 on success or the negative error from i2c_smbus_write_byte_data().
 * NOTE(review): callers hold the ctrl handler lock (see adp1653_init_device),
 * since this reads ->val of several controls — confirm for all call sites.
 */
static int adp1653_update_hw(struct adp1653_flash *flash)
{
	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
	u8 out_sel;
	u8 config = 0;
	int rval;

	/* Indicator LED current is always programmed, whatever the mode. */
	out_sel = ADP1653_INDICATOR_INTENSITY_uA_TO_REG(
		flash->indicator_intensity->val)
		<< ADP1653_REG_OUT_SEL_ILED_SHIFT;

	switch (flash->led_mode->val) {
	case V4L2_FLASH_LED_MODE_NONE:
		/* out_sel keeps HPLED bits zero: high-power LED off. */
		break;
	case V4L2_FLASH_LED_MODE_FLASH:
		/* Flash mode, light on with strobe, duration from timer */
		config = ADP1653_REG_CONFIG_TMR_CFG;
		config |= TIMEOUT_US_TO_CODE(flash->flash_timeout->val)
			  << ADP1653_REG_CONFIG_TMR_SET_SHIFT;
		break;
	case V4L2_FLASH_LED_MODE_TORCH:
		/* Torch mode, light immediately on, duration indefinite */
		out_sel |= ADP1653_FLASH_INTENSITY_mA_TO_REG(
			flash->torch_intensity->val)
			<< ADP1653_REG_OUT_SEL_HPLED_SHIFT;
		break;
	}

	rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, out_sel);
	if (rval < 0)
		return rval;

	rval = i2c_smbus_write_byte_data(client, ADP1653_REG_CONFIG, config);
	if (rval < 0)
		return rval;

	return 0;
}
87
/*
 * Read the FAULT register and accumulate new fault bits into flash->fault.
 * If any fault is pending, clear it on the chip (write 0 to OUT_SEL),
 * force the LED mode to NONE and re-sync the hardware.
 * Returns the accumulated fault mask (> 0), 0 when no fault, or a
 * negative I2C error code.
 */
static int adp1653_get_fault(struct adp1653_flash *flash)
{
	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
	int fault;
	int rval;

	fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT);
	if (IS_ERR_VALUE(fault))
		return fault;

	/* Accumulate: faults stay latched here until adp1653_get_ctrl
	 * reports them and resets flash->fault. */
	flash->fault |= fault;

	if (!flash->fault)
		return 0;

	/* Clear faults. */
	rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0);
	if (IS_ERR_VALUE(rval))
		return rval;

	/* A fault turns the lights off; reflect that in the control state. */
	flash->led_mode->val = V4L2_FLASH_LED_MODE_NONE;

	rval = adp1653_update_hw(flash);
	if (IS_ERR_VALUE(rval))
		return rval;

	return flash->fault;
}
116
/*
 * Start (enable != 0) or stop (enable == 0) a software-triggered flash
 * strobe.  Only legal in flash LED mode; returns -EBUSY otherwise.
 * Stopping simply rewrites OUT_SEL with the HPLED bits cleared.
 * Starting programs the flash current, then pulses the SW_STROBE bit
 * high and back low over I2C.
 */
static int adp1653_strobe(struct adp1653_flash *flash, int enable)
{
	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
	u8 out_sel = ADP1653_INDICATOR_INTENSITY_uA_TO_REG(
		flash->indicator_intensity->val)
		<< ADP1653_REG_OUT_SEL_ILED_SHIFT;
	int rval;

	if (flash->led_mode->val != V4L2_FLASH_LED_MODE_FLASH)
		return -EBUSY;

	if (!enable)
		return i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL,
						 out_sel);

	out_sel |= ADP1653_FLASH_INTENSITY_mA_TO_REG(
		flash->flash_intensity->val)
		<< ADP1653_REG_OUT_SEL_HPLED_SHIFT;
	rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, out_sel);
	if (rval)
		return rval;

	/* Software strobe using i2c */
	rval = i2c_smbus_write_byte_data(client, ADP1653_REG_SW_STROBE,
		ADP1653_REG_SW_STROBE_SW_STROBE);
	if (rval)
		return rval;
	return i2c_smbus_write_byte_data(client, ADP1653_REG_SW_STROBE, 0);
}
146
147/* --------------------------------------------------------------------------
148 * V4L2 controls
149 */
150
/*
 * g_volatile_ctrl handler for the (volatile) V4L2_CID_FLASH_FAULT
 * control: translate the chip's accumulated fault bits into the V4L2
 * fault flags and clear the driver's fault latch.
 */
static int adp1653_get_ctrl(struct v4l2_ctrl *ctrl)
{
	struct adp1653_flash *flash =
		container_of(ctrl->handler, struct adp1653_flash, ctrls);
	int rval;

	/* Refresh flash->fault from the hardware first. */
	rval = adp1653_get_fault(flash);
	if (IS_ERR_VALUE(rval))
		return rval;

	ctrl->cur.val = 0;

	if (flash->fault & ADP1653_REG_FAULT_FLT_SCP)
		ctrl->cur.val |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
	if (flash->fault & ADP1653_REG_FAULT_FLT_OT)
		ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
	if (flash->fault & ADP1653_REG_FAULT_FLT_TMR)
		ctrl->cur.val |= V4L2_FLASH_FAULT_TIMEOUT;
	if (flash->fault & ADP1653_REG_FAULT_FLT_OV)
		ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_VOLTAGE;

	/* Faults are reported once, then forgotten. */
	flash->fault = 0;

	return 0;
}
176
/*
 * s_ctrl handler: refuse strobe / torch / mode changes while a serious
 * fault (short circuit, over-temperature, over-voltage) is pending,
 * dispatch the strobe start/stop controls, and write everything else
 * to the hardware via adp1653_update_hw().
 */
static int adp1653_set_ctrl(struct v4l2_ctrl *ctrl)
{
	struct adp1653_flash *flash =
		container_of(ctrl->handler, struct adp1653_flash, ctrls);
	int rval;

	rval = adp1653_get_fault(flash);
	if (IS_ERR_VALUE(rval))
		return rval;
	/* rval is the fault mask here (see adp1653_get_fault). */
	if ((rval & (ADP1653_REG_FAULT_FLT_SCP |
		     ADP1653_REG_FAULT_FLT_OT |
		     ADP1653_REG_FAULT_FLT_OV)) &&
	    (ctrl->id == V4L2_CID_FLASH_STROBE ||
	     ctrl->id == V4L2_CID_FLASH_TORCH_INTENSITY ||
	     ctrl->id == V4L2_CID_FLASH_LED_MODE))
		return -EBUSY;

	switch (ctrl->id) {
	case V4L2_CID_FLASH_STROBE:
		return adp1653_strobe(flash, 1);
	case V4L2_CID_FLASH_STROBE_STOP:
		return adp1653_strobe(flash, 0);
	}

	return adp1653_update_hw(flash);
}

static const struct v4l2_ctrl_ops adp1653_ctrl_ops = {
	.g_volatile_ctrl = adp1653_get_ctrl,
	.s_ctrl = adp1653_set_ctrl,
};
208
/*
 * Register the driver's V4L2 controls: LED mode, strobe source,
 * strobe start/stop buttons, flash timeout, flash/torch/indicator
 * intensities and the (volatile, read-only) fault control.  Limits come
 * from flash->platform_data.  Returns 0 or the handler's error code.
 * NOTE(review): assumes platform_data is non-NULL — probe does not
 * check; verify against board code.
 */
static int adp1653_init_controls(struct adp1653_flash *flash)
{
	struct v4l2_ctrl *fault;

	v4l2_ctrl_handler_init(&flash->ctrls, 9);

	flash->led_mode =
		v4l2_ctrl_new_std_menu(&flash->ctrls, &adp1653_ctrl_ops,
				       V4L2_CID_FLASH_LED_MODE,
				       V4L2_FLASH_LED_MODE_TORCH, ~0x7, 0);
	v4l2_ctrl_new_std_menu(&flash->ctrls, &adp1653_ctrl_ops,
			       V4L2_CID_FLASH_STROBE_SOURCE,
			       V4L2_FLASH_STROBE_SOURCE_SOFTWARE, ~0x1, 0);
	v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
			  V4L2_CID_FLASH_STROBE, 0, 0, 0, 0);
	v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
			  V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0);
	flash->flash_timeout =
		v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
				  V4L2_CID_FLASH_TIMEOUT, TIMEOUT_MIN,
				  flash->platform_data->max_flash_timeout,
				  TIMEOUT_STEP,
				  flash->platform_data->max_flash_timeout);
	flash->flash_intensity =
		v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
				  V4L2_CID_FLASH_INTENSITY,
				  ADP1653_FLASH_INTENSITY_MIN,
				  flash->platform_data->max_flash_intensity,
				  1, flash->platform_data->max_flash_intensity);
	flash->torch_intensity =
		v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
				  V4L2_CID_FLASH_TORCH_INTENSITY,
				  ADP1653_TORCH_INTENSITY_MIN,
				  flash->platform_data->max_torch_intensity,
				  ADP1653_FLASH_INTENSITY_STEP,
				  flash->platform_data->max_torch_intensity);
	flash->indicator_intensity =
		v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
				  V4L2_CID_FLASH_INDICATOR_INTENSITY,
				  ADP1653_INDICATOR_INTENSITY_MIN,
				  flash->platform_data->max_indicator_intensity,
				  ADP1653_INDICATOR_INTENSITY_STEP,
				  ADP1653_INDICATOR_INTENSITY_MIN);
	fault = v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
				  V4L2_CID_FLASH_FAULT, 0,
				  V4L2_FLASH_FAULT_OVER_VOLTAGE
				  | V4L2_FLASH_FAULT_OVER_TEMPERATURE
				  | V4L2_FLASH_FAULT_SHORT_CIRCUIT, 0, 0);

	/* Any allocation failure above is recorded in ctrls.error. */
	if (flash->ctrls.error)
		return flash->ctrls.error;

	/* Fault state must always be read from the chip. */
	fault->is_volatile = 1;

	flash->subdev.ctrl_handler = &flash->ctrls;
	return 0;
}
266
267/* --------------------------------------------------------------------------
268 * V4L2 subdev operations
269 */
270
/*
 * Bring the chip to a known state after power-up: clear latched faults,
 * verify none remain, then program the current control values.
 * Returns 0 on success, -EIO on any failure (logged to the device).
 */
static int
adp1653_init_device(struct adp1653_flash *flash)
{
	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
	int rval;

	/* Clear FAULT register by writing zero to OUT_SEL */
	rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0);
	if (rval < 0) {
		dev_err(&client->dev, "failed writing fault register\n");
		return -EIO;
	}

	/* adp1653_get_fault/update_hw read control values, so take the
	 * control handler lock around them. */
	mutex_lock(&flash->ctrls.lock);
	/* Reset faults before reading new ones. */
	flash->fault = 0;
	rval = adp1653_get_fault(flash);
	mutex_unlock(&flash->ctrls.lock);
	if (rval > 0) {
		dev_err(&client->dev, "faults detected: 0x%1.1x\n", rval);
		return -EIO;
	}

	mutex_lock(&flash->ctrls.lock);
	rval = adp1653_update_hw(flash);
	mutex_unlock(&flash->ctrls.lock);
	if (rval) {
		dev_err(&client->dev,
			"adp1653_update_hw failed at %s\n", __func__);
		return -EIO;
	}

	return 0;
}
305
/*
 * Unconditionally switch the power state via the board-provided
 * callback; on power-up also re-initialize the chip, powering back off
 * if that fails.  Callers serialize via flash->power_lock.
 */
static int
__adp1653_set_power(struct adp1653_flash *flash, int on)
{
	int ret;

	ret = flash->platform_data->power(&flash->subdev, on);
	if (ret < 0)
		return ret;

	if (!on)
		return 0;

	ret = adp1653_init_device(flash);
	if (ret < 0)
		flash->platform_data->power(&flash->subdev, 0);

	return ret;
}

/*
 * s_power: reference-counted power handling.  Only the 0 <-> 1
 * transitions of power_count touch the hardware.
 */
static int
adp1653_set_power(struct v4l2_subdev *subdev, int on)
{
	struct adp1653_flash *flash = to_adp1653_flash(subdev);
	int ret = 0;

	mutex_lock(&flash->power_lock);

	/* If the power count is modified from 0 to != 0 or from != 0 to 0,
	 * update the power state.
	 */
	if (flash->power_count == !on) {
		ret = __adp1653_set_power(flash, !!on);
		if (ret < 0)
			goto done;
	}

	/* Update the power count. */
	flash->power_count += on ? 1 : -1;
	WARN_ON(flash->power_count < 0);

done:
	mutex_unlock(&flash->power_lock);
	return ret;
}
350
/* Power the device up when its device node is opened... */
static int adp1653_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	return adp1653_set_power(sd, 1);
}

/* ...and release that reference again on close. */
static int adp1653_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	return adp1653_set_power(sd, 0);
}

static const struct v4l2_subdev_core_ops adp1653_core_ops = {
	.s_power = adp1653_set_power,
};

static const struct v4l2_subdev_ops adp1653_ops = {
	.core = &adp1653_core_ops,
};

static const struct v4l2_subdev_internal_ops adp1653_internal_ops = {
	.open = adp1653_open,
	.close = adp1653_close,
};
373
374/* --------------------------------------------------------------------------
375 * I2C driver
376 */
#ifdef CONFIG_PM

/*
 * System suspend: power the chip down, but only if it is currently in
 * use (power_count > 0); the count itself is preserved so resume can
 * restore the same state.
 */
static int adp1653_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
	struct adp1653_flash *flash = to_adp1653_flash(subdev);

	if (!flash->power_count)
		return 0;

	return __adp1653_set_power(flash, 0);
}

/* System resume: mirror of adp1653_suspend(). */
static int adp1653_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
	struct adp1653_flash *flash = to_adp1653_flash(subdev);

	if (!flash->power_count)
		return 0;

	return __adp1653_set_power(flash, 1);
}

#else

#define adp1653_suspend	NULL
#define adp1653_resume	NULL

#endif /* CONFIG_PM */
409
410static int adp1653_probe(struct i2c_client *client,
411 const struct i2c_device_id *devid)
412{
413 struct adp1653_flash *flash;
414 int ret;
415
416 flash = kzalloc(sizeof(*flash), GFP_KERNEL);
417 if (flash == NULL)
418 return -ENOMEM;
419
420 flash->platform_data = client->dev.platform_data;
421
422 mutex_init(&flash->power_lock);
423
424 v4l2_i2c_subdev_init(&flash->subdev, client, &adp1653_ops);
425 flash->subdev.internal_ops = &adp1653_internal_ops;
426 flash->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
427
428 adp1653_init_controls(flash);
429
430 ret = media_entity_init(&flash->subdev.entity, 0, NULL, 0);
431 if (ret < 0)
432 kfree(flash);
433
434 return ret;
435}
436
/*
 * I2C remove: tear down in reverse order of probe.
 * NOTE(review): __exit (with __exit_p in the driver struct) means a
 * built-in driver has no remove callback at all, yet remove can run at
 * runtime via sysfs unbind — confirm this is intended or drop __exit.
 */
static int __exit adp1653_remove(struct i2c_client *client)
{
	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
	struct adp1653_flash *flash = to_adp1653_flash(subdev);

	v4l2_device_unregister_subdev(&flash->subdev);
	v4l2_ctrl_handler_free(&flash->ctrls);
	media_entity_cleanup(&flash->subdev.entity);
	kfree(flash);
	return 0;
}
448
449static const struct i2c_device_id adp1653_id_table[] = {
450 { ADP1653_NAME, 0 },
451 { }
452};
453MODULE_DEVICE_TABLE(i2c, adp1653_id_table);
454
455static struct dev_pm_ops adp1653_pm_ops = {
456 .suspend = adp1653_suspend,
457 .resume = adp1653_resume,
458};
459
460static struct i2c_driver adp1653_i2c_driver = {
461 .driver = {
462 .name = ADP1653_NAME,
463 .pm = &adp1653_pm_ops,
464 },
465 .probe = adp1653_probe,
466 .remove = __exit_p(adp1653_remove),
467 .id_table = adp1653_id_table,
468};
469
/* Module entry point: register the I2C driver, logging on failure. */
static int __init adp1653_init(void)
{
	int rval;

	rval = i2c_add_driver(&adp1653_i2c_driver);
	if (rval)
		printk(KERN_ALERT "%s: failed at i2c_add_driver\n", __func__);

	return rval;
}

/* Module exit point: unregister the I2C driver. */
static void __exit adp1653_exit(void)
{
	i2c_del_driver(&adp1653_i2c_driver);
}

module_init(adp1653_init);
module_exit(adp1653_exit);

MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
MODULE_DESCRIPTION("Analog Devices ADP1653 LED flash driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index f989f2820d88..b6ed44aebe30 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -27,7 +27,6 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/version.h>
31#include <linux/videodev2.h> 30#include <linux/videodev2.h>
32#include <media/v4l2-common.h> 31#include <media/v4l2-common.h>
33#include <media/v4l2-device.h> 32#include <media/v4l2-device.h>
@@ -54,7 +53,7 @@
54 */ 53 */
55#define USE_INT 0 /* Don't modify */ 54#define USE_INT 0 /* Don't modify */
56 55
57#define VERSION "0.04" 56#define VERSION "0.0.5"
58 57
59#define ar_inl(addr) inl((unsigned long)(addr)) 58#define ar_inl(addr) inl((unsigned long)(addr))
60#define ar_outl(val, addr) outl((unsigned long)(val), (unsigned long)(addr)) 59#define ar_outl(val, addr) outl((unsigned long)(val), (unsigned long)(addr))
@@ -404,7 +403,6 @@ static int ar_querycap(struct file *file, void *priv,
404 strlcpy(vcap->driver, ar->vdev.name, sizeof(vcap->driver)); 403 strlcpy(vcap->driver, ar->vdev.name, sizeof(vcap->driver));
405 strlcpy(vcap->card, "Colour AR VGA", sizeof(vcap->card)); 404 strlcpy(vcap->card, "Colour AR VGA", sizeof(vcap->card));
406 strlcpy(vcap->bus_info, "Platform", sizeof(vcap->bus_info)); 405 strlcpy(vcap->bus_info, "Platform", sizeof(vcap->bus_info));
407 vcap->version = KERNEL_VERSION(0, 0, 4);
408 vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE; 406 vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
409 return 0; 407 return 0;
410} 408}
@@ -879,3 +877,4 @@ module_exit(ar_cleanup_module);
879MODULE_AUTHOR("Takeo Takahashi <takahashi.takeo@renesas.com>"); 877MODULE_AUTHOR("Takeo Takahashi <takahashi.takeo@renesas.com>");
880MODULE_DESCRIPTION("Colour AR M64278(VGA) for Video4Linux"); 878MODULE_DESCRIPTION("Colour AR M64278(VGA) for Video4Linux");
881MODULE_LICENSE("GPL"); 879MODULE_LICENSE("GPL");
880MODULE_VERSION(VERSION);
diff --git a/drivers/media/video/atmel-isi.c b/drivers/media/video/atmel-isi.c
new file mode 100644
index 000000000000..7b89f00501b8
--- /dev/null
+++ b/drivers/media/video/atmel-isi.c
@@ -0,0 +1,1048 @@
1/*
2 * Copyright (c) 2011 Atmel Corporation
3 * Josh Wu, <josh.wu@atmel.com>
4 *
5 * Based on previous work by Lars Haring, <lars.haring@atmel.com>
6 * and Sedji Gaouaou
7 * Based on the bttv driver for Bt848 with respective copyright holders
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/clk.h>
15#include <linux/completion.h>
16#include <linux/delay.h>
17#include <linux/fs.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/platform_device.h>
23#include <linux/slab.h>
24
25#include <media/atmel-isi.h>
26#include <media/soc_camera.h>
27#include <media/soc_mediabus.h>
28#include <media/videobuf2-dma-contig.h>
29
/* Driver-wide limits. */
#define MAX_BUFFER_NUM			32
#define MAX_SUPPORT_WIDTH		2048
#define MAX_SUPPORT_HEIGHT		2048
/* Cap on total video buffer memory handed out by queue_setup(). */
#define VID_LIMIT_BYTES			(16 * 1024 * 1024)
#define MIN_FRAME_RATE			15
#define FRAME_INTERVAL_MILLI_SEC	(1000 / MIN_FRAME_RATE)

/* ISI states */
enum {
	ISI_STATE_IDLE = 0,	/* waiting for the first VSYNC */
	ISI_STATE_READY,	/* VSYNC seen, capture may start */
	ISI_STATE_WAIT_SOF,	/* streaming, SOF interrupt disabled */
};

/* Frame buffer descriptor */
struct fbd {
	/* Physical address of the frame buffer */
	u32 fb_address;
	/* DMA Control Register(only in HISI2) */
	u32 dma_ctrl;
	/* Physical address of the next fbd */
	u32 next_fbd_address;
};

static void set_dma_ctrl(struct fbd *fb_desc, u32 ctrl)
{
	fb_desc->dma_ctrl = ctrl;
}

/* One hardware DMA descriptor plus its physical address and free-list link. */
struct isi_dma_desc {
	struct list_head list;
	struct fbd *p_fbd;
	u32 fbd_phys;
};

/* Frame buffer data */
struct frame_buffer {
	struct vb2_buffer vb;
	struct isi_dma_desc *p_dma_desc;
	struct list_head list;
};

/* Per-device driver state. */
struct atmel_isi {
	/* Protects the access of variables shared with the ISR */
	spinlock_t			lock;
	void __iomem			*regs;

	int				sequence;
	/* State of the ISI module in capturing mode */
	int				state;

	/* Wait queue for waiting for SOF */
	wait_queue_head_t		vsync_wq;

	struct vb2_alloc_ctx		*alloc_ctx;

	/* Allocate descriptors for dma buffer use */
	struct fbd			*p_fb_descriptors;
	u32				fb_descriptors_phys;
	struct				list_head dma_desc_head;
	struct isi_dma_desc		dma_desc[MAX_BUFFER_NUM];

	struct completion		complete;
	struct clk			*pclk;
	unsigned int			irq;

	struct isi_platform_data	*pdata;

	struct list_head		video_buffer_list;
	struct frame_buffer		*active;

	struct soc_camera_device	*icd;
	struct soc_camera_host		soc_host;
};
104
/* Thin MMIO accessors for the ISI register block. */
static void isi_writel(struct atmel_isi *isi, u32 reg, u32 val)
{
	writel(val, isi->regs + reg);
}
static u32 isi_readl(struct atmel_isi *isi, u32 reg)
{
	return readl(isi->regs + reg);
}
113
/*
 * Program the capture geometry and YCbCr component ordering into CFG2
 * for the given media-bus code.  The ISI is disabled first; the caller
 * re-enables it when streaming starts.  Returns -EINVAL for any code
 * the hardware path does not (yet) support (e.g. RGB).
 */
static int configure_geometry(struct atmel_isi *isi, u32 width,
			u32 height, enum v4l2_mbus_pixelcode code)
{
	u32 cfg2, cr;

	switch (code) {
	/* YUV, including grey */
	case V4L2_MBUS_FMT_Y8_1X8:
		cr = ISI_CFG2_GRAYSCALE;
		break;
	case V4L2_MBUS_FMT_UYVY8_2X8:
		cr = ISI_CFG2_YCC_SWAP_MODE_3;
		break;
	case V4L2_MBUS_FMT_VYUY8_2X8:
		cr = ISI_CFG2_YCC_SWAP_MODE_2;
		break;
	case V4L2_MBUS_FMT_YUYV8_2X8:
		cr = ISI_CFG2_YCC_SWAP_MODE_1;
		break;
	case V4L2_MBUS_FMT_YVYU8_2X8:
		cr = ISI_CFG2_YCC_SWAP_DEFAULT;
		break;
	/* RGB, TODO */
	default:
		return -EINVAL;
	}

	isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);

	cfg2 = isi_readl(isi, ISI_CFG2);
	cfg2 |= cr;
	/* Set width: the register holds size - 1. */
	cfg2 &= ~(ISI_CFG2_IM_HSIZE_MASK);
	cfg2 |= ((width - 1) << ISI_CFG2_IM_HSIZE_OFFSET) &
			ISI_CFG2_IM_HSIZE_MASK;
	/* Set height */
	cfg2 &= ~(ISI_CFG2_IM_VSIZE_MASK);
	cfg2 |= ((height - 1) << ISI_CFG2_IM_VSIZE_OFFSET)
			& ISI_CFG2_IM_VSIZE_MASK;
	isi_writel(isi, ISI_CFG2, cfg2);

	return 0;
}
157
/*
 * Codec-transfer-done handling, called from the ISR with isi->lock held:
 * complete the active buffer (timestamp + sequence number) and kick off
 * DMA for the next queued buffer, if any.
 */
static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi)
{
	if (isi->active) {
		struct vb2_buffer *vb = &isi->active->vb;
		struct frame_buffer *buf = isi->active;

		list_del_init(&buf->list);
		do_gettimeofday(&vb->v4l2_buf.timestamp);
		vb->v4l2_buf.sequence = isi->sequence++;
		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
	}

	if (list_empty(&isi->video_buffer_list)) {
		isi->active = NULL;
	} else {
		/* start next dma frame. */
		isi->active = list_entry(isi->video_buffer_list.next,
					struct frame_buffer, list);
		/* Point the codec DMA channel at the next descriptor
		 * and re-enable it. */
		isi_writel(isi, ISI_DMA_C_DSCR,
			isi->active->p_dma_desc->fbd_phys);
		isi_writel(isi, ISI_DMA_C_CTRL,
			ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
		isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
	}
	return IRQ_HANDLED;
}
184
/* ISI interrupt service routine */
static irqreturn_t isi_interrupt(int irq, void *dev_id)
{
	struct atmel_isi *isi = dev_id;
	u32 status, mask, pending;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&isi->lock);

	/* Only act on interrupts that are actually enabled. */
	status = isi_readl(isi, ISI_STATUS);
	mask = isi_readl(isi, ISI_INTMASK);
	pending = status & mask;

	if (pending & ISI_CTRL_SRST) {
		/* Software reset finished: wake atmel_isi_wait_status(). */
		complete(&isi->complete);
		isi_writel(isi, ISI_INTDIS, ISI_CTRL_SRST);
		ret = IRQ_HANDLED;
	} else if (pending & ISI_CTRL_DIS) {
		/* Disable finished: likewise. */
		complete(&isi->complete);
		isi_writel(isi, ISI_INTDIS, ISI_CTRL_DIS);
		ret = IRQ_HANDLED;
	} else {
		/* First VSYNC after start_streaming(): announce readiness. */
		if ((pending & ISI_SR_VSYNC) &&
				(isi->state == ISI_STATE_IDLE)) {
			isi->state = ISI_STATE_READY;
			wake_up_interruptible(&isi->vsync_wq);
			ret = IRQ_HANDLED;
		}
		if (likely(pending & ISI_SR_CXFR_DONE))
			ret = atmel_isi_handle_streaming(isi);
	}

	spin_unlock(&isi->lock);
	return ret;
}
220
#define WAIT_ISI_RESET		1
#define WAIT_ISI_DISABLE	0
/*
 * Issue a software reset (WAIT_ISI_RESET) or a disable (WAIT_ISI_DISABLE)
 * and wait up to 100 ms for the matching interrupt to signal completion.
 * Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
{
	unsigned long timeout;
	/*
	 * The reset or disable will only succeed if we have a
	 * pixel clock from the camera.
	 */
	init_completion(&isi->complete);

	if (wait_reset) {
		isi_writel(isi, ISI_INTEN, ISI_CTRL_SRST);
		isi_writel(isi, ISI_CTRL, ISI_CTRL_SRST);
	} else {
		isi_writel(isi, ISI_INTEN, ISI_CTRL_DIS);
		isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
	}

	timeout = wait_for_completion_timeout(&isi->complete,
			msecs_to_jiffies(100));
	if (timeout == 0)
		return -ETIMEDOUT;

	return 0;
}
247
248/* ------------------------------------------------------------------
249 Videobuf operations
250 ------------------------------------------------------------------*/
/*
 * vb2 queue_setup: reset the ISI, compute the per-buffer size from the
 * current format, and clamp the buffer count to MAX_BUFFER_NUM and to
 * the VID_LIMIT_BYTES total-memory budget.
 */
static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
				unsigned int *nplanes, unsigned long sizes[],
				void *alloc_ctxs[])
{
	struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct atmel_isi *isi = ici->priv;
	unsigned long size;
	int ret, bytes_per_line;

	/* Reset ISI */
	ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
	if (ret < 0) {
		dev_err(icd->parent, "Reset ISI timed out\n");
		return ret;
	}
	/* Disable all interrupts */
	isi_writel(isi, ISI_INTDIS, ~0UL);

	bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
			icd->current_fmt->host_fmt);

	if (bytes_per_line < 0)
		return bytes_per_line;

	size = bytes_per_line * icd->user_height;

	if (!*nbuffers || *nbuffers > MAX_BUFFER_NUM)
		*nbuffers = MAX_BUFFER_NUM;

	/* Keep total buffer memory within the driver's budget. */
	if (size * *nbuffers > VID_LIMIT_BYTES)
		*nbuffers = VID_LIMIT_BYTES / size;

	/* Single-planar capture only. */
	*nplanes = 1;
	sizes[0] = size;
	alloc_ctxs[0] = isi->alloc_ctx;

	isi->sequence = 0;
	isi->active = NULL;

	dev_dbg(icd->parent, "%s, count=%d, size=%ld\n", __func__,
		*nbuffers, size);

	return 0;
}
296
/* vb2 buf_init: start with no DMA descriptor attached. */
static int buffer_init(struct vb2_buffer *vb)
{
	struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);

	buf->p_dma_desc = NULL;
	INIT_LIST_HEAD(&buf->list);

	return 0;
}

/*
 * vb2 buf_prepare: verify the plane is large enough for the current
 * format and, on first use, bind a free hardware DMA descriptor to the
 * buffer (pointing at the plane's physical address).
 */
static int buffer_prepare(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
	struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct atmel_isi *isi = ici->priv;
	unsigned long size;
	struct isi_dma_desc *desc;
	int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
			icd->current_fmt->host_fmt);

	if (bytes_per_line < 0)
		return bytes_per_line;

	size = bytes_per_line * icd->user_height;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(icd->parent, "%s data will not fit into plane (%lu < %lu)\n",
				__func__, vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(&buf->vb, 0, size);

	if (!buf->p_dma_desc) {
		if (list_empty(&isi->dma_desc_head)) {
			dev_err(icd->parent, "Not enough dma descriptors.\n");
			return -EINVAL;
		} else {
			/* Get an available descriptor */
			desc = list_entry(isi->dma_desc_head.next,
						struct isi_dma_desc, list);
			/* Delete the descriptor since now it is used */
			list_del_init(&desc->list);

			/* Initialize the dma descriptor */
			desc->p_fbd->fb_address =
					vb2_dma_contig_plane_paddr(vb, 0);
			desc->p_fbd->next_fbd_address = 0;
			set_dma_ctrl(desc->p_fbd, ISI_DMA_CTRL_WB);

			buf->p_dma_desc = desc;
		}
	}
	return 0;
}
353
/* vb2 buf_cleanup: return the buffer's DMA descriptor to the free list. */
static void buffer_cleanup(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct atmel_isi *isi = ici->priv;
	struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);

	/* This descriptor is available now and we add to head list */
	if (buf->p_dma_desc)
		list_add(&buf->p_dma_desc->list, &isi->dma_desc_head);
}
365
/*
 * Start codec-path DMA for the given buffer: enable the transfer-done
 * interrupts, point the channel at the buffer's descriptor, enable the
 * channel, then enable the codec path and the ISI itself.
 * Called with isi->lock held (from buffer_queue).
 */
static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer)
{
	u32 ctrl, cfg1;

	cfg1 = isi_readl(isi, ISI_CFG1);
	/* Enable irq: cxfr for the codec path, pxfr for the preview path */
	isi_writel(isi, ISI_INTEN,
			ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);

	/* Check if already in a frame */
	if (isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) {
		dev_err(isi->icd->parent, "Already in frame handling.\n");
		return;
	}

	isi_writel(isi, ISI_DMA_C_DSCR, buffer->p_dma_desc->fbd_phys);
	isi_writel(isi, ISI_DMA_C_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
	isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);

	/* Enable linked list */
	cfg1 |= isi->pdata->frate | ISI_CFG1_DISCR;

	/* Enable codec path and ISI */
	ctrl = ISI_CTRL_CDC | ISI_CTRL_EN;
	isi_writel(isi, ISI_CTRL, ctrl);
	isi_writel(isi, ISI_CFG1, cfg1);
}
393
/*
 * vb2 buf_queue: append the buffer to the capture list; if nothing is
 * in flight, make it the active buffer and start DMA immediately.
 */
static void buffer_queue(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct atmel_isi *isi = ici->priv;
	struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
	unsigned long flags = 0;

	/* The list and isi->active are shared with the ISR. */
	spin_lock_irqsave(&isi->lock, flags);
	list_add_tail(&buf->list, &isi->video_buffer_list);

	if (isi->active == NULL) {
		isi->active = buf;
		start_dma(isi, buf);
	}
	spin_unlock_irqrestore(&isi->lock, flags);
}
411
/*
 * vb2 start_streaming: enable the ISI, then sleep until the ISR reports
 * the first VSYNC (ISI_STATE_READY).  Returns -ERESTARTSYS if the wait
 * is interrupted, -EIO if woken in an unexpected state, 0 on success.
 */
static int start_streaming(struct vb2_queue *vq)
{
	struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct atmel_isi *isi = ici->priv;

	u32 sr = 0;
	int ret;

	spin_lock_irq(&isi->lock);
	isi->state = ISI_STATE_IDLE;
	/* Clear any pending SOF interrupt */
	sr = isi_readl(isi, ISI_STATUS);
	/* Enable VSYNC interrupt for SOF */
	isi_writel(isi, ISI_INTEN, ISI_SR_VSYNC);
	isi_writel(isi, ISI_CTRL, ISI_CTRL_EN);
	spin_unlock_irq(&isi->lock);

	dev_dbg(icd->parent, "Waiting for SOF\n");
	ret = wait_event_interruptible(isi->vsync_wq,
				       isi->state != ISI_STATE_IDLE);
	if (ret)
		return ret;

	if (isi->state != ISI_STATE_READY)
		return -EIO;

	/* Synchronization achieved; stop listening for VSYNC. */
	spin_lock_irq(&isi->lock);
	isi->state = ISI_STATE_WAIT_SOF;
	isi_writel(isi, ISI_INTDIS, ISI_SR_VSYNC);
	spin_unlock_irq(&isi->lock);

	return 0;
}
446
447/* abort streaming and wait for last buffer */
448static int stop_streaming(struct vb2_queue *vq)
449{
450 struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
451 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
452 struct atmel_isi *isi = ici->priv;
453 struct frame_buffer *buf, *node;
454 int ret = 0;
455 unsigned long timeout;
456
457 spin_lock_irq(&isi->lock);
458 isi->active = NULL;
459 /* Release all active buffers */
460 list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) {
461 list_del_init(&buf->list);
462 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
463 }
464 spin_unlock_irq(&isi->lock);
465
466 timeout = jiffies + FRAME_INTERVAL_MILLI_SEC * HZ;
467 /* Wait until the end of the current frame. */
468 while ((isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) &&
469 time_before(jiffies, timeout))
470 msleep(1);
471
472 if (time_after(jiffies, timeout)) {
473 dev_err(icd->parent,
474 "Timeout waiting for finishing codec request\n");
475 return -ETIMEDOUT;
476 }
477
478 /* Disable interrupts */
479 isi_writel(isi, ISI_INTDIS,
480 ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);
481
482 /* Disable ISI and wait for it is done */
483 ret = atmel_isi_wait_status(isi, WAIT_ISI_DISABLE);
484 if (ret < 0)
485 dev_err(icd->parent, "Disable ISI timed out\n");
486
487 return ret;
488}
489
/* videobuf2 operation table for the codec capture path. */
static struct vb2_ops isi_video_qops = {
	.queue_setup		= queue_setup,
	.buf_init		= buffer_init,
	.buf_prepare		= buffer_prepare,
	.buf_cleanup		= buffer_cleanup,
	.buf_queue		= buffer_queue,
	.start_streaming	= start_streaming,
	.stop_streaming		= stop_streaming,
	.wait_prepare		= soc_camera_unlock,
	.wait_finish		= soc_camera_lock,
};

/* ------------------------------------------------------------------
	SOC camera operations for the device
   ------------------------------------------------------------------*/
/* Set up a vb2 queue for MMAP-only capture using contiguous DMA memory. */
static int isi_camera_init_videobuf(struct vb2_queue *q,
				     struct soc_camera_device *icd)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP;
	q->drv_priv = icd;
	q->buf_struct_size = sizeof(struct frame_buffer);
	q->ops = &isi_video_qops;
	q->mem_ops = &vb2_dma_contig_memops;

	return vb2_queue_init(q);
}
517
/*
 * soc_camera set_fmt: negotiate the format with the sensor subdev
 * (s_mbus_fmt), program the ISI geometry to match, and record the
 * chosen translation in icd->current_fmt.
 */
static int isi_camera_set_fmt(struct soc_camera_device *icd,
			      struct v4l2_format *f)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct atmel_isi *isi = ici->priv;
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	const struct soc_camera_format_xlate *xlate;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct v4l2_mbus_framefmt mf;
	int ret;

	xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
	if (!xlate) {
		dev_warn(icd->parent, "Format %x not found\n",
			 pix->pixelformat);
		return -EINVAL;
	}

	dev_dbg(icd->parent, "Plan to set format %dx%d\n",
			pix->width, pix->height);

	mf.width	= pix->width;
	mf.height	= pix->height;
	mf.field	= pix->field;
	mf.colorspace	= pix->colorspace;
	mf.code		= xlate->code;

	ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
	if (ret < 0)
		return ret;

	/* The sensor must accept the bus code we asked for. */
	if (mf.code != xlate->code)
		return -EINVAL;

	ret = configure_geometry(isi, pix->width, pix->height, xlate->code);
	if (ret < 0)
		return ret;

	/* Propagate whatever the sensor actually settled on. */
	pix->width		= mf.width;
	pix->height		= mf.height;
	pix->field		= mf.field;
	pix->colorspace		= mf.colorspace;
	icd->current_fmt	= xlate;

	dev_dbg(icd->parent, "Finally set format %dx%d\n",
		pix->width, pix->height);

	return ret;
}
567
568static int isi_camera_try_fmt(struct soc_camera_device *icd,
569 struct v4l2_format *f)
570{
571 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
572 const struct soc_camera_format_xlate *xlate;
573 struct v4l2_pix_format *pix = &f->fmt.pix;
574 struct v4l2_mbus_framefmt mf;
575 u32 pixfmt = pix->pixelformat;
576 int ret;
577
578 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
579 if (pixfmt && !xlate) {
580 dev_warn(icd->parent, "Format %x not found\n", pixfmt);
581 return -EINVAL;
582 }
583
584 /* limit to Atmel ISI hardware capabilities */
585 if (pix->height > MAX_SUPPORT_HEIGHT)
586 pix->height = MAX_SUPPORT_HEIGHT;
587 if (pix->width > MAX_SUPPORT_WIDTH)
588 pix->width = MAX_SUPPORT_WIDTH;
589
590 /* limit to sensor capabilities */
591 mf.width = pix->width;
592 mf.height = pix->height;
593 mf.field = pix->field;
594 mf.colorspace = pix->colorspace;
595 mf.code = xlate->code;
596
597 ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
598 if (ret < 0)
599 return ret;
600
601 pix->width = mf.width;
602 pix->height = mf.height;
603 pix->colorspace = mf.colorspace;
604
605 switch (mf.field) {
606 case V4L2_FIELD_ANY:
607 pix->field = V4L2_FIELD_NONE;
608 break;
609 case V4L2_FIELD_NONE:
610 break;
611 default:
612 dev_err(icd->parent, "Field type %d unsupported.\n",
613 mf.field);
614 ret = -EINVAL;
615 }
616
617 return ret;
618}
619
/* Host-side output formats offered in addition to sensor pass-through;
 * currently only packed YUYV 4:2:2 (two 8-bit samples per pixel). */
static const struct soc_mbus_pixelfmt isi_camera_formats[] = {
	{
		.fourcc			= V4L2_PIX_FMT_YUYV,
		.name			= "Packed YUV422 16 bit",
		.bits_per_sample	= 8,
		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
		.order			= SOC_MBUS_ORDER_LE,
	},
};
629
630/* This will be corrected as we get more formats */
631static bool isi_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
632{
633 return fmt->packing == SOC_MBUS_PACKING_NONE ||
634 (fmt->bits_per_sample == 8 &&
635 fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
636 (fmt->bits_per_sample > 8 &&
637 fmt->packing == SOC_MBUS_PACKING_EXTEND16);
638}
639
640static unsigned long make_bus_param(struct atmel_isi *isi)
641{
642 unsigned long flags;
643 /*
644 * Platform specified synchronization and pixel clock polarities are
645 * only a recommendation and are only used during probing. Atmel ISI
646 * camera interface only works in master mode, i.e., uses HSYNC and
647 * VSYNC signals from the sensor
648 */
649 flags = SOCAM_MASTER |
650 SOCAM_HSYNC_ACTIVE_HIGH |
651 SOCAM_HSYNC_ACTIVE_LOW |
652 SOCAM_VSYNC_ACTIVE_HIGH |
653 SOCAM_VSYNC_ACTIVE_LOW |
654 SOCAM_PCLK_SAMPLE_RISING |
655 SOCAM_PCLK_SAMPLE_FALLING |
656 SOCAM_DATA_ACTIVE_HIGH;
657
658 if (isi->pdata->data_width_flags & ISI_DATAWIDTH_10)
659 flags |= SOCAM_DATAWIDTH_10;
660
661 if (isi->pdata->data_width_flags & ISI_DATAWIDTH_8)
662 flags |= SOCAM_DATAWIDTH_8;
663
664 if (flags & SOCAM_DATAWIDTH_MASK)
665 return flags;
666
667 return 0;
668}
669
670static int isi_camera_try_bus_param(struct soc_camera_device *icd,
671 unsigned char buswidth)
672{
673 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
674 struct atmel_isi *isi = ici->priv;
675 unsigned long camera_flags;
676 int ret;
677
678 camera_flags = icd->ops->query_bus_param(icd);
679 ret = soc_camera_bus_param_compatible(camera_flags,
680 make_bus_param(isi));
681 if (!ret)
682 return -EINVAL;
683 return 0;
684}
685
686
/*
 * Build format-translation entries for the sensor's idx-th media-bus code.
 *
 * When @xlate is NULL only the number of supported formats is counted;
 * otherwise one entry is written per supported host format and the
 * pointer advanced. Returns the number of formats added, or 0 when @idx
 * is past the sensor's last format or the format cannot be supported.
 */
static int isi_camera_get_formats(struct soc_camera_device *icd,
			unsigned int idx,
			struct soc_camera_format_xlate *xlate)
{
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	int formats = 0, ret;
	/* sensor format */
	enum v4l2_mbus_pixelcode code;
	/* soc camera host format */
	const struct soc_mbus_pixelfmt *fmt;

	ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
	if (ret < 0)
		/* No more formats */
		return 0;

	fmt = soc_mbus_get_fmtdesc(code);
	if (!fmt) {
		dev_err(icd->parent,
			"Invalid format code #%u: %d\n", idx, code);
		return 0;
	}

	/* This also checks support for the requested bits-per-sample */
	ret = isi_camera_try_bus_param(icd, fmt->bits_per_sample);
	if (ret < 0) {
		dev_err(icd->parent,
			"Fail to try the bus parameters.\n");
		return 0;
	}

	switch (code) {
	case V4L2_MBUS_FMT_UYVY8_2X8:
	case V4L2_MBUS_FMT_VYUY8_2X8:
	case V4L2_MBUS_FMT_YUYV8_2X8:
	case V4L2_MBUS_FMT_YVYU8_2X8:
		/* YUV422 sensors additionally get the packed YUYV format */
		formats++;
		if (xlate) {
			xlate->host_fmt	= &isi_camera_formats[0];
			xlate->code	= code;
			xlate++;
			dev_dbg(icd->parent, "Providing format %s using code %d\n",
				isi_camera_formats[0].name, code);
		}
		break;
	default:
		/* Other codes are only usable when pass-through works */
		if (!isi_camera_packing_supported(fmt))
			return 0;
		if (xlate)
			dev_dbg(icd->parent,
				"Providing format %s in pass-through mode\n",
				fmt->name);
	}

	/* Generic pass-through */
	formats++;
	if (xlate) {
		xlate->host_fmt	= fmt;
		xlate->code	= code;
		xlate++;
	}

	return formats;
}
751
752/* Called with .video_lock held */
753static int isi_camera_add_device(struct soc_camera_device *icd)
754{
755 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
756 struct atmel_isi *isi = ici->priv;
757 int ret;
758
759 if (isi->icd)
760 return -EBUSY;
761
762 ret = clk_enable(isi->pclk);
763 if (ret)
764 return ret;
765
766 isi->icd = icd;
767 dev_dbg(icd->parent, "Atmel ISI Camera driver attached to camera %d\n",
768 icd->devnum);
769 return 0;
770}
771/* Called with .video_lock held */
772static void isi_camera_remove_device(struct soc_camera_device *icd)
773{
774 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
775 struct atmel_isi *isi = ici->priv;
776
777 BUG_ON(icd != isi->icd);
778
779 clk_disable(isi->pclk);
780 isi->icd = NULL;
781
782 dev_dbg(icd->parent, "Atmel ISI Camera driver detached from camera %d\n",
783 icd->devnum);
784}
785
/* poll() file operation: delegate readiness reporting to videobuf2 */
static unsigned int isi_camera_poll(struct file *file, poll_table *pt)
{
	struct soc_camera_device *icd = file->private_data;

	return vb2_poll(&icd->vb2_vidq, file, pt);
}
792
793static int isi_camera_querycap(struct soc_camera_host *ici,
794 struct v4l2_capability *cap)
795{
796 strcpy(cap->driver, "atmel-isi");
797 strcpy(cap->card, "Atmel Image Sensor Interface");
798 cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE |
799 V4L2_CAP_STREAMING);
800 return 0;
801}
802
/*
 * Negotiate the parallel-bus configuration between the sensor and the
 * ISI, apply it on the sensor, then program the ISI_CFG1 polarity, sync
 * and mode bits accordingly. Returns 0 on success or a negative errno.
 */
static int isi_camera_set_bus_param(struct soc_camera_device *icd, u32 pixfmt)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct atmel_isi *isi = ici->priv;
	unsigned long bus_flags, camera_flags, common_flags;
	int ret;
	u32 cfg1 = 0;

	camera_flags = icd->ops->query_bus_param(icd);

	bus_flags = make_bus_param(isi);
	common_flags = soc_camera_bus_param_compatible(camera_flags, bus_flags);
	dev_dbg(icd->parent, "Flags cam: 0x%lx host: 0x%lx common: 0x%lx\n",
		camera_flags, bus_flags, common_flags);
	if (!common_flags)
		return -EINVAL;

	/* Make choices, based on platform preferences: when both
	 * polarities/edges are possible, the platform data decides. */
	if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) &&
	    (common_flags & SOCAM_HSYNC_ACTIVE_LOW)) {
		if (isi->pdata->hsync_act_low)
			common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH;
		else
			common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW;
	}

	if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) &&
	    (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) {
		if (isi->pdata->vsync_act_low)
			common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH;
		else
			common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW;
	}

	if ((common_flags & SOCAM_PCLK_SAMPLE_RISING) &&
	    (common_flags & SOCAM_PCLK_SAMPLE_FALLING)) {
		if (isi->pdata->pclk_act_falling)
			common_flags &= ~SOCAM_PCLK_SAMPLE_RISING;
		else
			common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING;
	}

	/* Let the sensor apply the resolved configuration first */
	ret = icd->ops->set_bus_param(icd, common_flags);
	if (ret < 0) {
		dev_dbg(icd->parent, "Camera set_bus_param(%lx) returned %d\n",
			common_flags, ret);
		return ret;
	}

	/* set bus param for ISI */
	if (common_flags & SOCAM_HSYNC_ACTIVE_LOW)
		cfg1 |= ISI_CFG1_HSYNC_POL_ACTIVE_LOW;
	if (common_flags & SOCAM_VSYNC_ACTIVE_LOW)
		cfg1 |= ISI_CFG1_VSYNC_POL_ACTIVE_LOW;
	if (common_flags & SOCAM_PCLK_SAMPLE_FALLING)
		cfg1 |= ISI_CFG1_PIXCLK_POL_ACTIVE_FALLING;

	if (isi->pdata->has_emb_sync)
		cfg1 |= ISI_CFG1_EMB_SYNC;
	if (isi->pdata->isi_full_mode)
		cfg1 |= ISI_CFG1_FULL_MODE;

	/* Disable the interface before reprogramming CFG1 */
	isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
	isi_writel(isi, ISI_CFG1, cfg1);

	return 0;
}
870
/* soc-camera host callbacks implemented by this driver */
static struct soc_camera_host_ops isi_soc_camera_host_ops = {
	.owner		= THIS_MODULE,
	.add		= isi_camera_add_device,
	.remove		= isi_camera_remove_device,
	.set_fmt	= isi_camera_set_fmt,
	.try_fmt	= isi_camera_try_fmt,
	.get_formats	= isi_camera_get_formats,
	.init_videobuf2	= isi_camera_init_videobuf,
	.poll		= isi_camera_poll,
	.querycap	= isi_camera_querycap,
	.set_bus_param	= isi_camera_set_bus_param,
};
883
884/* -----------------------------------------------------------------------*/
/*
 * Undo probe: release the interrupt, unregister the soc-camera host and
 * free the DMA descriptor block, register mapping, clock and driver state.
 * NOTE(review): the irq is freed before the host is unregistered — looks
 * safe because the ISI is left disabled outside streaming, but confirm.
 */
static int __devexit atmel_isi_remove(struct platform_device *pdev)
{
	struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
	struct atmel_isi *isi = container_of(soc_host,
					struct atmel_isi, soc_host);

	free_irq(isi->irq, isi);
	soc_camera_host_unregister(soc_host);
	vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
	dma_free_coherent(&pdev->dev,
			sizeof(struct fbd) * MAX_BUFFER_NUM,
			isi->p_fb_descriptors,
			isi->fb_descriptors_phys);

	iounmap(isi->regs);
	clk_put(isi->pclk);
	kfree(isi);

	return 0;
}
905
906static int __devinit atmel_isi_probe(struct platform_device *pdev)
907{
908 unsigned int irq;
909 struct atmel_isi *isi;
910 struct clk *pclk;
911 struct resource *regs;
912 int ret, i;
913 struct device *dev = &pdev->dev;
914 struct soc_camera_host *soc_host;
915 struct isi_platform_data *pdata;
916
917 pdata = dev->platform_data;
918 if (!pdata || !pdata->data_width_flags) {
919 dev_err(&pdev->dev,
920 "No config available for Atmel ISI\n");
921 return -EINVAL;
922 }
923
924 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
925 if (!regs)
926 return -ENXIO;
927
928 pclk = clk_get(&pdev->dev, "isi_clk");
929 if (IS_ERR(pclk))
930 return PTR_ERR(pclk);
931
932 isi = kzalloc(sizeof(struct atmel_isi), GFP_KERNEL);
933 if (!isi) {
934 ret = -ENOMEM;
935 dev_err(&pdev->dev, "Can't allocate interface!\n");
936 goto err_alloc_isi;
937 }
938
939 isi->pclk = pclk;
940 isi->pdata = pdata;
941 isi->active = NULL;
942 spin_lock_init(&isi->lock);
943 init_waitqueue_head(&isi->vsync_wq);
944 INIT_LIST_HEAD(&isi->video_buffer_list);
945 INIT_LIST_HEAD(&isi->dma_desc_head);
946
947 isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev,
948 sizeof(struct fbd) * MAX_BUFFER_NUM,
949 &isi->fb_descriptors_phys,
950 GFP_KERNEL);
951 if (!isi->p_fb_descriptors) {
952 ret = -ENOMEM;
953 dev_err(&pdev->dev, "Can't allocate descriptors!\n");
954 goto err_alloc_descriptors;
955 }
956
957 for (i = 0; i < MAX_BUFFER_NUM; i++) {
958 isi->dma_desc[i].p_fbd = isi->p_fb_descriptors + i;
959 isi->dma_desc[i].fbd_phys = isi->fb_descriptors_phys +
960 i * sizeof(struct fbd);
961 list_add(&isi->dma_desc[i].list, &isi->dma_desc_head);
962 }
963
964 isi->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
965 if (IS_ERR(isi->alloc_ctx)) {
966 ret = PTR_ERR(isi->alloc_ctx);
967 goto err_alloc_ctx;
968 }
969
970 isi->regs = ioremap(regs->start, resource_size(regs));
971 if (!isi->regs) {
972 ret = -ENOMEM;
973 goto err_ioremap;
974 }
975
976 isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
977
978 irq = platform_get_irq(pdev, 0);
979 if (irq < 0) {
980 ret = irq;
981 goto err_req_irq;
982 }
983
984 ret = request_irq(irq, isi_interrupt, 0, "isi", isi);
985 if (ret) {
986 dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
987 goto err_req_irq;
988 }
989 isi->irq = irq;
990
991 soc_host = &isi->soc_host;
992 soc_host->drv_name = "isi-camera";
993 soc_host->ops = &isi_soc_camera_host_ops;
994 soc_host->priv = isi;
995 soc_host->v4l2_dev.dev = &pdev->dev;
996 soc_host->nr = pdev->id;
997
998 ret = soc_camera_host_register(soc_host);
999 if (ret) {
1000 dev_err(&pdev->dev, "Unable to register soc camera host\n");
1001 goto err_register_soc_camera_host;
1002 }
1003 return 0;
1004
1005err_register_soc_camera_host:
1006 free_irq(isi->irq, isi);
1007err_req_irq:
1008 iounmap(isi->regs);
1009err_ioremap:
1010 vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
1011err_alloc_ctx:
1012 dma_free_coherent(&pdev->dev,
1013 sizeof(struct fbd) * MAX_BUFFER_NUM,
1014 isi->p_fb_descriptors,
1015 isi->fb_descriptors_phys);
1016err_alloc_descriptors:
1017 kfree(isi);
1018err_alloc_isi:
1019 clk_put(isi->pclk);
1020
1021 return ret;
1022}
1023
/* Platform driver glue; bound via platform_driver_probe() at module init */
static struct platform_driver atmel_isi_driver = {
	.probe		= atmel_isi_probe,
	.remove		= __devexit_p(atmel_isi_remove),
	.driver		= {
		.name = "atmel_isi",
		.owner = THIS_MODULE,
	},
};
1032
/* Register the driver; platform_driver_probe() suits this
 * non-hotpluggable device and lets probe code be __devinit. */
static int __init atmel_isi_init_module(void)
{
	return platform_driver_probe(&atmel_isi_driver, &atmel_isi_probe);
}
1037
/* Module exit: unregister the platform driver */
static void __exit atmel_isi_exit(void)
{
	platform_driver_unregister(&atmel_isi_driver);
}
module_init(atmel_isi_init_module);
module_exit(atmel_isi_exit);

/* Module metadata */
MODULE_AUTHOR("Josh Wu <josh.wu@atmel.com>");
MODULE_DESCRIPTION("The V4L2 driver for Atmel Linux");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("video");
diff --git a/drivers/media/video/au0828/au0828-core.c b/drivers/media/video/au0828/au0828-core.c
index ca342e4c61fc..1e4ce5068ec2 100644
--- a/drivers/media/video/au0828/au0828-core.c
+++ b/drivers/media/video/au0828/au0828-core.c
@@ -292,3 +292,4 @@ module_exit(au0828_exit);
292MODULE_DESCRIPTION("Driver for Auvitek AU0828 based products"); 292MODULE_DESCRIPTION("Driver for Auvitek AU0828 based products");
293MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>"); 293MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
294MODULE_LICENSE("GPL"); 294MODULE_LICENSE("GPL");
295MODULE_VERSION("0.0.2");
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index c03eb29a9ee6..0b3e481ffe8c 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -33,7 +33,6 @@
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/device.h> 34#include <linux/device.h>
35#include <linux/suspend.h> 35#include <linux/suspend.h>
36#include <linux/version.h>
37#include <media/v4l2-common.h> 36#include <media/v4l2-common.h>
38#include <media/v4l2-ioctl.h> 37#include <media/v4l2-ioctl.h>
39#include <media/v4l2-chip-ident.h> 38#include <media/v4l2-chip-ident.h>
@@ -43,8 +42,6 @@
43 42
44static DEFINE_MUTEX(au0828_sysfs_lock); 43static DEFINE_MUTEX(au0828_sysfs_lock);
45 44
46#define AU0828_VERSION_CODE KERNEL_VERSION(0, 0, 1)
47
48/* ------------------------------------------------------------------ 45/* ------------------------------------------------------------------
49 Videobuf operations 46 Videobuf operations
50 ------------------------------------------------------------------*/ 47 ------------------------------------------------------------------*/
@@ -1254,8 +1251,6 @@ static int vidioc_querycap(struct file *file, void *priv,
1254 strlcpy(cap->card, dev->board.name, sizeof(cap->card)); 1251 strlcpy(cap->card, dev->board.name, sizeof(cap->card));
1255 strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info)); 1252 strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info));
1256 1253
1257 cap->version = AU0828_VERSION_CODE;
1258
1259 /*set the device capabilities */ 1254 /*set the device capabilities */
1260 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | 1255 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
1261 V4L2_CAP_VBI_CAPTURE | 1256 V4L2_CAP_VBI_CAPTURE |
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 3c9e6c7e7b52..5b15f63bf065 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -2892,13 +2892,10 @@ void __devinit bttv_idcard(struct bttv *btv)
2892{ 2892{
2893 unsigned int gpiobits; 2893 unsigned int gpiobits;
2894 int i,type; 2894 int i,type;
2895 unsigned short tmp;
2896 2895
2897 /* read PCI subsystem ID */ 2896 /* read PCI subsystem ID */
2898 pci_read_config_word(btv->c.pci, PCI_SUBSYSTEM_ID, &tmp); 2897 btv->cardid = btv->c.pci->subsystem_device << 16;
2899 btv->cardid = tmp << 16; 2898 btv->cardid |= btv->c.pci->subsystem_vendor;
2900 pci_read_config_word(btv->c.pci, PCI_SUBSYSTEM_VENDOR_ID, &tmp);
2901 btv->cardid |= tmp;
2902 2899
2903 if (0 != btv->cardid && 0xffffffff != btv->cardid) { 2900 if (0 != btv->cardid && 0xffffffff != btv->cardid) {
2904 /* look for the card */ 2901 /* look for the card */
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 834a48394bce..14444de67d5e 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -57,6 +57,7 @@
57 57
58#include <media/saa6588.h> 58#include <media/saa6588.h>
59 59
60#define BTTV_VERSION "0.9.19"
60 61
61unsigned int bttv_num; /* number of Bt848s in use */ 62unsigned int bttv_num; /* number of Bt848s in use */
62struct bttv *bttvs[BTTV_MAX]; 63struct bttv *bttvs[BTTV_MAX];
@@ -163,6 +164,7 @@ MODULE_PARM_DESC(radio_nr, "radio device numbers");
163MODULE_DESCRIPTION("bttv - v4l/v4l2 driver module for bt848/878 based cards"); 164MODULE_DESCRIPTION("bttv - v4l/v4l2 driver module for bt848/878 based cards");
164MODULE_AUTHOR("Ralph Metzler & Marcus Metzler & Gerd Knorr"); 165MODULE_AUTHOR("Ralph Metzler & Marcus Metzler & Gerd Knorr");
165MODULE_LICENSE("GPL"); 166MODULE_LICENSE("GPL");
167MODULE_VERSION(BTTV_VERSION);
166 168
167/* ----------------------------------------------------------------------- */ 169/* ----------------------------------------------------------------------- */
168/* sysfs */ 170/* sysfs */
@@ -2616,7 +2618,6 @@ static int bttv_querycap(struct file *file, void *priv,
2616 strlcpy(cap->card, btv->video_dev->name, sizeof(cap->card)); 2618 strlcpy(cap->card, btv->video_dev->name, sizeof(cap->card));
2617 snprintf(cap->bus_info, sizeof(cap->bus_info), 2619 snprintf(cap->bus_info, sizeof(cap->bus_info),
2618 "PCI:%s", pci_name(btv->c.pci)); 2620 "PCI:%s", pci_name(btv->c.pci));
2619 cap->version = BTTV_VERSION_CODE;
2620 cap->capabilities = 2621 cap->capabilities =
2621 V4L2_CAP_VIDEO_CAPTURE | 2622 V4L2_CAP_VIDEO_CAPTURE |
2622 V4L2_CAP_VBI_CAPTURE | 2623 V4L2_CAP_VBI_CAPTURE |
@@ -3416,7 +3417,6 @@ static int radio_querycap(struct file *file, void *priv,
3416 strcpy(cap->driver, "bttv"); 3417 strcpy(cap->driver, "bttv");
3417 strlcpy(cap->card, btv->radio_dev->name, sizeof(cap->card)); 3418 strlcpy(cap->card, btv->radio_dev->name, sizeof(cap->card));
3418 sprintf(cap->bus_info, "PCI:%s", pci_name(btv->c.pci)); 3419 sprintf(cap->bus_info, "PCI:%s", pci_name(btv->c.pci));
3419 cap->version = BTTV_VERSION_CODE;
3420 cap->capabilities = V4L2_CAP_TUNER; 3420 cap->capabilities = V4L2_CAP_TUNER;
3421 3421
3422 return 0; 3422 return 0;
@@ -4585,14 +4585,8 @@ static int __init bttv_init_module(void)
4585 4585
4586 bttv_num = 0; 4586 bttv_num = 0;
4587 4587
4588 printk(KERN_INFO "bttv: driver version %d.%d.%d loaded\n", 4588 printk(KERN_INFO "bttv: driver version %s loaded\n",
4589 (BTTV_VERSION_CODE >> 16) & 0xff, 4589 BTTV_VERSION);
4590 (BTTV_VERSION_CODE >> 8) & 0xff,
4591 BTTV_VERSION_CODE & 0xff);
4592#ifdef SNAPSHOT
4593 printk(KERN_INFO "bttv: snapshot date %04d-%02d-%02d\n",
4594 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
4595#endif
4596 if (gbuffers < 2 || gbuffers > VIDEO_MAX_FRAME) 4590 if (gbuffers < 2 || gbuffers > VIDEO_MAX_FRAME)
4597 gbuffers = 2; 4591 gbuffers = 2;
4598 if (gbufsize > BTTV_MAX_FBUF) 4592 if (gbufsize > BTTV_MAX_FBUF)
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index 9b776faf0741..318edf2830b4 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -25,9 +25,6 @@
25#ifndef _BTTVP_H_ 25#ifndef _BTTVP_H_
26#define _BTTVP_H_ 26#define _BTTVP_H_
27 27
28#include <linux/version.h>
29#define BTTV_VERSION_CODE KERNEL_VERSION(0,9,18)
30
31#include <linux/types.h> 28#include <linux/types.h>
32#include <linux/wait.h> 29#include <linux/wait.h>
33#include <linux/i2c.h> 30#include <linux/i2c.h>
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index c1193506131c..f09df9dffaae 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -71,7 +71,6 @@ OTHER DEALINGS IN THE SOFTWARE.
71#include <linux/mm.h> 71#include <linux/mm.h>
72#include <linux/parport.h> 72#include <linux/parport.h>
73#include <linux/sched.h> 73#include <linux/sched.h>
74#include <linux/version.h>
75#include <linux/videodev2.h> 74#include <linux/videodev2.h>
76#include <linux/mutex.h> 75#include <linux/mutex.h>
77#include <asm/uaccess.h> 76#include <asm/uaccess.h>
@@ -647,7 +646,6 @@ static int qcam_querycap(struct file *file, void *priv,
647 strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver)); 646 strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
648 strlcpy(vcap->card, "B&W Quickcam", sizeof(vcap->card)); 647 strlcpy(vcap->card, "B&W Quickcam", sizeof(vcap->card));
649 strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info)); 648 strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
650 vcap->version = KERNEL_VERSION(0, 0, 2);
651 vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE; 649 vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
652 return 0; 650 return 0;
653} 651}
@@ -895,6 +893,7 @@ static struct qcam *qcam_init(struct parport *port)
895 893
896 if (v4l2_device_register(NULL, v4l2_dev) < 0) { 894 if (v4l2_device_register(NULL, v4l2_dev) < 0) {
897 v4l2_err(v4l2_dev, "Could not register v4l2_device\n"); 895 v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
896 kfree(qcam);
898 return NULL; 897 return NULL;
899 } 898 }
900 899
@@ -1092,3 +1091,4 @@ module_init(init_bw_qcams);
1092module_exit(exit_bw_qcams); 1091module_exit(exit_bw_qcams);
1093 1092
1094MODULE_LICENSE("GPL"); 1093MODULE_LICENSE("GPL");
1094MODULE_VERSION("0.0.3");
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index 24fc00965a12..cd8ff0473184 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -35,7 +35,6 @@
35#include <linux/sched.h> 35#include <linux/sched.h>
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/jiffies.h> 37#include <linux/jiffies.h>
38#include <linux/version.h>
39#include <linux/videodev2.h> 38#include <linux/videodev2.h>
40#include <asm/uaccess.h> 39#include <asm/uaccess.h>
41#include <media/v4l2-device.h> 40#include <media/v4l2-device.h>
@@ -517,7 +516,6 @@ static int qcam_querycap(struct file *file, void *priv,
517 strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver)); 516 strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
518 strlcpy(vcap->card, "Color Quickcam", sizeof(vcap->card)); 517 strlcpy(vcap->card, "Color Quickcam", sizeof(vcap->card));
519 strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info)); 518 strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
520 vcap->version = KERNEL_VERSION(0, 0, 3);
521 vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE; 519 vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
522 return 0; 520 return 0;
523} 521}
@@ -752,6 +750,7 @@ static struct qcam *qcam_init(struct parport *port)
752 750
753 if (v4l2_device_register(NULL, v4l2_dev) < 0) { 751 if (v4l2_device_register(NULL, v4l2_dev) < 0) {
754 v4l2_err(v4l2_dev, "Could not register v4l2_device\n"); 752 v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
753 kfree(qcam);
755 return NULL; 754 return NULL;
756 } 755 }
757 756
@@ -886,6 +885,7 @@ static void __exit cqcam_cleanup(void)
886MODULE_AUTHOR("Philip Blundell <philb@gnu.org>"); 885MODULE_AUTHOR("Philip Blundell <philb@gnu.org>");
887MODULE_DESCRIPTION(BANNER); 886MODULE_DESCRIPTION(BANNER);
888MODULE_LICENSE("GPL"); 887MODULE_LICENSE("GPL");
888MODULE_VERSION("0.0.4");
889 889
890module_init(cqcam_init); 890module_init(cqcam_init);
891module_exit(cqcam_cleanup); 891module_exit(cqcam_cleanup);
diff --git a/drivers/media/video/cafe_ccic-regs.h b/drivers/media/video/cafe_ccic-regs.h
deleted file mode 100644
index 8e2a87cdc791..000000000000
--- a/drivers/media/video/cafe_ccic-regs.h
+++ /dev/null
@@ -1,166 +0,0 @@
1/*
2 * Register definitions for the m88alp01 camera interface. Offsets in bytes
3 * as given in the spec.
4 *
5 * Copyright 2006 One Laptop Per Child Association, Inc.
6 *
7 * Written by Jonathan Corbet, corbet@lwn.net.
8 *
9 * This file may be distributed under the terms of the GNU General
10 * Public License, version 2.
11 */
12#define REG_Y0BAR 0x00
13#define REG_Y1BAR 0x04
14#define REG_Y2BAR 0x08
15/* ... */
16
17#define REG_IMGPITCH 0x24 /* Image pitch register */
18#define IMGP_YP_SHFT 2 /* Y pitch params */
19#define IMGP_YP_MASK 0x00003ffc /* Y pitch field */
20#define IMGP_UVP_SHFT 18 /* UV pitch (planar) */
21#define IMGP_UVP_MASK 0x3ffc0000
22#define REG_IRQSTATRAW 0x28 /* RAW IRQ Status */
23#define IRQ_EOF0 0x00000001 /* End of frame 0 */
24#define IRQ_EOF1 0x00000002 /* End of frame 1 */
25#define IRQ_EOF2 0x00000004 /* End of frame 2 */
26#define IRQ_SOF0 0x00000008 /* Start of frame 0 */
27#define IRQ_SOF1 0x00000010 /* Start of frame 1 */
28#define IRQ_SOF2 0x00000020 /* Start of frame 2 */
29#define IRQ_OVERFLOW 0x00000040 /* FIFO overflow */
30#define IRQ_TWSIW 0x00010000 /* TWSI (smbus) write */
31#define IRQ_TWSIR 0x00020000 /* TWSI read */
32#define IRQ_TWSIE 0x00040000 /* TWSI error */
33#define TWSIIRQS (IRQ_TWSIW|IRQ_TWSIR|IRQ_TWSIE)
34#define FRAMEIRQS (IRQ_EOF0|IRQ_EOF1|IRQ_EOF2|IRQ_SOF0|IRQ_SOF1|IRQ_SOF2)
35#define ALLIRQS (TWSIIRQS|FRAMEIRQS|IRQ_OVERFLOW)
36#define REG_IRQMASK 0x2c /* IRQ mask - same bits as IRQSTAT */
37#define REG_IRQSTAT 0x30 /* IRQ status / clear */
38
39#define REG_IMGSIZE 0x34 /* Image size */
40#define IMGSZ_V_MASK 0x1fff0000
41#define IMGSZ_V_SHIFT 16
42#define IMGSZ_H_MASK 0x00003fff
43#define REG_IMGOFFSET 0x38 /* IMage offset */
44
45#define REG_CTRL0 0x3c /* Control 0 */
46#define C0_ENABLE 0x00000001 /* Makes the whole thing go */
47
48/* Mask for all the format bits */
49#define C0_DF_MASK 0x00fffffc /* Bits 2-23 */
50
51/* RGB ordering */
52#define C0_RGB4_RGBX 0x00000000
53#define C0_RGB4_XRGB 0x00000004
54#define C0_RGB4_BGRX 0x00000008
55#define C0_RGB4_XBGR 0x0000000c
56#define C0_RGB5_RGGB 0x00000000
57#define C0_RGB5_GRBG 0x00000004
58#define C0_RGB5_GBRG 0x00000008
59#define C0_RGB5_BGGR 0x0000000c
60
61/* Spec has two fields for DIN and DOUT, but they must match, so
62 combine them here. */
63#define C0_DF_YUV 0x00000000 /* Data is YUV */
64#define C0_DF_RGB 0x000000a0 /* ... RGB */
65#define C0_DF_BAYER 0x00000140 /* ... Bayer */
66/* 8-8-8 must be missing from the below - ask */
67#define C0_RGBF_565 0x00000000
68#define C0_RGBF_444 0x00000800
69#define C0_RGB_BGR 0x00001000 /* Blue comes first */
70#define C0_YUV_PLANAR 0x00000000 /* YUV 422 planar format */
71#define C0_YUV_PACKED 0x00008000 /* YUV 422 packed */
72#define C0_YUV_420PL 0x0000a000 /* YUV 420 planar */
73/* Think that 420 packed must be 111 - ask */
74#define C0_YUVE_YUYV 0x00000000 /* Y1CbY0Cr */
75#define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
76#define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
77#define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
78#define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */
79#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */
80#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */
81#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */
82/* Bayer bits 18,19 if needed */
83#define C0_HPOL_LOW 0x01000000 /* HSYNC polarity active low */
84#define C0_VPOL_LOW 0x02000000 /* VSYNC polarity active low */
85#define C0_VCLK_LOW 0x04000000 /* VCLK on falling edge */
86#define C0_DOWNSCALE 0x08000000 /* Enable downscaler */
87#define C0_SIFM_MASK 0xc0000000 /* SIF mode bits */
88#define C0_SIF_HVSYNC 0x00000000 /* Use H/VSYNC */
89#define CO_SOF_NOSYNC 0x40000000 /* Use inband active signaling */
90
91
92#define REG_CTRL1 0x40 /* Control 1 */
93#define C1_444ALPHA 0x00f00000 /* Alpha field in RGB444 */
94#define C1_ALPHA_SHFT 20
95#define C1_DMAB32 0x00000000 /* 32-byte DMA burst */
96#define C1_DMAB16 0x02000000 /* 16-byte DMA burst */
97#define C1_DMAB64 0x04000000 /* 64-byte DMA burst */
98#define C1_DMAB_MASK 0x06000000
99#define C1_TWOBUFS 0x08000000 /* Use only two DMA buffers */
100#define C1_PWRDWN 0x10000000 /* Power down */
101
102#define REG_CLKCTRL 0x88 /* Clock control */
103#define CLK_DIV_MASK 0x0000ffff /* Upper bits RW "reserved" */
104
105#define REG_GPR 0xb4 /* General purpose register. This
106 controls inputs to the power and reset
107 pins on the OV7670 used with OLPC;
108 other deployments could differ. */
109#define GPR_C1EN 0x00000020 /* Pad 1 (power down) enable */
110#define GPR_C0EN 0x00000010 /* Pad 0 (reset) enable */
111#define GPR_C1 0x00000002 /* Control 1 value */
112/*
113 * Control 0 is wired to reset on OLPC machines. For ov7x sensors,
114 * it is active low, for 0v6x, instead, it's active high. What
115 * fun.
116 */
117#define GPR_C0 0x00000001 /* Control 0 value */
118
119#define REG_TWSIC0 0xb8 /* TWSI (smbus) control 0 */
120#define TWSIC0_EN 0x00000001 /* TWSI enable */
121#define TWSIC0_MODE 0x00000002 /* 1 = 16-bit, 0 = 8-bit */
122#define TWSIC0_SID 0x000003fc /* Slave ID */
123#define TWSIC0_SID_SHIFT 2
124#define TWSIC0_CLKDIV 0x0007fc00 /* Clock divider */
125#define TWSIC0_MASKACK 0x00400000 /* Mask ack from sensor */
126#define TWSIC0_OVMAGIC 0x00800000 /* Make it work on OV sensors */
127
128#define REG_TWSIC1 0xbc /* TWSI control 1 */
129#define TWSIC1_DATA 0x0000ffff /* Data to/from camchip */
130#define TWSIC1_ADDR 0x00ff0000 /* Address (register) */
131#define TWSIC1_ADDR_SHIFT 16
132#define TWSIC1_READ 0x01000000 /* Set for read op */
133#define TWSIC1_WSTAT 0x02000000 /* Write status */
134#define TWSIC1_RVALID 0x04000000 /* Read data valid */
135#define TWSIC1_ERROR 0x08000000 /* Something screwed up */
136
137
138#define REG_UBAR 0xc4 /* Upper base address register */
139
140/*
141 * Here's the weird global control registers which are said to live
142 * way up here.
143 */
144#define REG_GL_CSR 0x3004 /* Control/status register */
145#define GCSR_SRS 0x00000001 /* SW Reset set */
146#define GCSR_SRC 0x00000002 /* SW Reset clear */
147#define GCSR_MRS 0x00000004 /* Master reset set */
148#define GCSR_MRC 0x00000008 /* HW Reset clear */
149#define GCSR_CCIC_EN 0x00004000 /* CCIC Clock enable */
150#define REG_GL_IMASK 0x300c /* Interrupt mask register */
151#define GIMSK_CCIC_EN 0x00000004 /* CCIC Interrupt enable */
152
153#define REG_GL_FCR 0x3038 /* GPIO functional control register */
154#define GFCR_GPIO_ON 0x08 /* Camera GPIO enabled */
155#define REG_GL_GPIOR 0x315c /* GPIO register */
156#define GGPIO_OUT 0x80000 /* GPIO output */
157#define GGPIO_VAL 0x00008 /* Output pin value */
158
159#define REG_LEN REG_GL_IMASK + 4
160
161
162/*
163 * Useful stuff that probably belongs somewhere global.
164 */
165#define VGA_WIDTH 640
166#define VGA_HEIGHT 480
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
deleted file mode 100644
index 664703398493..000000000000
--- a/drivers/media/video/cafe_ccic.c
+++ /dev/null
@@ -1,2267 +0,0 @@
1/*
2 * A driver for the CMOS camera controller in the Marvell 88ALP01 "cafe"
3 * multifunction chip. Currently works with the Omnivision OV7670
4 * sensor.
5 *
6 * The data sheet for this device can be found at:
7 * http://www.marvell.com/products/pc_connectivity/88alp01/
8 *
9 * Copyright 2006 One Laptop Per Child Association, Inc.
10 * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
11 *
12 * Written by Jonathan Corbet, corbet@lwn.net.
13 *
14 * v4l2_device/v4l2_subdev conversion by:
15 * Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
16 *
17 * Note: this conversion is untested! Please contact the linux-media
18 * mailinglist if you can test this, together with the test results.
19 *
20 * This file may be distributed under the terms of the GNU General
21 * Public License, version 2.
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/fs.h>
28#include <linux/dmi.h>
29#include <linux/mm.h>
30#include <linux/pci.h>
31#include <linux/i2c.h>
32#include <linux/interrupt.h>
33#include <linux/spinlock.h>
34#include <linux/videodev2.h>
35#include <linux/slab.h>
36#include <media/v4l2-device.h>
37#include <media/v4l2-ioctl.h>
38#include <media/v4l2-chip-ident.h>
39#include <linux/device.h>
40#include <linux/wait.h>
41#include <linux/list.h>
42#include <linux/dma-mapping.h>
43#include <linux/delay.h>
44#include <linux/jiffies.h>
45#include <linux/vmalloc.h>
46
47#include <asm/uaccess.h>
48#include <asm/io.h>
49
50#include "ov7670.h"
51#include "cafe_ccic-regs.h"
52
53#define CAFE_VERSION 0x000002
54
55
56/*
57 * Parameters.
58 */
59MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
60MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");
61MODULE_LICENSE("GPL");
62MODULE_SUPPORTED_DEVICE("Video");
63
64/*
65 * Internal DMA buffer management. Since the controller cannot do S/G I/O,
66 * we must have physically contiguous buffers to bring frames into.
67 * These parameters control how many buffers we use, whether we
68 * allocate them at load time (better chance of success, but nails down
69 * memory) or when somebody tries to use the camera (riskier), and,
70 * for load-time allocation, how big they should be.
71 *
72 * The controller can cycle through three buffers. We could use
73 * more by flipping pointers around, but it probably makes little
74 * sense.
75 */
76
77#define MAX_DMA_BUFS 3
78static int alloc_bufs_at_read;
79module_param(alloc_bufs_at_read, bool, 0444);
80MODULE_PARM_DESC(alloc_bufs_at_read,
81 "Non-zero value causes DMA buffers to be allocated when the "
82 "video capture device is read, rather than at module load "
83 "time. This saves memory, but decreases the chances of "
84 "successfully getting those buffers.");
85
86static int n_dma_bufs = 3;
87module_param(n_dma_bufs, uint, 0644);
88MODULE_PARM_DESC(n_dma_bufs,
89 "The number of DMA buffers to allocate. Can be either two "
90 "(saves memory, makes timing tighter) or three.");
91
92static int dma_buf_size = VGA_WIDTH * VGA_HEIGHT * 2; /* Worst case */
93module_param(dma_buf_size, uint, 0444);
94MODULE_PARM_DESC(dma_buf_size,
95 "The size of the allocated DMA buffers. If actual operating "
96 "parameters require larger buffers, an attempt to reallocate "
97 "will be made.");
98
99static int min_buffers = 1;
100module_param(min_buffers, uint, 0644);
101MODULE_PARM_DESC(min_buffers,
102 "The minimum number of streaming I/O buffers we are willing "
103 "to work with.");
104
105static int max_buffers = 10;
106module_param(max_buffers, uint, 0644);
107MODULE_PARM_DESC(max_buffers,
108 "The maximum number of streaming I/O buffers an application "
109 "will be allowed to allocate. These buffers are big and live "
110 "in vmalloc space.");
111
112static int flip;
113module_param(flip, bool, 0444);
114MODULE_PARM_DESC(flip,
115 "If set, the sensor will be instructed to flip the image "
116 "vertically.");
117
118
119enum cafe_state {
120 S_NOTREADY, /* Not yet initialized */
121 S_IDLE, /* Just hanging around */
122 S_FLAKED, /* Some sort of problem */
123 S_SINGLEREAD, /* In read() */
124 S_SPECREAD, /* Speculative read (for future read()) */
125 S_STREAMING /* Streaming data */
126};
127
128/*
129 * Tracking of streaming I/O buffers.
130 */
131struct cafe_sio_buffer {
132 struct list_head list;
133 struct v4l2_buffer v4lbuf;
134 char *buffer; /* Where it lives in kernel space */
135 int mapcount;
136 struct cafe_camera *cam;
137};
138
139/*
140 * A description of one of our devices.
141 * Locking: controlled by s_mutex. Certain fields, however, require
142 * the dev_lock spinlock; they are marked as such by comments.
143 * dev_lock is also required for access to device registers.
144 */
145struct cafe_camera
146{
147 struct v4l2_device v4l2_dev;
148 enum cafe_state state;
149 unsigned long flags; /* Buffer status, mainly (dev_lock) */
150 int users; /* How many open FDs */
151 struct file *owner; /* Who has data access (v4l2) */
152
153 /*
154 * Subsystem structures.
155 */
156 struct pci_dev *pdev;
157 struct video_device vdev;
158 struct i2c_adapter i2c_adapter;
159 struct v4l2_subdev *sensor;
160 unsigned short sensor_addr;
161
162 unsigned char __iomem *regs;
163 struct list_head dev_list; /* link to other devices */
164
165 /* DMA buffers */
166 unsigned int nbufs; /* How many are alloc'd */
167 int next_buf; /* Next to consume (dev_lock) */
168 unsigned int dma_buf_size; /* allocated size */
169 void *dma_bufs[MAX_DMA_BUFS]; /* Internal buffer addresses */
170 dma_addr_t dma_handles[MAX_DMA_BUFS]; /* Buffer bus addresses */
171 unsigned int specframes; /* Unconsumed spec frames (dev_lock) */
172 unsigned int sequence; /* Frame sequence number */
173 unsigned int buf_seq[MAX_DMA_BUFS]; /* Sequence for individual buffers */
174
175 /* Streaming buffers */
176 unsigned int n_sbufs; /* How many we have */
177 struct cafe_sio_buffer *sb_bufs; /* The array of housekeeping structs */
178 struct list_head sb_avail; /* Available for data (we own) (dev_lock) */
179 struct list_head sb_full; /* With data (user space owns) (dev_lock) */
180 struct tasklet_struct s_tasklet;
181
182 /* Current operating parameters */
183 u32 sensor_type; /* Currently ov7670 only */
184 struct v4l2_pix_format pix_format;
185 enum v4l2_mbus_pixelcode mbus_code;
186
187 /* Locks */
188 struct mutex s_mutex; /* Access to this structure */
189 spinlock_t dev_lock; /* Access to device */
190
191 /* Misc */
192 wait_queue_head_t smbus_wait; /* Waiting on i2c events */
193 wait_queue_head_t iowait; /* Waiting on frame data */
194};
195
196/*
197 * Status flags. Always manipulated with bit operations.
198 */
199#define CF_BUF0_VALID 0 /* Buffers valid - first three */
200#define CF_BUF1_VALID 1
201#define CF_BUF2_VALID 2
202#define CF_DMA_ACTIVE 3 /* A frame is incoming */
203#define CF_CONFIG_NEEDED 4 /* Must configure hardware */
204
205#define sensor_call(cam, o, f, args...) \
206 v4l2_subdev_call(cam->sensor, o, f, ##args)
207
/* Map a v4l2_device pointer back to the cafe_camera that embeds it. */
static inline struct cafe_camera *to_cam(struct v4l2_device *dev)
{
	return container_of(dev, struct cafe_camera, v4l2_dev);
}
212
/*
 * Table of the pixel formats we support: each V4L2 fourcc is mapped to
 * the matching media-bus code and its bytes-per-pixel.  The first entry
 * (YUYV) doubles as the fallback format (see cafe_find_format()).
 */
static struct cafe_format_struct {
	__u8 *desc;		/* Human-readable format name */
	__u32 pixelformat;	/* V4L2 fourcc */
	int bpp;		/* Bytes per pixel */
	enum v4l2_mbus_pixelcode mbus_code;	/* Matching bus code for the sensor */
} cafe_formats[] = {
	{
		.desc		= "YUYV 4:2:2",
		.pixelformat	= V4L2_PIX_FMT_YUYV,
		.mbus_code	= V4L2_MBUS_FMT_YUYV8_2X8,
		.bpp		= 2,
	},
	{
		.desc		= "RGB 444",
		.pixelformat	= V4L2_PIX_FMT_RGB444,
		.mbus_code	= V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
		.bpp		= 2,
	},
	{
		.desc		= "RGB 565",
		.pixelformat	= V4L2_PIX_FMT_RGB565,
		.mbus_code	= V4L2_MBUS_FMT_RGB565_2X8_LE,
		.bpp		= 2,
	},
	{
		.desc		= "Raw RGB Bayer",
		.pixelformat	= V4L2_PIX_FMT_SBGGR8,
		.mbus_code	= V4L2_MBUS_FMT_SBGGR8_1X8,
		.bpp		= 1
	},
};
#define N_CAFE_FMTS ARRAY_SIZE(cafe_formats)
245
246static struct cafe_format_struct *cafe_find_format(u32 pixelformat)
247{
248 unsigned i;
249
250 for (i = 0; i < N_CAFE_FMTS; i++)
251 if (cafe_formats[i].pixelformat == pixelformat)
252 return cafe_formats + i;
253 /* Not found? Then return the first format. */
254 return cafe_formats;
255}
256
257/*
258 * Start over with DMA buffers - dev_lock needed.
259 */
260static void cafe_reset_buffers(struct cafe_camera *cam)
261{
262 int i;
263
264 cam->next_buf = -1;
265 for (i = 0; i < cam->nbufs; i++)
266 clear_bit(i, &cam->flags);
267 cam->specframes = 0;
268}
269
/* Nonzero when the hardware must be (re)configured before grabbing. */
static inline int cafe_needs_config(struct cafe_camera *cam)
{
	return test_bit(CF_CONFIG_NEEDED, &cam->flags);
}
274
275static void cafe_set_config_needed(struct cafe_camera *cam, int needed)
276{
277 if (needed)
278 set_bit(CF_CONFIG_NEEDED, &cam->flags);
279 else
280 clear_bit(CF_CONFIG_NEEDED, &cam->flags);
281}
282
283
284
285
/*
 * Debugging and related.  No trailing semicolons in the expansions, so
 * the macros behave like ordinary statements (safe in unbraced if/else
 * bodies); every call site supplies its own semicolon.
 */
#define cam_err(cam, fmt, arg...) \
	dev_err(&(cam)->pdev->dev, fmt, ##arg)
#define cam_warn(cam, fmt, arg...) \
	dev_warn(&(cam)->pdev->dev, fmt, ##arg)
#define cam_dbg(cam, fmt, arg...) \
	dev_dbg(&(cam)->pdev->dev, fmt, ##arg)
295
296
297/* ---------------------------------------------------------------------*/
298
/*
 * Device register I/O
 */
/* Write a 32-bit value to a controller register. */
static inline void cafe_reg_write(struct cafe_camera *cam, unsigned int reg,
		unsigned int val)
{
	iowrite32(val, cam->regs + reg);
}
307
/* Read a 32-bit value from a controller register. */
static inline unsigned int cafe_reg_read(struct cafe_camera *cam,
		unsigned int reg)
{
	return ioread32(cam->regs + reg);
}
313
314
/*
 * Read-modify-write: update only the bits of @reg selected by @mask,
 * taking their new values from @val.
 */
static inline void cafe_reg_write_mask(struct cafe_camera *cam, unsigned int reg,
		unsigned int val, unsigned int mask)
{
	unsigned int merged = (cafe_reg_read(cam, reg) & ~mask) | (val & mask);

	cafe_reg_write(cam, reg, merged);
}
323
/* Clear the bits of @reg given in @val (read-modify-write). */
static inline void cafe_reg_clear_bit(struct cafe_camera *cam,
		unsigned int reg, unsigned int val)
{
	cafe_reg_write_mask(cam, reg, 0, val);
}
329
/* Set the bits of @reg given in @val (read-modify-write). */
static inline void cafe_reg_set_bit(struct cafe_camera *cam,
		unsigned int reg, unsigned int val)
{
	cafe_reg_write_mask(cam, reg, val, val);
}
335
336
337
338/* -------------------------------------------------------------------- */
339/*
340 * The I2C/SMBUS interface to the camera itself starts here. The
341 * controller handles SMBUS itself, presenting a relatively simple register
342 * interface; all we have to do is to tell it where to route the data.
343 */
344#define CAFE_SMBUS_TIMEOUT (HZ) /* generous */
345
/*
 * Completion test for an SMBUS write: nonzero once the write-status bit
 * has dropped or the controller latched an error.  Used as the
 * wait_event_timeout() condition in cafe_smbus_write_data().
 */
static int cafe_smbus_write_done(struct cafe_camera *cam)
{
	unsigned long flags;
	int c1;

	/*
	 * We must delay after the interrupt, or the controller gets confused
	 * and never does give us good status.  Fortunately, we don't do this
	 * often.
	 */
	udelay(20);
	spin_lock_irqsave(&cam->dev_lock, flags);
	c1 = cafe_reg_read(cam, REG_TWSIC1);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
	/* Done when WSTAT is no longer the only status bit set. */
	return (c1 & (TWSIC1_WSTAT|TWSIC1_ERROR)) != TWSIC1_WSTAT;
}
362
/*
 * Write one byte (@value) to sensor register @command at i2c address
 * @addr via the controller's SMBUS engine.  Returns 0 on success or
 * -EIO on timeout/controller error.
 */
static int cafe_smbus_write_data(struct cafe_camera *cam,
		u16 addr, u8 command, u8 value)
{
	unsigned int rval;
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
	rval |= TWSIC0_OVMAGIC;  /* Make OV sensors work */
	/*
	 * Marvell sez set clkdiv to all 1's for now.
	 */
	rval |= TWSIC0_CLKDIV;
	cafe_reg_write(cam, REG_TWSIC0, rval);
	(void) cafe_reg_read(cam, REG_TWSIC1); /* force write */
	rval = value | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
	cafe_reg_write(cam, REG_TWSIC1, rval);
	spin_unlock_irqrestore(&cam->dev_lock, flags);

	/* Unfortunately, reading TWSIC1 too soon after sending a command
	 * causes the device to die.
	 * Use a busy-wait because we often send a large quantity of small
	 * commands at-once; using msleep() would cause a lot of context
	 * switches which take longer than 2ms, resulting in a noticeable
	 * boot-time and capture-start delays.
	 */
	mdelay(2);

	/*
	 * Another sad fact is that sometimes, commands silently complete but
	 * cafe_smbus_write_done() never becomes aware of this.
	 * This happens at random and appears possible with any command.
	 * We don't understand why this is.  We work around this issue
	 * with the timeout in the wait below, assuming that all commands
	 * complete within the timeout.
	 */
	wait_event_timeout(cam->smbus_wait, cafe_smbus_write_done(cam),
			CAFE_SMBUS_TIMEOUT);

	/* Re-read status under the lock to classify the outcome. */
	spin_lock_irqsave(&cam->dev_lock, flags);
	rval = cafe_reg_read(cam, REG_TWSIC1);
	spin_unlock_irqrestore(&cam->dev_lock, flags);

	if (rval & TWSIC1_WSTAT) {
		cam_err(cam, "SMBUS write (%02x/%02x/%02x) timed out\n", addr,
				command, value);
		return -EIO;
	}
	if (rval & TWSIC1_ERROR) {
		cam_err(cam, "SMBUS write (%02x/%02x/%02x) error\n", addr,
				command, value);
		return -EIO;
	}
	return 0;
}
419
420
421
/*
 * Completion test for an SMBUS read: nonzero once read data is valid
 * or the controller latched an error.  Used as the wait condition in
 * cafe_smbus_read_data().
 */
static int cafe_smbus_read_done(struct cafe_camera *cam)
{
	unsigned long flags;
	int c1;

	/*
	 * We must delay after the interrupt, or the controller gets confused
	 * and never does give us good status.  Fortunately, we don't do this
	 * often.
	 */
	udelay(20);
	spin_lock_irqsave(&cam->dev_lock, flags);
	c1 = cafe_reg_read(cam, REG_TWSIC1);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
	return c1 & (TWSIC1_RVALID|TWSIC1_ERROR);
}
438
439
440
/*
 * Read one byte from sensor register @command at i2c address @addr into
 * *@value.  Returns 0 on success or -EIO on timeout/controller error.
 */
static int cafe_smbus_read_data(struct cafe_camera *cam,
		u16 addr, u8 command, u8 *value)
{
	unsigned int rval;
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
	rval |= TWSIC0_OVMAGIC;  /* Make OV sensors work */
	/*
	 * Marvell sez set clkdiv to all 1's for now.
	 */
	rval |= TWSIC0_CLKDIV;
	cafe_reg_write(cam, REG_TWSIC0, rval);
	(void) cafe_reg_read(cam, REG_TWSIC1); /* force write */
	rval = TWSIC1_READ | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
	cafe_reg_write(cam, REG_TWSIC1, rval);
	spin_unlock_irqrestore(&cam->dev_lock, flags);

	wait_event_timeout(cam->smbus_wait,
			cafe_smbus_read_done(cam), CAFE_SMBUS_TIMEOUT);
	spin_lock_irqsave(&cam->dev_lock, flags);
	rval = cafe_reg_read(cam, REG_TWSIC1);
	spin_unlock_irqrestore(&cam->dev_lock, flags);

	if (rval & TWSIC1_ERROR) {
		cam_err(cam, "SMBUS read (%02x/%02x) error\n", addr, command);
		return -EIO;
	}
	if (! (rval & TWSIC1_RVALID)) {
		cam_err(cam, "SMBUS read (%02x/%02x) timed out\n", addr,
				command);
		return -EIO;
	}
	/* The low byte of TWSIC1 holds the data read back. */
	*value = rval & 0xff;
	return 0;
}
478
479/*
480 * Perform a transfer over SMBUS. This thing is called under
481 * the i2c bus lock, so we shouldn't race with ourselves...
482 */
483static int cafe_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
484 unsigned short flags, char rw, u8 command,
485 int size, union i2c_smbus_data *data)
486{
487 struct v4l2_device *v4l2_dev = i2c_get_adapdata(adapter);
488 struct cafe_camera *cam = to_cam(v4l2_dev);
489 int ret = -EINVAL;
490
491 /*
492 * This interface would appear to only do byte data ops. OK
493 * it can do word too, but the cam chip has no use for that.
494 */
495 if (size != I2C_SMBUS_BYTE_DATA) {
496 cam_err(cam, "funky xfer size %d\n", size);
497 return -EINVAL;
498 }
499
500 if (rw == I2C_SMBUS_WRITE)
501 ret = cafe_smbus_write_data(cam, addr, command, data->byte);
502 else if (rw == I2C_SMBUS_READ)
503 ret = cafe_smbus_read_data(cam, addr, command, &data->byte);
504 return ret;
505}
506
507
/* Unmask the TWSI (SMBUS) interrupt sources; dev_lock taken here. */
static void cafe_smbus_enable_irq(struct cafe_camera *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	cafe_reg_set_bit(cam, REG_IRQMASK, TWSIIRQS);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
}
516
/* Advertise our i2c capabilities: byte-data reads and writes only. */
static u32 cafe_smbus_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_SMBUS_READ_BYTE_DATA |
	       I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
}
522
523static struct i2c_algorithm cafe_smbus_algo = {
524 .smbus_xfer = cafe_smbus_xfer,
525 .functionality = cafe_smbus_func
526};
527
528/* Somebody is on the bus */
529static void cafe_ctlr_stop_dma(struct cafe_camera *cam);
530static void cafe_ctlr_power_down(struct cafe_camera *cam);
531
532static int cafe_smbus_setup(struct cafe_camera *cam)
533{
534 struct i2c_adapter *adap = &cam->i2c_adapter;
535 int ret;
536
537 cafe_smbus_enable_irq(cam);
538 adap->owner = THIS_MODULE;
539 adap->algo = &cafe_smbus_algo;
540 strcpy(adap->name, "cafe_ccic");
541 adap->dev.parent = &cam->pdev->dev;
542 i2c_set_adapdata(adap, &cam->v4l2_dev);
543 ret = i2c_add_adapter(adap);
544 if (ret)
545 printk(KERN_ERR "Unable to register cafe i2c adapter\n");
546 return ret;
547}
548
/* Tear down the i2c adapter registered by cafe_smbus_setup(). */
static void cafe_smbus_shutdown(struct cafe_camera *cam)
{
	i2c_del_adapter(&cam->i2c_adapter);
}
553
554
555/* ------------------------------------------------------------------- */
556/*
557 * Deal with the controller.
558 */
559
560/*
561 * Do everything we think we need to have the interface operating
562 * according to the desired format.
563 */
/* Program the DMA buffer base addresses into the controller.
 * Called under dev_lock (see cafe_ctlr_configure()). */
static void cafe_ctlr_dma(struct cafe_camera *cam)
{
	/*
	 * Store the first two Y buffers (we aren't supporting
	 * planar formats for now, so no UV bufs).  Then either
	 * set the third if it exists, or tell the controller
	 * to just use two.
	 */
	cafe_reg_write(cam, REG_Y0BAR, cam->dma_handles[0]);
	cafe_reg_write(cam, REG_Y1BAR, cam->dma_handles[1]);
	if (cam->nbufs > 2) {
		cafe_reg_write(cam, REG_Y2BAR, cam->dma_handles[2]);
		cafe_reg_clear_bit(cam, REG_CTRL1, C1_TWOBUFS);
	}
	else
		cafe_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
	cafe_reg_write(cam, REG_UBAR, 0); /* 32 bits only for now */
}
582
/* Program image geometry (size/offset/pitch) and the output pixel
 * format.  Called under dev_lock (see cafe_ctlr_configure()). */
static void cafe_ctlr_image(struct cafe_camera *cam)
{
	int imgsz;
	struct v4l2_pix_format *fmt = &cam->pix_format;

	/* Height in the upper half of IMGSIZE, line length in the lower. */
	imgsz = ((fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK) |
		(fmt->bytesperline & IMGSZ_H_MASK);
	cafe_reg_write(cam, REG_IMGSIZE, imgsz);
	cafe_reg_write(cam, REG_IMGOFFSET, 0);
	/* YPITCH just drops the last two bits */
	cafe_reg_write_mask(cam, REG_IMGPITCH, fmt->bytesperline,
			IMGP_YP_MASK);
	/*
	 * Tell the controller about the image format we are using.
	 */
	switch (cam->pix_format.pixelformat) {
	case V4L2_PIX_FMT_YUYV:
		cafe_reg_write_mask(cam, REG_CTRL0,
				C0_DF_YUV|C0_YUV_PACKED|C0_YUVE_YUYV,
				C0_DF_MASK);
		break;

	case V4L2_PIX_FMT_RGB444:
		cafe_reg_write_mask(cam, REG_CTRL0,
				C0_DF_RGB|C0_RGBF_444|C0_RGB4_XRGB,
				C0_DF_MASK);
		/* Alpha value? */
		break;

	case V4L2_PIX_FMT_RGB565:
		cafe_reg_write_mask(cam, REG_CTRL0,
				C0_DF_RGB|C0_RGBF_565|C0_RGB5_BGGR,
				C0_DF_MASK);
		break;

	default:
		/* Should not happen: cafe_find_format() filters formats. */
		cam_err(cam, "Unknown format %x\n", cam->pix_format.pixelformat);
		break;
	}
	/*
	 * Make sure it knows we want to use hsync/vsync.
	 */
	cafe_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC,
			C0_SIFM_MASK);
}
628
629
/*
 * Configure the controller for operation; caller holds the
 * device mutex.  Programs DMA buffer addresses plus the image
 * geometry/format, then clears the config-needed flag.
 */
static int cafe_ctlr_configure(struct cafe_camera *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	cafe_ctlr_dma(cam);
	cafe_ctlr_image(cam);
	cafe_set_config_needed(cam, 0);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
	return 0;	/* cannot currently fail */
}
645
/* Ack any stale frame interrupts and unmask them.  Caller holds dev_lock. */
static void cafe_ctlr_irq_enable(struct cafe_camera *cam)
{
	/*
	 * Clear any pending interrupts, since we do not
	 * expect to have I/O active prior to enabling.
	 */
	cafe_reg_write(cam, REG_IRQSTAT, FRAMEIRQS);
	cafe_reg_set_bit(cam, REG_IRQMASK, FRAMEIRQS);
}
655
/* Mask the frame interrupts.  Caller holds dev_lock. */
static void cafe_ctlr_irq_disable(struct cafe_camera *cam)
{
	cafe_reg_clear_bit(cam, REG_IRQMASK, FRAMEIRQS);
}
660
/*
 * Make the controller start grabbing images.  Everything must
 * be set up before doing this.
 */
static void cafe_ctlr_start(struct cafe_camera *cam)
{
	/* set_bit performs a read, so no other barrier should be
	   needed here */
	cafe_reg_set_bit(cam, REG_CTRL0, C0_ENABLE);
}
671
/* Tell the controller to stop grabbing; DMA may still be draining. */
static void cafe_ctlr_stop(struct cafe_camera *cam)
{
	cafe_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
}
676
677static void cafe_ctlr_init(struct cafe_camera *cam)
678{
679 unsigned long flags;
680
681 spin_lock_irqsave(&cam->dev_lock, flags);
682 /*
683 * Added magic to bring up the hardware on the B-Test board
684 */
685 cafe_reg_write(cam, 0x3038, 0x8);
686 cafe_reg_write(cam, 0x315c, 0x80008);
687 /*
688 * Go through the dance needed to wake the device up.
689 * Note that these registers are global and shared
690 * with the NAND and SD devices. Interaction between the
691 * three still needs to be examined.
692 */
693 cafe_reg_write(cam, REG_GL_CSR, GCSR_SRS|GCSR_MRS); /* Needed? */
694 cafe_reg_write(cam, REG_GL_CSR, GCSR_SRC|GCSR_MRC);
695 cafe_reg_write(cam, REG_GL_CSR, GCSR_SRC|GCSR_MRS);
696 /*
697 * Here we must wait a bit for the controller to come around.
698 */
699 spin_unlock_irqrestore(&cam->dev_lock, flags);
700 msleep(5);
701 spin_lock_irqsave(&cam->dev_lock, flags);
702
703 cafe_reg_write(cam, REG_GL_CSR, GCSR_CCIC_EN|GCSR_SRC|GCSR_MRC);
704 cafe_reg_set_bit(cam, REG_GL_IMASK, GIMSK_CCIC_EN);
705 /*
706 * Make sure it's not powered down.
707 */
708 cafe_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
709 /*
710 * Turn off the enable bit. It sure should be off anyway,
711 * but it's good to be sure.
712 */
713 cafe_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
714 /*
715 * Mask all interrupts.
716 */
717 cafe_reg_write(cam, REG_IRQMASK, 0);
718 /*
719 * Clock the sensor appropriately. Controller clock should
720 * be 48MHz, sensor "typical" value is half that.
721 */
722 cafe_reg_write_mask(cam, REG_CLKCTRL, 2, CLK_DIV_MASK);
723 spin_unlock_irqrestore(&cam->dev_lock, flags);
724}
725
726
/*
 * Stop the controller, and don't return until we're really sure that no
 * further DMA is going on.  Leaves the device in S_IDLE with frame
 * interrupts masked.
 */
static void cafe_ctlr_stop_dma(struct cafe_camera *cam)
{
	unsigned long flags;

	/*
	 * Theory: stop the camera controller (whether it is operating
	 * or not).  Delay briefly just in case we race with the SOF
	 * interrupt, then wait until no DMA is active.
	 */
	spin_lock_irqsave(&cam->dev_lock, flags);
	cafe_ctlr_stop(cam);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
	mdelay(1);
	/* CF_DMA_ACTIVE is cleared by the frame irq path; bounded wait. */
	wait_event_timeout(cam->iowait,
			!test_bit(CF_DMA_ACTIVE, &cam->flags), HZ);
	if (test_bit(CF_DMA_ACTIVE, &cam->flags))
		cam_err(cam, "Timeout waiting for DMA to end\n");
		/* This would be bad news - what now? */
	spin_lock_irqsave(&cam->dev_lock, flags);
	cam->state = S_IDLE;
	cafe_ctlr_irq_disable(cam);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
}
754
755/*
756 * Power up and down.
757 */
/* Power the controller and sensor up (OLPC-style GPIO wiring assumed). */
static void cafe_ctlr_power_up(struct cafe_camera *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	cafe_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
	/*
	 * Part one of the sensor dance: turn the global
	 * GPIO signal on.
	 */
	cafe_reg_write(cam, REG_GL_FCR, GFCR_GPIO_ON);
	cafe_reg_write(cam, REG_GL_GPIOR, GGPIO_OUT|GGPIO_VAL);
	/*
	 * Put the sensor into operational mode (assumes OLPC-style
	 * wiring).  Control 0 is reset - set to 1 to operate.
	 * Control 1 is power down, set to 0 to operate.
	 */
	cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
/*	mdelay(1); */ /* Marvell says 1ms will do it */
	cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);
/*	mdelay(1); */ /* Enough? */
	spin_unlock_irqrestore(&cam->dev_lock, flags);
	msleep(5); /* Just to be sure */
}
782
/* Power the sensor down and assert the controller's power-down bit. */
static void cafe_ctlr_power_down(struct cafe_camera *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	/* Assert sensor power-down (C1), drop the GPIO value bit. */
	cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C1);
	cafe_reg_write(cam, REG_GL_FCR, GFCR_GPIO_ON);
	cafe_reg_write(cam, REG_GL_GPIOR, GGPIO_OUT);
	cafe_reg_set_bit(cam, REG_CTRL1, C1_PWRDWN);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
}
794
795/* -------------------------------------------------------------------- */
796/*
797 * Communications with the sensor.
798 */
799
/* Ask the sensor subdev to reset itself.  Caller holds s_mutex. */
static int __cafe_cam_reset(struct cafe_camera *cam)
{
	return sensor_call(cam, core, reset, 0);
}
804
/*
 * We have found the sensor on the i2c.  Let's try to have a
 * conversation: reset it, verify it identifies as an OV7670, and move
 * the device to S_IDLE.  The controller is powered down again on exit,
 * success or failure.  Returns 0 or a negative errno.
 */
static int cafe_cam_init(struct cafe_camera *cam)
{
	struct v4l2_dbg_chip_ident chip;
	int ret;

	mutex_lock(&cam->s_mutex);
	if (cam->state != S_NOTREADY)
		cam_warn(cam, "Cam init with device in funky state %d",
				cam->state);
	ret = __cafe_cam_reset(cam);
	if (ret)
		goto out;
	chip.ident = V4L2_IDENT_NONE;
	chip.match.type = V4L2_CHIP_MATCH_I2C_ADDR;
	chip.match.addr = cam->sensor_addr;
	ret = sensor_call(cam, core, g_chip_ident, &chip);
	if (ret)
		goto out;
	cam->sensor_type = chip.ident;
	/* Only the OV7670 is supported for now. */
	if (cam->sensor_type != V4L2_IDENT_OV7670) {
		cam_err(cam, "Unsupported sensor type 0x%x", cam->sensor_type);
		ret = -EINVAL;
		goto out;
	}
/* Get/set parameters? */
	ret = 0;
	cam->state = S_IDLE;
  out:
	cafe_ctlr_power_down(cam);
	mutex_unlock(&cam->s_mutex);
	return ret;
}
841
842/*
843 * Configure the sensor to match the parameters we have. Caller should
844 * hold s_mutex
845 */
846static int cafe_cam_set_flip(struct cafe_camera *cam)
847{
848 struct v4l2_control ctrl;
849
850 memset(&ctrl, 0, sizeof(ctrl));
851 ctrl.id = V4L2_CID_VFLIP;
852 ctrl.value = flip;
853 return sensor_call(cam, core, s_ctrl, &ctrl);
854}
855
856
/* Push the current operating parameters into the sensor.
 * Caller should hold s_mutex.  Returns 0 on full success. */
static int cafe_cam_configure(struct cafe_camera *cam)
{
	struct v4l2_mbus_framefmt mbus_fmt;
	int ret;

	v4l2_fill_mbus_format(&mbus_fmt, &cam->pix_format, cam->mbus_code);
	ret = sensor_call(cam, core, init, 0);
	if (ret == 0)
		ret = sensor_call(cam, video, s_mbus_fmt, &mbus_fmt);
	/*
	 * OV7670 does weird things if flip is set *before* format...
	 * NOTE(review): error codes are *summed* here, so two failures
	 * yield a nonstandard errno; callers should only test nonzero.
	 */
	ret += cafe_cam_set_flip(cam);
	return ret;
}
872
873/* -------------------------------------------------------------------- */
874/*
875 * DMA buffer management. These functions need s_mutex held.
876 */
877
/* FIXME: this is inefficient as hell, since dma_alloc_coherent just
 * does a get_free_pages() call, and we waste a good chunk of an orderN
 * allocation.  Should try to allocate the whole set in one chunk.
 *
 * Allocate up to n_dma_bufs contiguous DMA frame buffers.  @loadtime
 * selects the module-parameter size; otherwise the current format's
 * image size is used.  Needs at least two buffers to operate; returns
 * 0 or -ENOMEM.
 */
static int cafe_alloc_dma_bufs(struct cafe_camera *cam, int loadtime)
{
	int i;

	cafe_set_config_needed(cam, 1);
	if (loadtime)
		cam->dma_buf_size = dma_buf_size;
	else
		cam->dma_buf_size = cam->pix_format.sizeimage;
	if (n_dma_bufs > 3)
		n_dma_bufs = 3;

	cam->nbufs = 0;
	for (i = 0; i < n_dma_bufs; i++) {
		cam->dma_bufs[i] = dma_alloc_coherent(&cam->pdev->dev,
				cam->dma_buf_size, cam->dma_handles + i,
				GFP_KERNEL);
		if (cam->dma_bufs[i] == NULL) {
			cam_warn(cam, "Failed to allocate DMA buffer\n");
			break;
		}
		/* For debug, remove eventually */
		memset(cam->dma_bufs[i], 0xcc, cam->dma_buf_size);
		(cam->nbufs)++;
	}

	switch (cam->nbufs) {
	case 1:
		/* One buffer is useless: free it and report failure. */
		dma_free_coherent(&cam->pdev->dev, cam->dma_buf_size,
				cam->dma_bufs[0], cam->dma_handles[0]);
		cam->nbufs = 0;
		/* Fall through */
	case 0:
		cam_err(cam, "Insufficient DMA buffers, cannot operate\n");
		return -ENOMEM;

	case 2:
		if (n_dma_bufs > 2)
			cam_warn(cam, "Will limp along with only 2 buffers\n");
		break;
	}
	return 0;
}
924
925static void cafe_free_dma_bufs(struct cafe_camera *cam)
926{
927 int i;
928
929 for (i = 0; i < cam->nbufs; i++) {
930 dma_free_coherent(&cam->pdev->dev, cam->dma_buf_size,
931 cam->dma_bufs[i], cam->dma_handles[i]);
932 cam->dma_bufs[i] = NULL;
933 }
934 cam->nbufs = 0;
935}
936
937
938
939
940
941/* ----------------------------------------------------------------------- */
942/*
943 * Here starts the V4L2 interface code.
944 */
945
/*
 * Read an image from the device.
 * Copy the current DMA buffer to user space and advance next_buf
 * (setting it to -1 if the following buffer holds no valid frame).
 * Returns bytes copied, -EIO with no frame ready, or -EFAULT.
 */
static ssize_t cafe_deliver_buffer(struct cafe_camera *cam,
		char __user *buffer, size_t len, loff_t *pos)
{
	int bufno;
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	if (cam->next_buf < 0) {
		cam_err(cam, "deliver_buffer: No next buffer\n");
		spin_unlock_irqrestore(&cam->dev_lock, flags);
		return -EIO;
	}
	bufno = cam->next_buf;
	/* Consume this buffer: drop its valid bit. */
	clear_bit(bufno, &cam->flags);
	if (++(cam->next_buf) >= cam->nbufs)
		cam->next_buf = 0;
	if (! test_bit(cam->next_buf, &cam->flags))
		cam->next_buf = -1;
	cam->specframes = 0;
	spin_unlock_irqrestore(&cam->dev_lock, flags);

	/* Copy outside the lock; clamp to one image. */
	if (len > cam->pix_format.sizeimage)
		len = cam->pix_format.sizeimage;
	if (copy_to_user(buffer, cam->dma_bufs[bufno], len))
		return -EFAULT;
	(*pos) += len;
	return len;
}
977
/*
 * Get everything ready, and start grabbing frames.
 * Allocates DMA buffers if needed, (re)configures sensor and
 * controller, then enables interrupts and starts the engine in the
 * given @state.  Caller holds s_mutex.
 */
static int cafe_read_setup(struct cafe_camera *cam, enum cafe_state state)
{
	int ret;
	unsigned long flags;

	/*
	 * Configuration.  If we still don't have DMA buffers,
	 * make one last, desperate attempt.
	 */
	if (cam->nbufs == 0)
		if (cafe_alloc_dma_bufs(cam, 0))
			return -ENOMEM;

	if (cafe_needs_config(cam)) {
		/*
		 * NOTE(review): the sensor-configure result is ignored
		 * here (best effort?) while the controller-configure
		 * result is checked -- verify this asymmetry is intended.
		 */
		cafe_cam_configure(cam);
		ret = cafe_ctlr_configure(cam);
		if (ret)
			return ret;
	}

	/*
	 * Turn it loose.
	 */
	spin_lock_irqsave(&cam->dev_lock, flags);
	cafe_reset_buffers(cam);
	cafe_ctlr_irq_enable(cam);
	cam->state = state;
	cafe_ctlr_start(cam);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
	return 0;
}
1012
1013
/*
 * read() implementation: deliver a speculative frame if one is waiting,
 * otherwise start a single-shot capture and block (up to 1s) for it.
 * Only one file handle (cam->owner) may grab data at a time.
 */
static ssize_t cafe_v4l_read(struct file *filp,
		char __user *buffer, size_t len, loff_t *pos)
{
	struct cafe_camera *cam = filp->private_data;
	int ret = 0;

	/*
	 * Perhaps we're in speculative read mode and already
	 * have data?
	 */
	mutex_lock(&cam->s_mutex);
	if (cam->state == S_SPECREAD) {
		if (cam->next_buf >= 0) {
			ret = cafe_deliver_buffer(cam, buffer, len, pos);
			if (ret != 0)
				goto out_unlock;
		}
	} else if (cam->state == S_FLAKED || cam->state == S_NOTREADY) {
		ret = -EIO;
		goto out_unlock;
	} else if (cam->state != S_IDLE) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * v4l2: multiple processes can open the device, but only
	 * one gets to grab data from it.
	 */
	if (cam->owner && cam->owner != filp) {
		ret = -EBUSY;
		goto out_unlock;
	}
	cam->owner = filp;

	/*
	 * Do setup if need be.
	 */
	if (cam->state != S_SPECREAD) {
		ret = cafe_read_setup(cam, S_SINGLEREAD);
		if (ret)
			goto out_unlock;
	}
	/*
	 * Wait for something to happen.  This should probably
	 * be interruptible (FIXME).
	 */
	wait_event_timeout(cam->iowait, cam->next_buf >= 0, HZ);
	if (cam->next_buf < 0) {
		cam_err(cam, "read() operation timed out\n");
		cafe_ctlr_stop_dma(cam);
		ret = -EIO;
		goto out_unlock;
	}
	/*
	 * Give them their data and we should be done.
	 */
	ret = cafe_deliver_buffer(cam, buffer, len, pos);

  out_unlock:
	mutex_unlock(&cam->s_mutex);
	return ret;
}
1077
1078
1079
1080
1081
1082
1083
1084
1085/*
1086 * Streaming I/O support.
1087 */
1088
1089
1090
1091static int cafe_vidioc_streamon(struct file *filp, void *priv,
1092 enum v4l2_buf_type type)
1093{
1094 struct cafe_camera *cam = filp->private_data;
1095 int ret = -EINVAL;
1096
1097 if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1098 goto out;
1099 mutex_lock(&cam->s_mutex);
1100 if (cam->state != S_IDLE || cam->n_sbufs == 0)
1101 goto out_unlock;
1102
1103 cam->sequence = 0;
1104 ret = cafe_read_setup(cam, S_STREAMING);
1105
1106 out_unlock:
1107 mutex_unlock(&cam->s_mutex);
1108 out:
1109 return ret;
1110}
1111
1112
1113static int cafe_vidioc_streamoff(struct file *filp, void *priv,
1114 enum v4l2_buf_type type)
1115{
1116 struct cafe_camera *cam = filp->private_data;
1117 int ret = -EINVAL;
1118
1119 if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1120 goto out;
1121 mutex_lock(&cam->s_mutex);
1122 if (cam->state != S_STREAMING)
1123 goto out_unlock;
1124
1125 cafe_ctlr_stop_dma(cam);
1126 ret = 0;
1127
1128 out_unlock:
1129 mutex_unlock(&cam->s_mutex);
1130 out:
1131 return ret;
1132}
1133
1134
1135
1136static int cafe_setup_siobuf(struct cafe_camera *cam, int index)
1137{
1138 struct cafe_sio_buffer *buf = cam->sb_bufs + index;
1139
1140 INIT_LIST_HEAD(&buf->list);
1141 buf->v4lbuf.length = PAGE_ALIGN(cam->pix_format.sizeimage);
1142 buf->buffer = vmalloc_user(buf->v4lbuf.length);
1143 if (buf->buffer == NULL)
1144 return -ENOMEM;
1145 buf->mapcount = 0;
1146 buf->cam = cam;
1147
1148 buf->v4lbuf.index = index;
1149 buf->v4lbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1150 buf->v4lbuf.field = V4L2_FIELD_NONE;
1151 buf->v4lbuf.memory = V4L2_MEMORY_MMAP;
1152 /*
1153 * Offset: must be 32-bit even on a 64-bit system. videobuf-dma-sg
1154 * just uses the length times the index, but the spec warns
1155 * against doing just that - vma merging problems. So we
1156 * leave a gap between each pair of buffers.
1157 */
1158 buf->v4lbuf.m.offset = 2*index*buf->v4lbuf.length;
1159 return 0;
1160}
1161
1162static int cafe_free_sio_buffers(struct cafe_camera *cam)
1163{
1164 int i;
1165
1166 /*
1167 * If any buffers are mapped, we cannot free them at all.
1168 */
1169 for (i = 0; i < cam->n_sbufs; i++)
1170 if (cam->sb_bufs[i].mapcount > 0)
1171 return -EBUSY;
1172 /*
1173 * OK, let's do it.
1174 */
1175 for (i = 0; i < cam->n_sbufs; i++)
1176 vfree(cam->sb_bufs[i].buffer);
1177 cam->n_sbufs = 0;
1178 kfree(cam->sb_bufs);
1179 cam->sb_bufs = NULL;
1180 INIT_LIST_HEAD(&cam->sb_avail);
1181 INIT_LIST_HEAD(&cam->sb_full);
1182 return 0;
1183}
1184
1185
1186
1187static int cafe_vidioc_reqbufs(struct file *filp, void *priv,
1188 struct v4l2_requestbuffers *req)
1189{
1190 struct cafe_camera *cam = filp->private_data;
1191 int ret = 0; /* Silence warning */
1192
1193 /*
1194 * Make sure it's something we can do. User pointers could be
1195 * implemented without great pain, but that's not been done yet.
1196 */
1197 if (req->memory != V4L2_MEMORY_MMAP)
1198 return -EINVAL;
1199 /*
1200 * If they ask for zero buffers, they really want us to stop streaming
1201 * (if it's happening) and free everything. Should we check owner?
1202 */
1203 mutex_lock(&cam->s_mutex);
1204 if (req->count == 0) {
1205 if (cam->state == S_STREAMING)
1206 cafe_ctlr_stop_dma(cam);
1207 ret = cafe_free_sio_buffers (cam);
1208 goto out;
1209 }
1210 /*
1211 * Device needs to be idle and working. We *could* try to do the
1212 * right thing in S_SPECREAD by shutting things down, but it
1213 * probably doesn't matter.
1214 */
1215 if (cam->state != S_IDLE || (cam->owner && cam->owner != filp)) {
1216 ret = -EBUSY;
1217 goto out;
1218 }
1219 cam->owner = filp;
1220
1221 if (req->count < min_buffers)
1222 req->count = min_buffers;
1223 else if (req->count > max_buffers)
1224 req->count = max_buffers;
1225 if (cam->n_sbufs > 0) {
1226 ret = cafe_free_sio_buffers(cam);
1227 if (ret)
1228 goto out;
1229 }
1230
1231 cam->sb_bufs = kzalloc(req->count*sizeof(struct cafe_sio_buffer),
1232 GFP_KERNEL);
1233 if (cam->sb_bufs == NULL) {
1234 ret = -ENOMEM;
1235 goto out;
1236 }
1237 for (cam->n_sbufs = 0; cam->n_sbufs < req->count; (cam->n_sbufs++)) {
1238 ret = cafe_setup_siobuf(cam, cam->n_sbufs);
1239 if (ret)
1240 break;
1241 }
1242
1243 if (cam->n_sbufs == 0) /* no luck at all - ret already set */
1244 kfree(cam->sb_bufs);
1245 req->count = cam->n_sbufs; /* In case of partial success */
1246
1247 out:
1248 mutex_unlock(&cam->s_mutex);
1249 return ret;
1250}
1251
1252
1253static int cafe_vidioc_querybuf(struct file *filp, void *priv,
1254 struct v4l2_buffer *buf)
1255{
1256 struct cafe_camera *cam = filp->private_data;
1257 int ret = -EINVAL;
1258
1259 mutex_lock(&cam->s_mutex);
1260 if (buf->index >= cam->n_sbufs)
1261 goto out;
1262 *buf = cam->sb_bufs[buf->index].v4lbuf;
1263 ret = 0;
1264 out:
1265 mutex_unlock(&cam->s_mutex);
1266 return ret;
1267}
1268
1269static int cafe_vidioc_qbuf(struct file *filp, void *priv,
1270 struct v4l2_buffer *buf)
1271{
1272 struct cafe_camera *cam = filp->private_data;
1273 struct cafe_sio_buffer *sbuf;
1274 int ret = -EINVAL;
1275 unsigned long flags;
1276
1277 mutex_lock(&cam->s_mutex);
1278 if (buf->index >= cam->n_sbufs)
1279 goto out;
1280 sbuf = cam->sb_bufs + buf->index;
1281 if (sbuf->v4lbuf.flags & V4L2_BUF_FLAG_QUEUED) {
1282 ret = 0; /* Already queued?? */
1283 goto out;
1284 }
1285 if (sbuf->v4lbuf.flags & V4L2_BUF_FLAG_DONE) {
1286 /* Spec doesn't say anything, seems appropriate tho */
1287 ret = -EBUSY;
1288 goto out;
1289 }
1290 sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_QUEUED;
1291 spin_lock_irqsave(&cam->dev_lock, flags);
1292 list_add(&sbuf->list, &cam->sb_avail);
1293 spin_unlock_irqrestore(&cam->dev_lock, flags);
1294 ret = 0;
1295 out:
1296 mutex_unlock(&cam->s_mutex);
1297 return ret;
1298}
1299
/*
 * VIDIOC_DQBUF: hand a filled streaming buffer back to the application.
 * Blocks (unless O_NONBLOCK) until the frame tasklet has moved a buffer
 * onto sb_full.  Returns -EINTR if streaming stopped while we waited,
 * -ERESTARTSYS if the sleep was interrupted by a signal.
 */
static int cafe_vidioc_dqbuf(struct file *filp, void *priv,
		struct v4l2_buffer *buf)
{
	struct cafe_camera *cam = filp->private_data;
	struct cafe_sio_buffer *sbuf;
	int ret = -EINVAL;
	unsigned long flags;

	mutex_lock(&cam->s_mutex);
	if (cam->state != S_STREAMING)
		goto out_unlock;
	/* Non-blocking callers get -EAGAIN when nothing is ready yet */
	if (list_empty(&cam->sb_full) && filp->f_flags & O_NONBLOCK) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * Drop the mutex while sleeping so the rest of the driver can
	 * make progress; retake it before re-testing the condition.
	 */
	while (list_empty(&cam->sb_full) && cam->state == S_STREAMING) {
		mutex_unlock(&cam->s_mutex);
		if (wait_event_interruptible(cam->iowait,
						!list_empty(&cam->sb_full))) {
			ret = -ERESTARTSYS;
			goto out;
		}
		mutex_lock(&cam->s_mutex);
	}

	if (cam->state != S_STREAMING)
		ret = -EINTR;
	else {
		spin_lock_irqsave(&cam->dev_lock, flags);
		/*
		 * Should probably recheck !list_empty() here - in
		 * principle the list could be drained between the wakeup
		 * and retaking the locks.  TODO confirm whether multiple
		 * concurrent readers are possible on this path.
		 */
		sbuf = list_entry(cam->sb_full.next,
				struct cafe_sio_buffer, list);
		list_del_init(&sbuf->list);
		spin_unlock_irqrestore(&cam->dev_lock, flags);
		sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_DONE;
		*buf = sbuf->v4lbuf;
		ret = 0;
	}

 out_unlock:
	mutex_unlock(&cam->s_mutex);
 out:
	return ret;
}
1345
1346
1347
/*
 * VMA open callback: count one more user mapping of this buffer; a
 * buffer with live mappings cannot be freed.
 */
static void cafe_v4l_vm_open(struct vm_area_struct *vma)
{
	struct cafe_sio_buffer *sbuf = vma->vm_private_data;
	/*
	 * Locking: done under mmap_sem, so we don't need to
	 * go back to the camera lock here.
	 */
	sbuf->mapcount++;
}
1357
1358
/*
 * VMA close callback: drop one mapping reference; the final unmap
 * clears the MAPPED flag so cafe_free_sio_buffers() may reclaim the
 * buffer.
 */
static void cafe_v4l_vm_close(struct vm_area_struct *vma)
{
	struct cafe_sio_buffer *sbuf = vma->vm_private_data;

	mutex_lock(&sbuf->cam->s_mutex);
	sbuf->mapcount--;
	/* Docs say we should stop I/O too... */
	if (sbuf->mapcount == 0)
		sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_MAPPED;
	mutex_unlock(&sbuf->cam->s_mutex);
}
1370
/* VMA callbacks used to track how many user mappings each buffer has */
static const struct vm_operations_struct cafe_v4l_vm_ops = {
	.open = cafe_v4l_vm_open,
	.close = cafe_v4l_vm_close
};
1375
1376
1377static int cafe_v4l_mmap(struct file *filp, struct vm_area_struct *vma)
1378{
1379 struct cafe_camera *cam = filp->private_data;
1380 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
1381 int ret = -EINVAL;
1382 int i;
1383 struct cafe_sio_buffer *sbuf = NULL;
1384
1385 if (! (vma->vm_flags & VM_WRITE) || ! (vma->vm_flags & VM_SHARED))
1386 return -EINVAL;
1387 /*
1388 * Find the buffer they are looking for.
1389 */
1390 mutex_lock(&cam->s_mutex);
1391 for (i = 0; i < cam->n_sbufs; i++)
1392 if (cam->sb_bufs[i].v4lbuf.m.offset == offset) {
1393 sbuf = cam->sb_bufs + i;
1394 break;
1395 }
1396 if (sbuf == NULL)
1397 goto out;
1398
1399 ret = remap_vmalloc_range(vma, sbuf->buffer, 0);
1400 if (ret)
1401 goto out;
1402 vma->vm_flags |= VM_DONTEXPAND;
1403 vma->vm_private_data = sbuf;
1404 vma->vm_ops = &cafe_v4l_vm_ops;
1405 sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_MAPPED;
1406 cafe_v4l_vm_open(vma);
1407 ret = 0;
1408 out:
1409 mutex_unlock(&cam->s_mutex);
1410 return ret;
1411}
1412
1413
1414
/*
 * Device open: the first user powers the controller up and resets the
 * sensor; later opens only bump the user count.  Always succeeds.
 */
static int cafe_v4l_open(struct file *filp)
{
	struct cafe_camera *cam = video_drvdata(filp);

	filp->private_data = cam;

	mutex_lock(&cam->s_mutex);
	if (cam->users == 0) {
		cafe_ctlr_power_up(cam);
		__cafe_cam_reset(cam);
		cafe_set_config_needed(cam, 1);
	/* FIXME make sure this is complete */
	}
	(cam->users)++;
	mutex_unlock(&cam->s_mutex);
	return 0;
}
1432
1433
/*
 * Device release: the owning file stops DMA and gives up its streaming
 * buffers; the last user powers the controller down and, when buffers
 * are allocated lazily, frees the DMA buffers too.
 */
static int cafe_v4l_release(struct file *filp)
{
	struct cafe_camera *cam = filp->private_data;

	mutex_lock(&cam->s_mutex);
	(cam->users)--;
	if (filp == cam->owner) {
		cafe_ctlr_stop_dma(cam);
		cafe_free_sio_buffers(cam);
		cam->owner = NULL;
	}
	if (cam->users == 0) {
		cafe_ctlr_power_down(cam);
		if (alloc_bufs_at_read)
			cafe_free_dma_bufs(cam);
	}
	mutex_unlock(&cam->s_mutex);
	return 0;
}
1453
1454
1455
1456static unsigned int cafe_v4l_poll(struct file *filp,
1457 struct poll_table_struct *pt)
1458{
1459 struct cafe_camera *cam = filp->private_data;
1460
1461 poll_wait(filp, &cam->iowait, pt);
1462 if (cam->next_buf >= 0)
1463 return POLLIN | POLLRDNORM;
1464 return 0;
1465}
1466
1467
1468
1469static int cafe_vidioc_queryctrl(struct file *filp, void *priv,
1470 struct v4l2_queryctrl *qc)
1471{
1472 struct cafe_camera *cam = priv;
1473 int ret;
1474
1475 mutex_lock(&cam->s_mutex);
1476 ret = sensor_call(cam, core, queryctrl, qc);
1477 mutex_unlock(&cam->s_mutex);
1478 return ret;
1479}
1480
1481
1482static int cafe_vidioc_g_ctrl(struct file *filp, void *priv,
1483 struct v4l2_control *ctrl)
1484{
1485 struct cafe_camera *cam = priv;
1486 int ret;
1487
1488 mutex_lock(&cam->s_mutex);
1489 ret = sensor_call(cam, core, g_ctrl, ctrl);
1490 mutex_unlock(&cam->s_mutex);
1491 return ret;
1492}
1493
1494
1495static int cafe_vidioc_s_ctrl(struct file *filp, void *priv,
1496 struct v4l2_control *ctrl)
1497{
1498 struct cafe_camera *cam = priv;
1499 int ret;
1500
1501 mutex_lock(&cam->s_mutex);
1502 ret = sensor_call(cam, core, s_ctrl, ctrl);
1503 mutex_unlock(&cam->s_mutex);
1504 return ret;
1505}
1506
1507
1508
1509
1510
1511static int cafe_vidioc_querycap(struct file *file, void *priv,
1512 struct v4l2_capability *cap)
1513{
1514 strcpy(cap->driver, "cafe_ccic");
1515 strcpy(cap->card, "cafe_ccic");
1516 cap->version = CAFE_VERSION;
1517 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
1518 V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
1519 return 0;
1520}
1521
1522
/*
 * The default format we use until somebody says otherwise.
 */
static const struct v4l2_pix_format cafe_def_pix_format = {
	.width = VGA_WIDTH,
	.height = VGA_HEIGHT,
	.pixelformat = V4L2_PIX_FMT_YUYV,
	.field = V4L2_FIELD_NONE,
	.bytesperline = VGA_WIDTH*2,
	.sizeimage = VGA_WIDTH*VGA_HEIGHT*2,
};

/* Media-bus code matching the default YUYV pixel format above */
static const enum v4l2_mbus_pixelcode cafe_def_mbus_code =
					V4L2_MBUS_FMT_YUYV8_2X8;
1537
1538static int cafe_vidioc_enum_fmt_vid_cap(struct file *filp,
1539 void *priv, struct v4l2_fmtdesc *fmt)
1540{
1541 if (fmt->index >= N_CAFE_FMTS)
1542 return -EINVAL;
1543 strlcpy(fmt->description, cafe_formats[fmt->index].desc,
1544 sizeof(fmt->description));
1545 fmt->pixelformat = cafe_formats[fmt->index].pixelformat;
1546 return 0;
1547}
1548
/*
 * VIDIOC_TRY_FMT: validate a capture format by asking the sensor.
 * bytesperline/sizeimage are recomputed from the (possibly adjusted)
 * geometry the sensor hands back.
 */
static int cafe_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
		struct v4l2_format *fmt)
{
	struct cafe_camera *cam = priv;
	struct cafe_format_struct *f;
	struct v4l2_pix_format *pix = &fmt->fmt.pix;
	struct v4l2_mbus_framefmt mbus_fmt;
	int ret;

	/*
	 * NOTE(review): assumes cafe_find_format() never returns NULL
	 * (i.e. it falls back to a default entry) - confirm against its
	 * definition, which is outside this chunk.
	 */
	f = cafe_find_format(pix->pixelformat);
	pix->pixelformat = f->pixelformat;
	v4l2_fill_mbus_format(&mbus_fmt, pix, f->mbus_code);
	mutex_lock(&cam->s_mutex);
	ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
	mutex_unlock(&cam->s_mutex);
	v4l2_fill_pix_format(pix, &mbus_fmt);
	pix->bytesperline = pix->width * f->bpp;
	pix->sizeimage = pix->height * pix->bytesperline;
	return ret;
}
1569
/*
 * VIDIOC_S_FMT: commit a capture format.  Validates it through
 * TRY_FMT, reallocates DMA buffers if the new image no longer fits,
 * then programs both the sensor and the controller.
 */
static int cafe_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
		struct v4l2_format *fmt)
{
	struct cafe_camera *cam = priv;
	struct cafe_format_struct *f;
	int ret;

	/*
	 * Can't do anything if the device is not idle
	 * Also can't if there are streaming buffers in place.
	 * NOTE(review): this check runs before s_mutex is taken, so the
	 * state could in principle change before the lock below -
	 * confirm whether that window matters.
	 */
	if (cam->state != S_IDLE || cam->n_sbufs > 0)
		return -EBUSY;

	f = cafe_find_format(fmt->fmt.pix.pixelformat);

	/*
	 * See if the formatting works in principle.
	 */
	ret = cafe_vidioc_try_fmt_vid_cap(filp, priv, fmt);
	if (ret)
		return ret;
	/*
	 * Now we start to change things for real, so let's do it
	 * under lock.
	 */
	mutex_lock(&cam->s_mutex);
	cam->pix_format = fmt->fmt.pix;
	cam->mbus_code = f->mbus_code;

	/*
	 * Make sure we have appropriate DMA buffers.
	 */
	ret = -ENOMEM;
	if (cam->nbufs > 0 && cam->dma_buf_size < cam->pix_format.sizeimage)
		cafe_free_dma_bufs(cam);
	if (cam->nbufs == 0) {
		if (cafe_alloc_dma_bufs(cam, 0))
			goto out;
	}
	/*
	 * It looks like this might work, so let's program the sensor.
	 */
	ret = cafe_cam_configure(cam);
	if (! ret)
		ret = cafe_ctlr_configure(cam);
 out:
	mutex_unlock(&cam->s_mutex);
	return ret;
}
1620
/*
 * Return our stored notion of how the camera is/should be configured.
 * The V4l2 spec wants us to be smarter, and actually get this from
 * the camera (and not mess with it at open time).  Someday.
 */
static int cafe_vidioc_g_fmt_vid_cap(struct file *filp, void *priv,
		struct v4l2_format *f)
{
	struct cafe_camera *cam = priv;

	/* Just report the cached format; no hardware access needed */
	f->fmt.pix = cam->pix_format;
	return 0;
}
1634
1635/*
1636 * We only have one input - the sensor - so minimize the nonsense here.
1637 */
1638static int cafe_vidioc_enum_input(struct file *filp, void *priv,
1639 struct v4l2_input *input)
1640{
1641 if (input->index != 0)
1642 return -EINVAL;
1643
1644 input->type = V4L2_INPUT_TYPE_CAMERA;
1645 input->std = V4L2_STD_ALL; /* Not sure what should go here */
1646 strcpy(input->name, "Camera");
1647 return 0;
1648}
1649
/* Single-input device: G_INPUT always reports input 0. */
static int cafe_vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}

/* S_INPUT accepts only input 0. */
static int cafe_vidioc_s_input(struct file *filp, void *priv, unsigned int i)
{
	if (i != 0)
		return -EINVAL;
	return 0;
}

/* from vivi.c; S_STD accepts anything and does nothing */
static int cafe_vidioc_s_std(struct file *filp, void *priv, v4l2_std_id *a)
{
	return 0;
}
1668
/*
 * G/S_PARM.  Most of this is done by the sensor, but we are
 * the level which controls the number of read buffers.
 */
static int cafe_vidioc_g_parm(struct file *filp, void *priv,
		struct v4l2_streamparm *parms)
{
	struct cafe_camera *cam = priv;
	int ret;

	mutex_lock(&cam->s_mutex);
	ret = sensor_call(cam, video, g_parm, parms);
	mutex_unlock(&cam->s_mutex);
	/* The bridge, not the sensor, owns the readbuffers count */
	parms->parm.capture.readbuffers = n_dma_bufs;
	return ret;
}

static int cafe_vidioc_s_parm(struct file *filp, void *priv,
		struct v4l2_streamparm *parms)
{
	struct cafe_camera *cam = priv;
	int ret;

	mutex_lock(&cam->s_mutex);
	ret = sensor_call(cam, video, s_parm, parms);
	mutex_unlock(&cam->s_mutex);
	/* Report back the fixed readbuffers count we actually use */
	parms->parm.capture.readbuffers = n_dma_bufs;
	return ret;
}
1698
/*
 * VIDIOC_DBG_G_CHIP_IDENT: identify either the bridge chip (when the
 * match targets the host) or, otherwise, the attached sensor.
 */
static int cafe_vidioc_g_chip_ident(struct file *file, void *priv,
		struct v4l2_dbg_chip_ident *chip)
{
	struct cafe_camera *cam = priv;

	chip->ident = V4L2_IDENT_NONE;
	chip->revision = 0;
	if (v4l2_chip_match_host(&chip->match)) {
		chip->ident = V4L2_IDENT_CAFE;
		return 0;
	}
	return sensor_call(cam, core, g_chip_ident, chip);
}
1712
/* Frame size enumeration is delegated entirely to the sensor. */
static int cafe_vidioc_enum_framesizes(struct file *filp, void *priv,
		struct v4l2_frmsizeenum *sizes)
{
	struct cafe_camera *cam = priv;
	int ret;

	mutex_lock(&cam->s_mutex);
	ret = sensor_call(cam, video, enum_framesizes, sizes);
	mutex_unlock(&cam->s_mutex);
	return ret;
}

/* Frame interval enumeration is likewise delegated to the sensor. */
static int cafe_vidioc_enum_frameintervals(struct file *filp, void *priv,
		struct v4l2_frmivalenum *interval)
{
	struct cafe_camera *cam = priv;
	int ret;

	mutex_lock(&cam->s_mutex);
	ret = sensor_call(cam, video, enum_frameintervals, interval);
	mutex_unlock(&cam->s_mutex);
	return ret;
}
1736
1737#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
 * Debug register read: host (controller) registers are read directly,
 * anything else is forwarded to the sensor subdev.
 */
static int cafe_vidioc_g_register(struct file *file, void *priv,
		struct v4l2_dbg_register *reg)
{
	struct cafe_camera *cam = priv;

	if (v4l2_chip_match_host(&reg->match)) {
		reg->val = cafe_reg_read(cam, reg->reg);
		reg->size = 4;	/* controller registers are 32 bits wide */
		return 0;
	}
	return sensor_call(cam, core, g_register, reg);
}
1750
/*
 * Debug register write: host (controller) registers are written
 * directly, anything else is forwarded to the sensor subdev.
 */
static int cafe_vidioc_s_register(struct file *file, void *priv,
		struct v4l2_dbg_register *reg)
{
	struct cafe_camera *cam = priv;

	if (v4l2_chip_match_host(&reg->match)) {
		cafe_reg_write(cam, reg->reg, reg->val);
		return 0;
	}
	return sensor_call(cam, core, s_register, reg);
}
1762#endif
1763
1764/*
1765 * This template device holds all of those v4l2 methods; we
1766 * clone it for specific real devices.
1767 */
1768
/* File operations for the video device node. */
static const struct v4l2_file_operations cafe_v4l_fops = {
	.owner = THIS_MODULE,
	.open = cafe_v4l_open,
	.release = cafe_v4l_release,
	.read = cafe_v4l_read,
	.poll = cafe_v4l_poll,
	.mmap = cafe_v4l_mmap,
	.unlocked_ioctl = video_ioctl2,
};

/* ioctl dispatch table handed to video_ioctl2(). */
static const struct v4l2_ioctl_ops cafe_v4l_ioctl_ops = {
	.vidioc_querycap = cafe_vidioc_querycap,
	.vidioc_enum_fmt_vid_cap = cafe_vidioc_enum_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = cafe_vidioc_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = cafe_vidioc_s_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap = cafe_vidioc_g_fmt_vid_cap,
	.vidioc_enum_input = cafe_vidioc_enum_input,
	.vidioc_g_input = cafe_vidioc_g_input,
	.vidioc_s_input = cafe_vidioc_s_input,
	.vidioc_s_std = cafe_vidioc_s_std,
	.vidioc_reqbufs = cafe_vidioc_reqbufs,
	.vidioc_querybuf = cafe_vidioc_querybuf,
	.vidioc_qbuf = cafe_vidioc_qbuf,
	.vidioc_dqbuf = cafe_vidioc_dqbuf,
	.vidioc_streamon = cafe_vidioc_streamon,
	.vidioc_streamoff = cafe_vidioc_streamoff,
	.vidioc_queryctrl = cafe_vidioc_queryctrl,
	.vidioc_g_ctrl = cafe_vidioc_g_ctrl,
	.vidioc_s_ctrl = cafe_vidioc_s_ctrl,
	.vidioc_g_parm = cafe_vidioc_g_parm,
	.vidioc_s_parm = cafe_vidioc_s_parm,
	.vidioc_enum_framesizes = cafe_vidioc_enum_framesizes,
	.vidioc_enum_frameintervals = cafe_vidioc_enum_frameintervals,
	.vidioc_g_chip_ident = cafe_vidioc_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.vidioc_g_register = cafe_vidioc_g_register,
	.vidioc_s_register = cafe_vidioc_s_register,
#endif
};

/* Template video_device, copied into each probed camera. */
static struct video_device cafe_v4l_template = {
	.name = "cafe",
	.tvnorms = V4L2_STD_NTSC_M,
	.current_norm = V4L2_STD_NTSC_M, /* make mplayer happy */

	.fops = &cafe_v4l_fops,
	.ioctl_ops = &cafe_v4l_ioctl_ops,
	.release = video_device_release_empty,
};
1818
1819
1820/* ---------------------------------------------------------------------- */
1821/*
1822 * Interrupt handler stuff
1823 */
1824
1825
1826
/*
 * Streaming frame delivery, run in tasklet context: copy each completed
 * DMA buffer into the next queued user (sio) buffer and move that
 * buffer onto the "full" list for dqbuf to collect.
 */
static void cafe_frame_tasklet(unsigned long data)
{
	struct cafe_camera *cam = (struct cafe_camera *) data;
	int i;
	unsigned long flags;
	struct cafe_sio_buffer *sbuf;

	spin_lock_irqsave(&cam->dev_lock, flags);
	for (i = 0; i < cam->nbufs; i++) {
		int bufno = cam->next_buf;
		if (bufno < 0) { /* "will never happen" */
			cam_err(cam, "No valid bufs in tasklet!\n");
			break;
		}
		/* Advance next_buf circularly around the DMA buffer ring */
		if (++(cam->next_buf) >= cam->nbufs)
			cam->next_buf = 0;
		/* Only buffers flagged complete by the IRQ handler count */
		if (! test_bit(bufno, &cam->flags))
			continue;
		if (list_empty(&cam->sb_avail))
			break; /* Leave it valid, hope for better later */
		clear_bit(bufno, &cam->flags);
		sbuf = list_entry(cam->sb_avail.next,
				struct cafe_sio_buffer, list);
		/*
		 * Drop the lock during the big copy.  This *should* be safe...
		 */
		spin_unlock_irqrestore(&cam->dev_lock, flags);
		memcpy(sbuf->buffer, cam->dma_bufs[bufno],
				cam->pix_format.sizeimage);
		sbuf->v4lbuf.bytesused = cam->pix_format.sizeimage;
		sbuf->v4lbuf.sequence = cam->buf_seq[bufno];
		sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_QUEUED;
		sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_DONE;
		spin_lock_irqsave(&cam->dev_lock, flags);
		list_move_tail(&sbuf->list, &cam->sb_full);
	}
	/* Wake anyone sleeping in dqbuf/poll now that data is available */
	if (! list_empty(&cam->sb_full))
		wake_up(&cam->iowait);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
}
1867
1868
1869
/*
 * Handle one completed frame according to the current state machine:
 * single reads go speculative, stale speculative reads shut the
 * controller down, and streaming defers the copy work to the tasklet.
 * Called from the frame IRQ path under dev_lock.
 */
static void cafe_frame_complete(struct cafe_camera *cam, int frame)
{
	/*
	 * Basic frame housekeeping.
	 */
	if (test_bit(frame, &cam->flags) && printk_ratelimit())
		cam_err(cam, "Frame overrun on %d, frames lost\n", frame);
	set_bit(frame, &cam->flags);
	clear_bit(CF_DMA_ACTIVE, &cam->flags);
	if (cam->next_buf < 0)
		cam->next_buf = frame;
	cam->buf_seq[frame] = ++(cam->sequence);

	switch (cam->state) {
	/*
	 * If in single read mode, try going speculative.
	 */
	    case S_SINGLEREAD:
		cam->state = S_SPECREAD;
		cam->specframes = 0;
		wake_up(&cam->iowait);
		break;

	/*
	 * If we are already doing speculative reads, and nobody is
	 * reading them, just stop.
	 */
	    case S_SPECREAD:
		if (++(cam->specframes) >= cam->nbufs) {
			cafe_ctlr_stop(cam);
			cafe_ctlr_irq_disable(cam);
			cam->state = S_IDLE;
		}
		wake_up(&cam->iowait);
		break;
	/*
	 * For the streaming case, we defer the real work to the
	 * camera tasklet.
	 *
	 * FIXME: if the application is not consuming the buffers,
	 * we should eventually put things on hold and restart in
	 * vidioc_dqbuf().
	 */
	    case S_STREAMING:
		tasklet_schedule(&cam->s_tasklet);
		break;

	    default:
		cam_err(cam, "Frame interrupt in non-operational state\n");
		break;
	}
}
1922
1923
1924
1925
/*
 * Dispatch frame-related interrupt bits: acknowledge them all, complete
 * any finished frames, and mark DMA active on a start-of-frame.
 * Called from cafe_irq() under dev_lock.
 */
static void cafe_frame_irq(struct cafe_camera *cam, unsigned int irqs)
{
	unsigned int frame;

	cafe_reg_write(cam, REG_IRQSTAT, FRAMEIRQS); /* Clear'em all */
	/*
	 * Handle any frame completions.  There really should
	 * not be more than one of these, or we have fallen
	 * far behind.
	 */
	for (frame = 0; frame < cam->nbufs; frame++)
		if (irqs & (IRQ_EOF0 << frame))
			cafe_frame_complete(cam, frame);
	/*
	 * If a frame starts, note that we have DMA active.  This
	 * code assumes that we won't get multiple frame interrupts
	 * at once; may want to rethink that.
	 */
	if (irqs & (IRQ_SOF0 | IRQ_SOF1 | IRQ_SOF2))
		set_bit(CF_DMA_ACTIVE, &cam->flags);
}
1947
1948
1949
/*
 * Main interrupt handler.  Runs under dev_lock; frame interrupts go to
 * cafe_frame_irq(), TWSI (SMBUS) interrupts wake the bus waiters.
 * Returns IRQ_NONE when the (shared) line was not ours.
 */
static irqreturn_t cafe_irq(int irq, void *data)
{
	struct cafe_camera *cam = data;
	unsigned int irqs;

	spin_lock(&cam->dev_lock);
	irqs = cafe_reg_read(cam, REG_IRQSTAT);
	if ((irqs & ALLIRQS) == 0) {
		spin_unlock(&cam->dev_lock);
		return IRQ_NONE;
	}
	if (irqs & FRAMEIRQS)
		cafe_frame_irq(cam, irqs);
	if (irqs & TWSIIRQS) {
		/* Ack the TWSI bits and wake the SMBUS transfer code */
		cafe_reg_write(cam, REG_IRQSTAT, TWSIIRQS);
		wake_up(&cam->smbus_wait);
	}
	spin_unlock(&cam->dev_lock);
	return IRQ_HANDLED;
}
1970
1971
1972/* -------------------------------------------------------------------------- */
1973/*
1974 * PCI interface stuff.
1975 */
1976
/* DMI match for the OLPC XO-1 laptop, which needs a slower clock. */
static const struct dmi_system_id olpc_xo1_dmi[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "OLPC"),
			DMI_MATCH(DMI_PRODUCT_NAME, "XO"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "1"),
		},
	},
	{ }
};
1987
1988static int cafe_pci_probe(struct pci_dev *pdev,
1989 const struct pci_device_id *id)
1990{
1991 int ret;
1992 struct cafe_camera *cam;
1993 struct ov7670_config sensor_cfg = {
1994 /* This controller only does SMBUS */
1995 .use_smbus = true,
1996
1997 /*
1998 * Exclude QCIF mode, because it only captures a tiny portion
1999 * of the sensor FOV
2000 */
2001 .min_width = 320,
2002 .min_height = 240,
2003 };
2004 struct i2c_board_info ov7670_info = {
2005 .type = "ov7670",
2006 .addr = 0x42,
2007 .platform_data = &sensor_cfg,
2008 };
2009
2010 /*
2011 * Start putting together one of our big camera structures.
2012 */
2013 ret = -ENOMEM;
2014 cam = kzalloc(sizeof(struct cafe_camera), GFP_KERNEL);
2015 if (cam == NULL)
2016 goto out;
2017 ret = v4l2_device_register(&pdev->dev, &cam->v4l2_dev);
2018 if (ret)
2019 goto out_free;
2020
2021 mutex_init(&cam->s_mutex);
2022 spin_lock_init(&cam->dev_lock);
2023 cam->state = S_NOTREADY;
2024 cafe_set_config_needed(cam, 1);
2025 init_waitqueue_head(&cam->smbus_wait);
2026 init_waitqueue_head(&cam->iowait);
2027 cam->pdev = pdev;
2028 cam->pix_format = cafe_def_pix_format;
2029 cam->mbus_code = cafe_def_mbus_code;
2030 INIT_LIST_HEAD(&cam->dev_list);
2031 INIT_LIST_HEAD(&cam->sb_avail);
2032 INIT_LIST_HEAD(&cam->sb_full);
2033 tasklet_init(&cam->s_tasklet, cafe_frame_tasklet, (unsigned long) cam);
2034 /*
2035 * Get set up on the PCI bus.
2036 */
2037 ret = pci_enable_device(pdev);
2038 if (ret)
2039 goto out_unreg;
2040 pci_set_master(pdev);
2041
2042 ret = -EIO;
2043 cam->regs = pci_iomap(pdev, 0, 0);
2044 if (! cam->regs) {
2045 printk(KERN_ERR "Unable to ioremap cafe-ccic regs\n");
2046 goto out_unreg;
2047 }
2048 ret = request_irq(pdev->irq, cafe_irq, IRQF_SHARED, "cafe-ccic", cam);
2049 if (ret)
2050 goto out_iounmap;
2051 /*
2052 * Initialize the controller and leave it powered up. It will
2053 * stay that way until the sensor driver shows up.
2054 */
2055 cafe_ctlr_init(cam);
2056 cafe_ctlr_power_up(cam);
2057 /*
2058 * Set up I2C/SMBUS communications. We have to drop the mutex here
2059 * because the sensor could attach in this call chain, leading to
2060 * unsightly deadlocks.
2061 */
2062 ret = cafe_smbus_setup(cam);
2063 if (ret)
2064 goto out_freeirq;
2065
2066 /* Apply XO-1 clock speed */
2067 if (dmi_check_system(olpc_xo1_dmi))
2068 sensor_cfg.clock_speed = 45;
2069
2070 cam->sensor_addr = ov7670_info.addr;
2071 cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev, &cam->i2c_adapter,
2072 &ov7670_info, NULL);
2073 if (cam->sensor == NULL) {
2074 ret = -ENODEV;
2075 goto out_smbus;
2076 }
2077
2078 ret = cafe_cam_init(cam);
2079 if (ret)
2080 goto out_smbus;
2081
2082 /*
2083 * Get the v4l2 setup done.
2084 */
2085 mutex_lock(&cam->s_mutex);
2086 cam->vdev = cafe_v4l_template;
2087 cam->vdev.debug = 0;
2088/* cam->vdev.debug = V4L2_DEBUG_IOCTL_ARG;*/
2089 cam->vdev.v4l2_dev = &cam->v4l2_dev;
2090 ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
2091 if (ret)
2092 goto out_unlock;
2093 video_set_drvdata(&cam->vdev, cam);
2094
2095 /*
2096 * If so requested, try to get our DMA buffers now.
2097 */
2098 if (!alloc_bufs_at_read) {
2099 if (cafe_alloc_dma_bufs(cam, 1))
2100 cam_warn(cam, "Unable to alloc DMA buffers at load"
2101 " will try again later.");
2102 }
2103
2104 mutex_unlock(&cam->s_mutex);
2105 return 0;
2106
2107out_unlock:
2108 mutex_unlock(&cam->s_mutex);
2109out_smbus:
2110 cafe_smbus_shutdown(cam);
2111out_freeirq:
2112 cafe_ctlr_power_down(cam);
2113 free_irq(pdev->irq, cam);
2114out_iounmap:
2115 pci_iounmap(pdev, cam->regs);
2116out_free:
2117 v4l2_device_unregister(&cam->v4l2_dev);
2118out_unreg:
2119 kfree(cam);
2120out:
2121 return ret;
2122}
2123
2124
/*
 * Shut down an initialized device
 */
static void cafe_shutdown(struct cafe_camera *cam)
{
/* FIXME: Make sure we take care of everything here */
	if (cam->n_sbufs > 0)
		/* What if they are still mapped?  Shouldn't be, but... */
		cafe_free_sio_buffers(cam);
	cafe_ctlr_stop_dma(cam);
	cafe_ctlr_power_down(cam);
	cafe_smbus_shutdown(cam);
	cafe_free_dma_bufs(cam);
	free_irq(cam->pdev->irq, cam);
	pci_iounmap(cam->pdev, cam->regs);
	/*
	 * NOTE(review): the video device is unregistered only after the
	 * IRQ and register mapping are gone; file operations racing with
	 * removal could touch torn-down resources.  Consider moving this
	 * first - confirm against v4l2 core expectations.
	 */
	video_unregister_device(&cam->vdev);
}
2142
2143
/*
 * PCI remove: tear down the whole device and free the camera structure.
 * The mutex is deliberately never unlocked - the structure it lives in
 * is freed before this function returns.
 */
static void cafe_pci_remove(struct pci_dev *pdev)
{
	struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
	struct cafe_camera *cam = to_cam(v4l2_dev);

	if (cam == NULL) {
		printk(KERN_WARNING "pci_remove on unknown pdev %p\n", pdev);
		return;
	}
	mutex_lock(&cam->s_mutex);
	if (cam->users > 0)
		cam_warn(cam, "Removing a device with users!\n");
	cafe_shutdown(cam);
	v4l2_device_unregister(&cam->v4l2_dev);
	kfree(cam);
/* No unlock - it no longer exists */
}
2161
2162
2163#ifdef CONFIG_PM
2164/*
2165 * Basic power management.
2166 */
/*
 * Suspend: stop DMA, power the controller down, and disable the PCI
 * device, while remembering the pre-suspend state so resume can pick
 * up where we left off.
 */
static int cafe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
	struct cafe_camera *cam = to_cam(v4l2_dev);
	int ret;
	enum cafe_state cstate;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;
	cstate = cam->state; /* HACK - stop_dma sets to idle */
	cafe_ctlr_stop_dma(cam);
	cafe_ctlr_power_down(cam);
	pci_disable_device(pdev);
	/* Restore the state clobbered by stop_dma for resume's benefit */
	cam->state = cstate;
	return 0;
}
2184
2185
/*
 * Resume: re-enable the PCI device, reinitialize the controller, and
 * restart whatever read/stream mode was active before suspend.
 */
static int cafe_pci_resume(struct pci_dev *pdev)
{
	struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
	struct cafe_camera *cam = to_cam(v4l2_dev);
	int ret = 0;

	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);

	if (ret) {
		cam_warn(cam, "Unable to re-enable device on resume!\n");
		return ret;
	}
	cafe_ctlr_init(cam);

	/* Only power the controller up if somebody still has it open */
	mutex_lock(&cam->s_mutex);
	if (cam->users > 0) {
		cafe_ctlr_power_up(cam);
		__cafe_cam_reset(cam);
	} else {
		cafe_ctlr_power_down(cam);
	}
	mutex_unlock(&cam->s_mutex);

	/* Sensor/controller must be reprogrammed before the next capture */
	set_bit(CF_CONFIG_NEEDED, &cam->flags);
	if (cam->state == S_SPECREAD)
		cam->state = S_IDLE; /* Don't bother restarting */
	else if (cam->state == S_SINGLEREAD || cam->state == S_STREAMING)
		ret = cafe_read_setup(cam, cam->state);
	return ret;
}
2217
2218#endif /* CONFIG_PM */
2219
2220
/* PCI IDs this driver binds to: the Marvell 88ALP01 "CAFE" CCIC. */
static struct pci_device_id cafe_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL,
			PCI_DEVICE_ID_MARVELL_88ALP01_CCIC) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, cafe_ids);
2228
/* PCI driver glue; suspend/resume only when power management is built. */
static struct pci_driver cafe_pci_driver = {
	.name = "cafe1000-ccic",
	.id_table = cafe_ids,
	.probe = cafe_pci_probe,
	.remove = cafe_pci_remove,
#ifdef CONFIG_PM
	.suspend = cafe_pci_suspend,
	.resume = cafe_pci_resume,
#endif
};
2239
2240
2241
2242
2243static int __init cafe_init(void)
2244{
2245 int ret;
2246
2247 printk(KERN_NOTICE "Marvell M88ALP01 'CAFE' Camera Controller version %d\n",
2248 CAFE_VERSION);
2249 ret = pci_register_driver(&cafe_pci_driver);
2250 if (ret) {
2251 printk(KERN_ERR "Unable to register cafe_ccic driver\n");
2252 goto out;
2253 }
2254 ret = 0;
2255
2256 out:
2257 return ret;
2258}
2259
2260
/* Module exit: unregister the PCI driver; remove() handles each device. */
static void __exit cafe_exit(void)
{
	pci_unregister_driver(&cafe_pci_driver);
}

module_init(cafe_init);
module_exit(cafe_exit);
diff --git a/drivers/media/video/cpia2/cpia2.h b/drivers/media/video/cpia2/cpia2.h
index 6d6d1843791c..ab252188981b 100644
--- a/drivers/media/video/cpia2/cpia2.h
+++ b/drivers/media/video/cpia2/cpia2.h
@@ -31,7 +31,6 @@
31#ifndef __CPIA2_H__ 31#ifndef __CPIA2_H__
32#define __CPIA2_H__ 32#define __CPIA2_H__
33 33
34#include <linux/version.h>
35#include <linux/videodev2.h> 34#include <linux/videodev2.h>
36#include <media/v4l2-common.h> 35#include <media/v4l2-common.h>
37#include <linux/usb.h> 36#include <linux/usb.h>
@@ -43,10 +42,6 @@
43/* define for verbose debug output */ 42/* define for verbose debug output */
44//#define _CPIA2_DEBUG_ 43//#define _CPIA2_DEBUG_
45 44
46#define CPIA2_MAJ_VER 3
47#define CPIA2_MIN_VER 0
48#define CPIA2_PATCH_VER 0
49
50/*** 45/***
51 * Image defines 46 * Image defines
52 ***/ 47 ***/
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 40eb6326e48a..077eb1db80a1 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -29,8 +29,7 @@
29 * Alan Cox <alan@lxorguk.ukuu.org.uk> 29 * Alan Cox <alan@lxorguk.ukuu.org.uk>
30 ****************************************************************************/ 30 ****************************************************************************/
31 31
32#include <linux/version.h> 32#define CPIA_VERSION "3.0.1"
33
34 33
35#include <linux/module.h> 34#include <linux/module.h>
36#include <linux/time.h> 35#include <linux/time.h>
@@ -80,6 +79,7 @@ MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>");
80MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras"); 79MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras");
81MODULE_SUPPORTED_DEVICE("video"); 80MODULE_SUPPORTED_DEVICE("video");
82MODULE_LICENSE("GPL"); 81MODULE_LICENSE("GPL");
82MODULE_VERSION(CPIA_VERSION);
83 83
84#define ABOUT "V4L-Driver for Vision CPiA2 based cameras" 84#define ABOUT "V4L-Driver for Vision CPiA2 based cameras"
85 85
@@ -465,9 +465,6 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
465 if (usb_make_path(cam->dev, vc->bus_info, sizeof(vc->bus_info)) <0) 465 if (usb_make_path(cam->dev, vc->bus_info, sizeof(vc->bus_info)) <0)
466 memset(vc->bus_info,0, sizeof(vc->bus_info)); 466 memset(vc->bus_info,0, sizeof(vc->bus_info));
467 467
468 vc->version = KERNEL_VERSION(CPIA2_MAJ_VER, CPIA2_MIN_VER,
469 CPIA2_PATCH_VER);
470
471 vc->capabilities = V4L2_CAP_VIDEO_CAPTURE | 468 vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
472 V4L2_CAP_READWRITE | 469 V4L2_CAP_READWRITE |
473 V4L2_CAP_STREAMING; 470 V4L2_CAP_STREAMING;
@@ -1558,8 +1555,8 @@ static void __init check_parameters(void)
1558 *****************************************************************************/ 1555 *****************************************************************************/
1559static int __init cpia2_init(void) 1556static int __init cpia2_init(void)
1560{ 1557{
1561 LOG("%s v%d.%d.%d\n", 1558 LOG("%s v%s\n",
1562 ABOUT, CPIA2_MAJ_VER, CPIA2_MIN_VER, CPIA2_PATCH_VER); 1559 ABOUT, CPIA_VERSION);
1563 check_parameters(); 1560 check_parameters();
1564 cpia2_usb_init(); 1561 cpia2_usb_init();
1565 return 0; 1562 return 0;
@@ -1579,4 +1576,3 @@ static void __exit cpia2_exit(void)
1579 1576
1580module_init(cpia2_init); 1577module_init(cpia2_init);
1581module_exit(cpia2_exit); 1578module_exit(cpia2_exit);
1582
diff --git a/drivers/media/video/cx18/cx18-alsa-main.c b/drivers/media/video/cx18/cx18-alsa-main.c
index d50d69da387b..a1e6c2a32478 100644
--- a/drivers/media/video/cx18/cx18-alsa-main.c
+++ b/drivers/media/video/cx18/cx18-alsa-main.c
@@ -192,6 +192,7 @@ static int snd_cx18_init(struct v4l2_device *v4l2_dev)
192err_exit_free: 192err_exit_free:
193 if (sc != NULL) 193 if (sc != NULL)
194 snd_card_free(sc); 194 snd_card_free(sc);
195 kfree(cxsc);
195err_exit: 196err_exit:
196 return ret; 197 return ret;
197} 198}
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 086427288de8..183420723060 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -25,7 +25,6 @@
25#ifndef CX18_DRIVER_H 25#ifndef CX18_DRIVER_H
26#define CX18_DRIVER_H 26#define CX18_DRIVER_H
27 27
28#include <linux/version.h>
29#include <linux/module.h> 28#include <linux/module.h>
30#include <linux/moduleparam.h> 29#include <linux/moduleparam.h>
31#include <linux/init.h> 30#include <linux/init.h>
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index e80134f52ef5..afe0a29e7200 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -469,7 +469,6 @@ static int cx18_querycap(struct file *file, void *fh,
469 strlcpy(vcap->card, cx->card_name, sizeof(vcap->card)); 469 strlcpy(vcap->card, cx->card_name, sizeof(vcap->card));
470 snprintf(vcap->bus_info, sizeof(vcap->bus_info), 470 snprintf(vcap->bus_info, sizeof(vcap->bus_info),
471 "PCI:%s", pci_name(cx->pci_dev)); 471 "PCI:%s", pci_name(cx->pci_dev));
472 vcap->version = CX18_DRIVER_VERSION; /* version */
473 vcap->capabilities = cx->v4l2_cap; /* capabilities */ 472 vcap->capabilities = cx->v4l2_cap; /* capabilities */
474 return 0; 473 return 0;
475} 474}
diff --git a/drivers/media/video/cx18/cx18-version.h b/drivers/media/video/cx18/cx18-version.h
index cd189b6bbe20..fed48b6bb67b 100644
--- a/drivers/media/video/cx18/cx18-version.h
+++ b/drivers/media/video/cx18/cx18-version.h
@@ -23,12 +23,6 @@
23#define CX18_VERSION_H 23#define CX18_VERSION_H
24 24
25#define CX18_DRIVER_NAME "cx18" 25#define CX18_DRIVER_NAME "cx18"
26#define CX18_DRIVER_VERSION_MAJOR 1 26#define CX18_VERSION "1.5.1"
27#define CX18_DRIVER_VERSION_MINOR 5
28#define CX18_DRIVER_VERSION_PATCHLEVEL 0
29
30#define CX18_VERSION __stringify(CX18_DRIVER_VERSION_MAJOR) "." __stringify(CX18_DRIVER_VERSION_MINOR) "." __stringify(CX18_DRIVER_VERSION_PATCHLEVEL)
31#define CX18_DRIVER_VERSION KERNEL_VERSION(CX18_DRIVER_VERSION_MAJOR, \
32 CX18_DRIVER_VERSION_MINOR, CX18_DRIVER_VERSION_PATCHLEVEL)
33 27
34#endif 28#endif
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index 8d7813415760..53ff26e7abf7 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -355,6 +355,8 @@ int cx231xx_afe_update_power_control(struct cx231xx *dev,
355 case CX231XX_BOARD_HAUPPAUGE_EXETER: 355 case CX231XX_BOARD_HAUPPAUGE_EXETER:
356 case CX231XX_BOARD_HAUPPAUGE_USBLIVE2: 356 case CX231XX_BOARD_HAUPPAUGE_USBLIVE2:
357 case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: 357 case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
358 case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
359 case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
358 if (avmode == POLARIS_AVMODE_ANALOGT_TV) { 360 if (avmode == POLARIS_AVMODE_ANALOGT_TV) {
359 while (afe_power_status != (FLD_PWRDN_TUNING_BIAS | 361 while (afe_power_status != (FLD_PWRDN_TUNING_BIAS |
360 FLD_PWRDN_ENABLE_PLL)) { 362 FLD_PWRDN_ENABLE_PLL)) {
@@ -1733,6 +1735,8 @@ int cx231xx_dif_set_standard(struct cx231xx *dev, u32 standard)
1733 break; 1735 break;
1734 case CX231XX_BOARD_CNXT_RDE_253S: 1736 case CX231XX_BOARD_CNXT_RDE_253S:
1735 case CX231XX_BOARD_CNXT_RDU_253S: 1737 case CX231XX_BOARD_CNXT_RDU_253S:
1738 case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
1739 case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
1736 func_mode = 0x01; 1740 func_mode = 0x01;
1737 break; 1741 break;
1738 default: 1742 default:
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 22703815a31f..53dae2a8272d 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -387,6 +387,7 @@ struct cx231xx_board cx231xx_boards[] = {
387 .norm = V4L2_STD_NTSC, 387 .norm = V4L2_STD_NTSC,
388 .no_alt_vanc = 1, 388 .no_alt_vanc = 1,
389 .external_av = 1, 389 .external_av = 1,
390 .dont_use_port_3 = 1,
390 .input = {{ 391 .input = {{
391 .type = CX231XX_VMUX_COMPOSITE1, 392 .type = CX231XX_VMUX_COMPOSITE1,
392 .vmux = CX231XX_VIN_2_1, 393 .vmux = CX231XX_VIN_2_1,
@@ -532,6 +533,76 @@ struct cx231xx_board cx231xx_boards[] = {
532 .gpio = NULL, 533 .gpio = NULL,
533 } }, 534 } },
534 }, 535 },
536 [CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL] = {
537 .name = "Hauppauge WinTV USB2 FM (PAL)",
538 .tuner_type = TUNER_NXP_TDA18271,
539 .tuner_addr = 0x60,
540 .tuner_gpio = RDE250_XCV_TUNER,
541 .tuner_sif_gpio = 0x05,
542 .tuner_scl_gpio = 0x1a,
543 .tuner_sda_gpio = 0x1b,
544 .decoder = CX231XX_AVDECODER,
545 .output_mode = OUT_MODE_VIP11,
546 .ctl_pin_status_mask = 0xFFFFFFC4,
547 .agc_analog_digital_select_gpio = 0x0c,
548 .gpio_pin_status_mask = 0x4001000,
549 .tuner_i2c_master = 1,
550 .norm = V4L2_STD_PAL,
551
552 .input = {{
553 .type = CX231XX_VMUX_TELEVISION,
554 .vmux = CX231XX_VIN_3_1,
555 .amux = CX231XX_AMUX_VIDEO,
556 .gpio = NULL,
557 }, {
558 .type = CX231XX_VMUX_COMPOSITE1,
559 .vmux = CX231XX_VIN_2_1,
560 .amux = CX231XX_AMUX_LINE_IN,
561 .gpio = NULL,
562 }, {
563 .type = CX231XX_VMUX_SVIDEO,
564 .vmux = CX231XX_VIN_1_1 |
565 (CX231XX_VIN_1_2 << 8) |
566 CX25840_SVIDEO_ON,
567 .amux = CX231XX_AMUX_LINE_IN,
568 .gpio = NULL,
569 } },
570 },
571 [CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC] = {
572 .name = "Hauppauge WinTV USB2 FM (NTSC)",
573 .tuner_type = TUNER_NXP_TDA18271,
574 .tuner_addr = 0x60,
575 .tuner_gpio = RDE250_XCV_TUNER,
576 .tuner_sif_gpio = 0x05,
577 .tuner_scl_gpio = 0x1a,
578 .tuner_sda_gpio = 0x1b,
579 .decoder = CX231XX_AVDECODER,
580 .output_mode = OUT_MODE_VIP11,
581 .ctl_pin_status_mask = 0xFFFFFFC4,
582 .agc_analog_digital_select_gpio = 0x0c,
583 .gpio_pin_status_mask = 0x4001000,
584 .tuner_i2c_master = 1,
585 .norm = V4L2_STD_NTSC,
586
587 .input = {{
588 .type = CX231XX_VMUX_TELEVISION,
589 .vmux = CX231XX_VIN_3_1,
590 .amux = CX231XX_AMUX_VIDEO,
591 .gpio = NULL,
592 }, {
593 .type = CX231XX_VMUX_COMPOSITE1,
594 .vmux = CX231XX_VIN_2_1,
595 .amux = CX231XX_AMUX_LINE_IN,
596 .gpio = NULL,
597 }, {
598 .type = CX231XX_VMUX_SVIDEO,
599 .vmux = CX231XX_VIN_1_1 |
600 (CX231XX_VIN_1_2 << 8) |
601 CX25840_SVIDEO_ON,
602 .amux = CX231XX_AMUX_LINE_IN,
603 .gpio = NULL,
604 } },
605 },
535}; 606};
536const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards); 607const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
537 608
@@ -553,6 +624,10 @@ struct usb_device_id cx231xx_id_table[] = {
553 .driver_info = CX231XX_BOARD_CNXT_RDE_250}, 624 .driver_info = CX231XX_BOARD_CNXT_RDE_250},
554 {USB_DEVICE(0x0572, 0x58A0), 625 {USB_DEVICE(0x0572, 0x58A0),
555 .driver_info = CX231XX_BOARD_CNXT_RDU_250}, 626 .driver_info = CX231XX_BOARD_CNXT_RDU_250},
627 {USB_DEVICE(0x2040, 0xb110),
628 .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL},
629 {USB_DEVICE(0x2040, 0xb111),
630 .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC},
556 {USB_DEVICE(0x2040, 0xb120), 631 {USB_DEVICE(0x2040, 0xb120),
557 .driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER}, 632 .driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER},
558 {USB_DEVICE(0x2040, 0xb140), 633 {USB_DEVICE(0x2040, 0xb140),
@@ -1051,6 +1126,9 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
1051 if (assoc_desc->bFirstInterface != ifnum) { 1126 if (assoc_desc->bFirstInterface != ifnum) {
1052 cx231xx_err(DRIVER_NAME ": Not found " 1127 cx231xx_err(DRIVER_NAME ": Not found "
1053 "matching IAD interface\n"); 1128 "matching IAD interface\n");
1129 cx231xx_devused &= ~(1 << nr);
1130 kfree(dev);
1131 dev = NULL;
1054 return -ENODEV; 1132 return -ENODEV;
1055 } 1133 }
1056 1134
diff --git a/drivers/media/video/cx231xx/cx231xx-core.c b/drivers/media/video/cx231xx/cx231xx-core.c
index abe500feb7dd..d4457f9488ee 100644
--- a/drivers/media/video/cx231xx/cx231xx-core.c
+++ b/drivers/media/video/cx231xx/cx231xx-core.c
@@ -742,6 +742,8 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
742 case CX231XX_BOARD_CNXT_RDU_253S: 742 case CX231XX_BOARD_CNXT_RDU_253S:
743 case CX231XX_BOARD_HAUPPAUGE_EXETER: 743 case CX231XX_BOARD_HAUPPAUGE_EXETER:
744 case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: 744 case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
745 case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
746 case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
745 errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); 747 errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
746 break; 748 break;
747 default: 749 default:
@@ -1381,6 +1383,8 @@ int cx231xx_dev_init(struct cx231xx *dev)
1381 case CX231XX_BOARD_CNXT_RDU_253S: 1383 case CX231XX_BOARD_CNXT_RDU_253S:
1382 case CX231XX_BOARD_HAUPPAUGE_EXETER: 1384 case CX231XX_BOARD_HAUPPAUGE_EXETER:
1383 case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: 1385 case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
1386 case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
1387 case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
1384 errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); 1388 errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
1385 break; 1389 break;
1386 default: 1390 default:
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index a69c24d8db06..6e81f970dc7d 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -29,7 +29,6 @@
29#include <linux/bitmap.h> 29#include <linux/bitmap.h>
30#include <linux/usb.h> 30#include <linux/usb.h>
31#include <linux/i2c.h> 31#include <linux/i2c.h>
32#include <linux/version.h>
33#include <linux/mm.h> 32#include <linux/mm.h>
34#include <linux/mutex.h> 33#include <linux/mutex.h>
35#include <linux/slab.h> 34#include <linux/slab.h>
@@ -45,7 +44,7 @@
45#include "cx231xx.h" 44#include "cx231xx.h"
46#include "cx231xx-vbi.h" 45#include "cx231xx-vbi.h"
47 46
48#define CX231XX_VERSION_CODE KERNEL_VERSION(0, 0, 1) 47#define CX231XX_VERSION "0.0.2"
49 48
50#define DRIVER_AUTHOR "Srinivasa Deevi <srinivasa.deevi@conexant.com>" 49#define DRIVER_AUTHOR "Srinivasa Deevi <srinivasa.deevi@conexant.com>"
51#define DRIVER_DESC "Conexant cx231xx based USB video device driver" 50#define DRIVER_DESC "Conexant cx231xx based USB video device driver"
@@ -70,6 +69,7 @@ do {\
70MODULE_AUTHOR(DRIVER_AUTHOR); 69MODULE_AUTHOR(DRIVER_AUTHOR);
71MODULE_DESCRIPTION(DRIVER_DESC); 70MODULE_DESCRIPTION(DRIVER_DESC);
72MODULE_LICENSE("GPL"); 71MODULE_LICENSE("GPL");
72MODULE_VERSION(CX231XX_VERSION);
73 73
74static unsigned int card[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET }; 74static unsigned int card[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
75static unsigned int video_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET }; 75static unsigned int video_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
@@ -1179,7 +1179,8 @@ static int vidioc_enum_input(struct file *file, void *priv,
1179{ 1179{
1180 struct cx231xx_fh *fh = priv; 1180 struct cx231xx_fh *fh = priv;
1181 struct cx231xx *dev = fh->dev; 1181 struct cx231xx *dev = fh->dev;
1182 unsigned int n; 1182 u32 gen_stat;
1183 unsigned int ret, n;
1183 1184
1184 n = i->index; 1185 n = i->index;
1185 if (n >= MAX_CX231XX_INPUT) 1186 if (n >= MAX_CX231XX_INPUT)
@@ -1198,6 +1199,18 @@ static int vidioc_enum_input(struct file *file, void *priv,
1198 1199
1199 i->std = dev->vdev->tvnorms; 1200 i->std = dev->vdev->tvnorms;
1200 1201
1202 /* If they are asking about the active input, read signal status */
1203 if (n == dev->video_input) {
1204 ret = cx231xx_read_i2c_data(dev, VID_BLK_I2C_ADDRESS,
1205 GEN_STAT, 2, &gen_stat, 4);
1206 if (ret > 0) {
1207 if ((gen_stat & FLD_VPRES) == 0x00)
1208 i->status |= V4L2_IN_ST_NO_SIGNAL;
1209 if ((gen_stat & FLD_HLOCK) == 0x00)
1210 i->status |= V4L2_IN_ST_NO_H_LOCK;
1211 }
1212 }
1213
1201 return 0; 1214 return 0;
1202} 1215}
1203 1216
@@ -1869,8 +1882,6 @@ static int vidioc_querycap(struct file *file, void *priv,
1869 strlcpy(cap->card, cx231xx_boards[dev->model].name, sizeof(cap->card)); 1882 strlcpy(cap->card, cx231xx_boards[dev->model].name, sizeof(cap->card));
1870 usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); 1883 usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
1871 1884
1872 cap->version = CX231XX_VERSION_CODE;
1873
1874 cap->capabilities = V4L2_CAP_VBI_CAPTURE | 1885 cap->capabilities = V4L2_CAP_VBI_CAPTURE |
1875#if 0 1886#if 0
1876 V4L2_CAP_SLICED_VBI_CAPTURE | 1887 V4L2_CAP_SLICED_VBI_CAPTURE |
@@ -2057,7 +2068,6 @@ static int radio_querycap(struct file *file, void *priv,
2057 strlcpy(cap->card, cx231xx_boards[dev->model].name, sizeof(cap->card)); 2068 strlcpy(cap->card, cx231xx_boards[dev->model].name, sizeof(cap->card));
2058 usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); 2069 usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
2059 2070
2060 cap->version = CX231XX_VERSION_CODE;
2061 cap->capabilities = V4L2_CAP_TUNER; 2071 cap->capabilities = V4L2_CAP_TUNER;
2062 return 0; 2072 return 0;
2063} 2073}
@@ -2570,11 +2580,8 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
2570{ 2580{
2571 int ret; 2581 int ret;
2572 2582
2573 cx231xx_info("%s: v4l2 driver version %d.%d.%d\n", 2583 cx231xx_info("%s: v4l2 driver version %s\n",
2574 dev->name, 2584 dev->name, CX231XX_VERSION);
2575 (CX231XX_VERSION_CODE >> 16) & 0xff,
2576 (CX231XX_VERSION_CODE >> 8) & 0xff,
2577 CX231XX_VERSION_CODE & 0xff);
2578 2585
2579 /* set default norm */ 2586 /* set default norm */
2580 /*dev->norm = cx231xx_video_template.current_norm; */ 2587 /*dev->norm = cx231xx_video_template.current_norm; */
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index 46dd84067816..2000bc64c497 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -43,7 +43,7 @@
43#include "cx231xx-conf-reg.h" 43#include "cx231xx-conf-reg.h"
44 44
45#define DRIVER_NAME "cx231xx" 45#define DRIVER_NAME "cx231xx"
46#define PWR_SLEEP_INTERVAL 5 46#define PWR_SLEEP_INTERVAL 10
47 47
48/* I2C addresses for control block in Cx231xx */ 48/* I2C addresses for control block in Cx231xx */
49#define AFE_DEVICE_ADDRESS 0x60 49#define AFE_DEVICE_ADDRESS 0x60
@@ -67,6 +67,8 @@
67#define CX231XX_BOARD_PV_XCAPTURE_USB 11 67#define CX231XX_BOARD_PV_XCAPTURE_USB 11
68#define CX231XX_BOARD_KWORLD_UB430_USB_HYBRID 12 68#define CX231XX_BOARD_KWORLD_UB430_USB_HYBRID 12
69#define CX231XX_BOARD_ICONBIT_U100 13 69#define CX231XX_BOARD_ICONBIT_U100 13
70#define CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL 14
71#define CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC 15
70 72
71/* Limits minimum and default number of buffers */ 73/* Limits minimum and default number of buffers */
72#define CX231XX_MIN_BUF 4 74#define CX231XX_MIN_BUF 4
@@ -112,7 +114,6 @@
112 V4L2_STD_PAL_BG | V4L2_STD_PAL_DK | V4L2_STD_PAL_I | \ 114 V4L2_STD_PAL_BG | V4L2_STD_PAL_DK | V4L2_STD_PAL_I | \
113 V4L2_STD_PAL_M | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc | \ 115 V4L2_STD_PAL_M | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc | \
114 V4L2_STD_PAL_60 | V4L2_STD_SECAM_L | V4L2_STD_SECAM_DK) 116 V4L2_STD_PAL_60 | V4L2_STD_SECAM_L | V4L2_STD_SECAM_DK)
115#define CX231xx_VERSION_CODE KERNEL_VERSION(0, 0, 2)
116 117
117#define SLEEP_S5H1432 30 118#define SLEEP_S5H1432 30
118#define CX23417_OSC_EN 8 119#define CX23417_OSC_EN 8
diff --git a/drivers/media/video/cx23885/altera-ci.c b/drivers/media/video/cx23885/altera-ci.c
index 678539b2acfa..1fa8927f0d36 100644
--- a/drivers/media/video/cx23885/altera-ci.c
+++ b/drivers/media/video/cx23885/altera-ci.c
@@ -52,7 +52,6 @@
52 * | DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0| 52 * | DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0|
53 * +-------+-------+-------+-------+-------+-------+-------+-------+ 53 * +-------+-------+-------+-------+-------+-------+-------+-------+
54 */ 54 */
55#include <linux/version.h>
56#include <media/videobuf-dma-sg.h> 55#include <media/videobuf-dma-sg.h>
57#include <media/videobuf-dvb.h> 56#include <media/videobuf-dvb.h>
58#include "altera-ci.h" 57#include "altera-ci.h"
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 9a98dc55f657..67c4a59bd882 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1359,7 +1359,6 @@ static int vidioc_querycap(struct file *file, void *priv,
1359 strlcpy(cap->card, cx23885_boards[tsport->dev->board].name, 1359 strlcpy(cap->card, cx23885_boards[tsport->dev->board].name,
1360 sizeof(cap->card)); 1360 sizeof(cap->card));
1361 sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci)); 1361 sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
1362 cap->version = CX23885_VERSION_CODE;
1363 cap->capabilities = 1362 cap->capabilities =
1364 V4L2_CAP_VIDEO_CAPTURE | 1363 V4L2_CAP_VIDEO_CAPTURE |
1365 V4L2_CAP_READWRITE | 1364 V4L2_CAP_READWRITE |
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 934185cca758..76b7563de39c 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -29,11 +29,17 @@
29#include "../../../staging/altera-stapl/altera.h" 29#include "../../../staging/altera-stapl/altera.h"
30#include "cx23885.h" 30#include "cx23885.h"
31#include "tuner-xc2028.h" 31#include "tuner-xc2028.h"
32#include "netup-eeprom.h"
32#include "netup-init.h" 33#include "netup-init.h"
33#include "altera-ci.h" 34#include "altera-ci.h"
35#include "xc4000.h"
34#include "xc5000.h" 36#include "xc5000.h"
35#include "cx23888-ir.h" 37#include "cx23888-ir.h"
36 38
39static unsigned int netup_card_rev = 1;
40module_param(netup_card_rev, int, 0644);
41MODULE_PARM_DESC(netup_card_rev,
42 "NetUP Dual DVB-T/C CI card revision");
37static unsigned int enable_885_ir; 43static unsigned int enable_885_ir;
38module_param(enable_885_ir, int, 0644); 44module_param(enable_885_ir, int, 0644);
39MODULE_PARM_DESC(enable_885_ir, 45MODULE_PARM_DESC(enable_885_ir,
@@ -175,6 +181,34 @@ struct cx23885_board cx23885_boards[] = {
175 .name = "Leadtek Winfast PxDVR3200 H", 181 .name = "Leadtek Winfast PxDVR3200 H",
176 .portc = CX23885_MPEG_DVB, 182 .portc = CX23885_MPEG_DVB,
177 }, 183 },
184 [CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000] = {
185 .name = "Leadtek Winfast PxDVR3200 H XC4000",
186 .porta = CX23885_ANALOG_VIDEO,
187 .portc = CX23885_MPEG_DVB,
188 .tuner_type = TUNER_XC4000,
189 .tuner_addr = 0x61,
190 .radio_type = TUNER_XC4000,
191 .radio_addr = 0x61,
192 .input = {{
193 .type = CX23885_VMUX_TELEVISION,
194 .vmux = CX25840_VIN2_CH1 |
195 CX25840_VIN5_CH2 |
196 CX25840_NONE0_CH3,
197 }, {
198 .type = CX23885_VMUX_COMPOSITE1,
199 .vmux = CX25840_COMPOSITE1,
200 }, {
201 .type = CX23885_VMUX_SVIDEO,
202 .vmux = CX25840_SVIDEO_LUMA3 |
203 CX25840_SVIDEO_CHROMA4,
204 }, {
205 .type = CX23885_VMUX_COMPONENT,
206 .vmux = CX25840_VIN7_CH1 |
207 CX25840_VIN6_CH2 |
208 CX25840_VIN8_CH3 |
209 CX25840_COMPONENT_ON,
210 } },
211 },
178 [CX23885_BOARD_COMPRO_VIDEOMATE_E650F] = { 212 [CX23885_BOARD_COMPRO_VIDEOMATE_E650F] = {
179 .name = "Compro VideoMate E650F", 213 .name = "Compro VideoMate E650F",
180 .portc = CX23885_MPEG_DVB, 214 .portc = CX23885_MPEG_DVB,
@@ -433,6 +467,10 @@ struct cx23885_subid cx23885_subids[] = {
433 .subdevice = 0x6681, 467 .subdevice = 0x6681,
434 .card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H, 468 .card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H,
435 }, { 469 }, {
470 .subvendor = 0x107d,
471 .subdevice = 0x6f39,
472 .card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000,
473 }, {
436 .subvendor = 0x185b, 474 .subvendor = 0x185b,
437 .subdevice = 0xe800, 475 .subdevice = 0xe800,
438 .card = CX23885_BOARD_COMPRO_VIDEOMATE_E650F, 476 .card = CX23885_BOARD_COMPRO_VIDEOMATE_E650F,
@@ -749,6 +787,7 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
749 case CX23885_BOARD_HAUPPAUGE_HVR1500: 787 case CX23885_BOARD_HAUPPAUGE_HVR1500:
750 case CX23885_BOARD_HAUPPAUGE_HVR1500Q: 788 case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
751 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: 789 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
790 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
752 case CX23885_BOARD_COMPRO_VIDEOMATE_E650F: 791 case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
753 case CX23885_BOARD_COMPRO_VIDEOMATE_E800: 792 case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
754 case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200: 793 case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
@@ -909,6 +948,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
909 cx_set(GP0_IO, 0x000f000f); 948 cx_set(GP0_IO, 0x000f000f);
910 break; 949 break;
911 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: 950 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
951 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
912 case CX23885_BOARD_COMPRO_VIDEOMATE_E650F: 952 case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
913 case CX23885_BOARD_COMPRO_VIDEOMATE_E800: 953 case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
914 case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200: 954 case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
@@ -1097,12 +1137,19 @@ int cx23885_ir_init(struct cx23885_dev *dev)
1097 case CX23885_BOARD_HAUPPAUGE_HVR1800: 1137 case CX23885_BOARD_HAUPPAUGE_HVR1800:
1098 case CX23885_BOARD_HAUPPAUGE_HVR1200: 1138 case CX23885_BOARD_HAUPPAUGE_HVR1200:
1099 case CX23885_BOARD_HAUPPAUGE_HVR1400: 1139 case CX23885_BOARD_HAUPPAUGE_HVR1400:
1100 case CX23885_BOARD_HAUPPAUGE_HVR1270:
1101 case CX23885_BOARD_HAUPPAUGE_HVR1275: 1140 case CX23885_BOARD_HAUPPAUGE_HVR1275:
1102 case CX23885_BOARD_HAUPPAUGE_HVR1255: 1141 case CX23885_BOARD_HAUPPAUGE_HVR1255:
1103 case CX23885_BOARD_HAUPPAUGE_HVR1210: 1142 case CX23885_BOARD_HAUPPAUGE_HVR1210:
1104 /* FIXME: Implement me */ 1143 /* FIXME: Implement me */
1105 break; 1144 break;
1145 case CX23885_BOARD_HAUPPAUGE_HVR1270:
1146 ret = cx23888_ir_probe(dev);
1147 if (ret)
1148 break;
1149 dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_888_IR);
1150 v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
1151 ir_rx_pin_cfg_count, ir_rx_pin_cfg);
1152 break;
1106 case CX23885_BOARD_HAUPPAUGE_HVR1850: 1153 case CX23885_BOARD_HAUPPAUGE_HVR1850:
1107 case CX23885_BOARD_HAUPPAUGE_HVR1290: 1154 case CX23885_BOARD_HAUPPAUGE_HVR1290:
1108 ret = cx23888_ir_probe(dev); 1155 ret = cx23888_ir_probe(dev);
@@ -1156,6 +1203,7 @@ int cx23885_ir_init(struct cx23885_dev *dev)
1156void cx23885_ir_fini(struct cx23885_dev *dev) 1203void cx23885_ir_fini(struct cx23885_dev *dev)
1157{ 1204{
1158 switch (dev->board) { 1205 switch (dev->board) {
1206 case CX23885_BOARD_HAUPPAUGE_HVR1270:
1159 case CX23885_BOARD_HAUPPAUGE_HVR1850: 1207 case CX23885_BOARD_HAUPPAUGE_HVR1850:
1160 case CX23885_BOARD_HAUPPAUGE_HVR1290: 1208 case CX23885_BOARD_HAUPPAUGE_HVR1290:
1161 cx23885_irq_remove(dev, PCI_MSK_IR); 1209 cx23885_irq_remove(dev, PCI_MSK_IR);
@@ -1199,6 +1247,7 @@ int netup_jtag_io(void *device, int tms, int tdi, int read_tdo)
1199void cx23885_ir_pci_int_enable(struct cx23885_dev *dev) 1247void cx23885_ir_pci_int_enable(struct cx23885_dev *dev)
1200{ 1248{
1201 switch (dev->board) { 1249 switch (dev->board) {
1250 case CX23885_BOARD_HAUPPAUGE_HVR1270:
1202 case CX23885_BOARD_HAUPPAUGE_HVR1850: 1251 case CX23885_BOARD_HAUPPAUGE_HVR1850:
1203 case CX23885_BOARD_HAUPPAUGE_HVR1290: 1252 case CX23885_BOARD_HAUPPAUGE_HVR1290:
1204 if (dev->sd_ir) 1253 if (dev->sd_ir)
@@ -1325,6 +1374,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
1325 case CX23885_BOARD_HAUPPAUGE_HVR1700: 1374 case CX23885_BOARD_HAUPPAUGE_HVR1700:
1326 case CX23885_BOARD_HAUPPAUGE_HVR1400: 1375 case CX23885_BOARD_HAUPPAUGE_HVR1400:
1327 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: 1376 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
1377 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
1328 case CX23885_BOARD_COMPRO_VIDEOMATE_E650F: 1378 case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
1329 case CX23885_BOARD_HAUPPAUGE_HVR1270: 1379 case CX23885_BOARD_HAUPPAUGE_HVR1270:
1330 case CX23885_BOARD_HAUPPAUGE_HVR1275: 1380 case CX23885_BOARD_HAUPPAUGE_HVR1275:
@@ -1353,10 +1403,12 @@ void cx23885_card_setup(struct cx23885_dev *dev)
1353 case CX23885_BOARD_HAUPPAUGE_HVR1800lp: 1403 case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
1354 case CX23885_BOARD_HAUPPAUGE_HVR1700: 1404 case CX23885_BOARD_HAUPPAUGE_HVR1700:
1355 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: 1405 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
1406 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
1356 case CX23885_BOARD_COMPRO_VIDEOMATE_E650F: 1407 case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
1357 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI: 1408 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
1358 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF: 1409 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
1359 case CX23885_BOARD_COMPRO_VIDEOMATE_E800: 1410 case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
1411 case CX23885_BOARD_HAUPPAUGE_HVR1270:
1360 case CX23885_BOARD_HAUPPAUGE_HVR1850: 1412 case CX23885_BOARD_HAUPPAUGE_HVR1850:
1361 case CX23885_BOARD_MYGICA_X8506: 1413 case CX23885_BOARD_MYGICA_X8506:
1362 case CX23885_BOARD_MAGICPRO_PROHDTVE2: 1414 case CX23885_BOARD_MAGICPRO_PROHDTVE2:
@@ -1383,6 +1435,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
1383 const struct firmware *fw; 1435 const struct firmware *fw;
1384 const char *filename = "dvb-netup-altera-01.fw"; 1436 const char *filename = "dvb-netup-altera-01.fw";
1385 char *action = "configure"; 1437 char *action = "configure";
1438 static struct netup_card_info cinfo;
1386 struct altera_config netup_config = { 1439 struct altera_config netup_config = {
1387 .dev = dev, 1440 .dev = dev,
1388 .action = action, 1441 .action = action,
@@ -1391,6 +1444,21 @@ void cx23885_card_setup(struct cx23885_dev *dev)
1391 1444
1392 netup_initialize(dev); 1445 netup_initialize(dev);
1393 1446
1447 netup_get_card_info(&dev->i2c_bus[0].i2c_adap, &cinfo);
1448 if (netup_card_rev)
1449 cinfo.rev = netup_card_rev;
1450
1451 switch (cinfo.rev) {
1452 case 0x4:
1453 filename = "dvb-netup-altera-04.fw";
1454 break;
1455 default:
1456 filename = "dvb-netup-altera-01.fw";
1457 break;
1458 }
1459 printk(KERN_INFO "NetUP card rev=0x%x fw_filename=%s\n",
1460 cinfo.rev, filename);
1461
1394 ret = request_firmware(&fw, filename, &dev->pci->dev); 1462 ret = request_firmware(&fw, filename, &dev->pci->dev);
1395 if (ret != 0) 1463 if (ret != 0)
1396 printk(KERN_ERR "did not find the firmware file. (%s) " 1464 printk(KERN_ERR "did not find the firmware file. (%s) "
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index 419777a832ee..ee41a8882f58 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -42,6 +42,7 @@
42MODULE_DESCRIPTION("Driver for cx23885 based TV cards"); 42MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
43MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>"); 43MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
44MODULE_LICENSE("GPL"); 44MODULE_LICENSE("GPL");
45MODULE_VERSION(CX23885_VERSION);
45 46
46static unsigned int debug; 47static unsigned int debug;
47module_param(debug, int, 0644); 48module_param(debug, int, 0644);
@@ -2147,14 +2148,8 @@ static struct pci_driver cx23885_pci_driver = {
2147 2148
2148static int __init cx23885_init(void) 2149static int __init cx23885_init(void)
2149{ 2150{
2150 printk(KERN_INFO "cx23885 driver version %d.%d.%d loaded\n", 2151 printk(KERN_INFO "cx23885 driver version %s loaded\n",
2151 (CX23885_VERSION_CODE >> 16) & 0xff, 2152 CX23885_VERSION);
2152 (CX23885_VERSION_CODE >> 8) & 0xff,
2153 CX23885_VERSION_CODE & 0xff);
2154#ifdef SNAPSHOT
2155 printk(KERN_INFO "cx23885: snapshot date %04d-%02d-%02d\n",
2156 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
2157#endif
2158 return pci_register_driver(&cx23885_pci_driver); 2153 return pci_register_driver(&cx23885_pci_driver);
2159} 2154}
2160 2155
@@ -2165,5 +2160,3 @@ static void __exit cx23885_fini(void)
2165 2160
2166module_init(cx23885_init); 2161module_init(cx23885_init);
2167module_exit(cx23885_fini); 2162module_exit(cx23885_fini);
2168
2169/* ----------------------------------------------------------- */
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 3c315f94cc85..aa83f07b1b0f 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -37,6 +37,7 @@
37#include "tda8290.h" 37#include "tda8290.h"
38#include "tda18271.h" 38#include "tda18271.h"
39#include "lgdt330x.h" 39#include "lgdt330x.h"
40#include "xc4000.h"
40#include "xc5000.h" 41#include "xc5000.h"
41#include "max2165.h" 42#include "max2165.h"
42#include "tda10048.h" 43#include "tda10048.h"
@@ -921,6 +922,26 @@ static int dvb_register(struct cx23885_tsport *port)
921 fe->ops.tuner_ops.set_config(fe, &ctl); 922 fe->ops.tuner_ops.set_config(fe, &ctl);
922 } 923 }
923 break; 924 break;
925 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
926 i2c_bus = &dev->i2c_bus[0];
927
928 fe0->dvb.frontend = dvb_attach(zl10353_attach,
929 &dvico_fusionhdtv_xc3028,
930 &i2c_bus->i2c_adap);
931 if (fe0->dvb.frontend != NULL) {
932 struct dvb_frontend *fe;
933 struct xc4000_config cfg = {
934 .i2c_address = 0x61,
935 .default_pm = 0,
936 .dvb_amplitude = 134,
937 .set_smoothedcvbs = 1,
938 .if_khz = 4560
939 };
940
941 fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
942 &dev->i2c_bus[1].i2c_adap, &cfg);
943 }
944 break;
924 case CX23885_BOARD_TBS_6920: 945 case CX23885_BOARD_TBS_6920:
925 i2c_bus = &dev->i2c_bus[1]; 946 i2c_bus = &dev->i2c_bus[1];
926 947
@@ -1249,7 +1270,7 @@ int cx23885_dvb_unregister(struct cx23885_tsport *port)
1249 * implement MFE support. 1270 * implement MFE support.
1250 */ 1271 */
1251 fe0 = videobuf_dvb_get_frontend(&port->frontends, 1); 1272 fe0 = videobuf_dvb_get_frontend(&port->frontends, 1);
1252 if (fe0->dvb.frontend) 1273 if (fe0 && fe0->dvb.frontend)
1253 videobuf_dvb_unregister_bus(&port->frontends); 1274 videobuf_dvb_unregister_bus(&port->frontends);
1254 1275
1255 switch (port->dev->board) { 1276 switch (port->dev->board) {
diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
index e97cafd83984..ce765e3f77bd 100644
--- a/drivers/media/video/cx23885/cx23885-input.c
+++ b/drivers/media/video/cx23885/cx23885-input.c
@@ -82,6 +82,7 @@ void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events)
82 return; 82 return;
83 83
84 switch (dev->board) { 84 switch (dev->board) {
85 case CX23885_BOARD_HAUPPAUGE_HVR1270:
85 case CX23885_BOARD_HAUPPAUGE_HVR1850: 86 case CX23885_BOARD_HAUPPAUGE_HVR1850:
86 case CX23885_BOARD_HAUPPAUGE_HVR1290: 87 case CX23885_BOARD_HAUPPAUGE_HVR1290:
87 case CX23885_BOARD_TEVII_S470: 88 case CX23885_BOARD_TEVII_S470:
@@ -133,6 +134,7 @@ static int cx23885_input_ir_start(struct cx23885_dev *dev)
133 134
134 v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params); 135 v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
135 switch (dev->board) { 136 switch (dev->board) {
137 case CX23885_BOARD_HAUPPAUGE_HVR1270:
136 case CX23885_BOARD_HAUPPAUGE_HVR1850: 138 case CX23885_BOARD_HAUPPAUGE_HVR1850:
137 case CX23885_BOARD_HAUPPAUGE_HVR1290: 139 case CX23885_BOARD_HAUPPAUGE_HVR1290:
138 case CX23885_BOARD_HAUPPAUGE_HVR1250: 140 case CX23885_BOARD_HAUPPAUGE_HVR1250:
@@ -229,6 +231,9 @@ static void cx23885_input_ir_stop(struct cx23885_dev *dev)
229 v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params); 231 v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params);
230 v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params); 232 v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
231 } 233 }
234 flush_work_sync(&dev->cx25840_work);
235 flush_work_sync(&dev->ir_rx_work);
236 flush_work_sync(&dev->ir_tx_work);
232} 237}
233 238
234static void cx23885_input_ir_close(struct rc_dev *rc) 239static void cx23885_input_ir_close(struct rc_dev *rc)
@@ -257,6 +262,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
257 return -ENODEV; 262 return -ENODEV;
258 263
259 switch (dev->board) { 264 switch (dev->board) {
265 case CX23885_BOARD_HAUPPAUGE_HVR1270:
260 case CX23885_BOARD_HAUPPAUGE_HVR1850: 266 case CX23885_BOARD_HAUPPAUGE_HVR1850:
261 case CX23885_BOARD_HAUPPAUGE_HVR1290: 267 case CX23885_BOARD_HAUPPAUGE_HVR1290:
262 case CX23885_BOARD_HAUPPAUGE_HVR1250: 268 case CX23885_BOARD_HAUPPAUGE_HVR1250:
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index ee57f6bedbe3..896bb32dbf03 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -1000,7 +1000,6 @@ static int vidioc_querycap(struct file *file, void *priv,
1000 strlcpy(cap->card, cx23885_boards[dev->board].name, 1000 strlcpy(cap->card, cx23885_boards[dev->board].name,
1001 sizeof(cap->card)); 1001 sizeof(cap->card));
1002 sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci)); 1002 sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci));
1003 cap->version = CX23885_VERSION_CODE;
1004 cap->capabilities = 1003 cap->capabilities =
1005 V4L2_CAP_VIDEO_CAPTURE | 1004 V4L2_CAP_VIDEO_CAPTURE |
1006 V4L2_CAP_READWRITE | 1005 V4L2_CAP_READWRITE |
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index c186473fc570..d86bc0b1317b 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -36,10 +36,9 @@
36#include "cx23885-reg.h" 36#include "cx23885-reg.h"
37#include "media/cx2341x.h" 37#include "media/cx2341x.h"
38 38
39#include <linux/version.h>
40#include <linux/mutex.h> 39#include <linux/mutex.h>
41 40
42#define CX23885_VERSION_CODE KERNEL_VERSION(0, 0, 2) 41#define CX23885_VERSION "0.0.3"
43 42
44#define UNSET (-1U) 43#define UNSET (-1U)
45 44
@@ -86,6 +85,7 @@
86#define CX23885_BOARD_LEADTEK_WINFAST_PXTV1200 28 85#define CX23885_BOARD_LEADTEK_WINFAST_PXTV1200 28
87#define CX23885_BOARD_GOTVIEW_X5_3D_HYBRID 29 86#define CX23885_BOARD_GOTVIEW_X5_3D_HYBRID 29
88#define CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF 30 87#define CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF 30
88#define CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000 31
89 89
90#define GPIO_0 0x00000001 90#define GPIO_0 0x00000001
91#define GPIO_1 0x00000002 91#define GPIO_1 0x00000002
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 423c1af8a782..68d1240f493c 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -113,6 +113,8 @@ MODULE_DESCRIPTION("ALSA driver module for cx2388x based TV cards");
113MODULE_AUTHOR("Ricardo Cerqueira"); 113MODULE_AUTHOR("Ricardo Cerqueira");
114MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); 114MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
115MODULE_LICENSE("GPL"); 115MODULE_LICENSE("GPL");
116MODULE_VERSION(CX88_VERSION);
117
116MODULE_SUPPORTED_DEVICE("{{Conexant,23881}," 118MODULE_SUPPORTED_DEVICE("{{Conexant,23881},"
117 "{{Conexant,23882}," 119 "{{Conexant,23882},"
118 "{{Conexant,23883}"); 120 "{{Conexant,23883}");
@@ -973,14 +975,8 @@ static struct pci_driver cx88_audio_pci_driver = {
973 */ 975 */
974static int __init cx88_audio_init(void) 976static int __init cx88_audio_init(void)
975{ 977{
976 printk(KERN_INFO "cx2388x alsa driver version %d.%d.%d loaded\n", 978 printk(KERN_INFO "cx2388x alsa driver version %s loaded\n",
977 (CX88_VERSION_CODE >> 16) & 0xff, 979 CX88_VERSION);
978 (CX88_VERSION_CODE >> 8) & 0xff,
979 CX88_VERSION_CODE & 0xff);
980#ifdef SNAPSHOT
981 printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
982 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
983#endif
984 return pci_register_driver(&cx88_audio_pci_driver); 980 return pci_register_driver(&cx88_audio_pci_driver);
985} 981}
986 982
@@ -994,10 +990,3 @@ static void __exit cx88_audio_fini(void)
994 990
995module_init(cx88_audio_init); 991module_init(cx88_audio_init);
996module_exit(cx88_audio_fini); 992module_exit(cx88_audio_fini);
997
998/* ----------------------------------------------------------- */
999/*
1000 * Local variables:
1001 * c-basic-offset: 8
1002 * End:
1003 */
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 11e49bbc4a66..e46446a449c0 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -42,6 +42,7 @@
42MODULE_DESCRIPTION("driver for cx2388x/cx23416 based mpeg encoder cards"); 42MODULE_DESCRIPTION("driver for cx2388x/cx23416 based mpeg encoder cards");
43MODULE_AUTHOR("Jelle Foks <jelle@foks.us>, Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); 43MODULE_AUTHOR("Jelle Foks <jelle@foks.us>, Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
44MODULE_LICENSE("GPL"); 44MODULE_LICENSE("GPL");
45MODULE_VERSION(CX88_VERSION);
45 46
46static unsigned int mpegbufs = 32; 47static unsigned int mpegbufs = 32;
47module_param(mpegbufs,int,0644); 48module_param(mpegbufs,int,0644);
@@ -730,7 +731,6 @@ static int vidioc_querycap (struct file *file, void *priv,
730 strcpy(cap->driver, "cx88_blackbird"); 731 strcpy(cap->driver, "cx88_blackbird");
731 strlcpy(cap->card, core->board.name, sizeof(cap->card)); 732 strlcpy(cap->card, core->board.name, sizeof(cap->card));
732 sprintf(cap->bus_info,"PCI:%s",pci_name(dev->pci)); 733 sprintf(cap->bus_info,"PCI:%s",pci_name(dev->pci));
733 cap->version = CX88_VERSION_CODE;
734 cap->capabilities = 734 cap->capabilities =
735 V4L2_CAP_VIDEO_CAPTURE | 735 V4L2_CAP_VIDEO_CAPTURE |
736 V4L2_CAP_READWRITE | 736 V4L2_CAP_READWRITE |
@@ -1368,14 +1368,8 @@ static struct cx8802_driver cx8802_blackbird_driver = {
1368 1368
1369static int __init blackbird_init(void) 1369static int __init blackbird_init(void)
1370{ 1370{
1371 printk(KERN_INFO "cx2388x blackbird driver version %d.%d.%d loaded\n", 1371 printk(KERN_INFO "cx2388x blackbird driver version %s loaded\n",
1372 (CX88_VERSION_CODE >> 16) & 0xff, 1372 CX88_VERSION);
1373 (CX88_VERSION_CODE >> 8) & 0xff,
1374 CX88_VERSION_CODE & 0xff);
1375#ifdef SNAPSHOT
1376 printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
1377 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
1378#endif
1379 return cx8802_register_driver(&cx8802_blackbird_driver); 1373 return cx8802_register_driver(&cx8802_blackbird_driver);
1380} 1374}
1381 1375
@@ -1389,11 +1383,3 @@ module_exit(blackbird_fini);
1389 1383
1390module_param_named(video_debug,cx8802_mpeg_template.debug, int, 0644); 1384module_param_named(video_debug,cx8802_mpeg_template.debug, int, 0644);
1391MODULE_PARM_DESC(debug,"enable debug messages [video]"); 1385MODULE_PARM_DESC(debug,"enable debug messages [video]");
1392
1393/* ----------------------------------------------------------- */
1394/*
1395 * Local variables:
1396 * c-basic-offset: 8
1397 * End:
1398 * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
1399 */
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 27222c92b603..0d719faafd8a 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -28,6 +28,7 @@
28 28
29#include "cx88.h" 29#include "cx88.h"
30#include "tea5767.h" 30#include "tea5767.h"
31#include "xc4000.h"
31 32
32static unsigned int tuner[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; 33static unsigned int tuner[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
33static unsigned int radio[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; 34static unsigned int radio[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
@@ -2119,6 +2120,99 @@ static const struct cx88_board cx88_boards[] = {
2119 }, 2120 },
2120 .mpeg = CX88_MPEG_DVB, 2121 .mpeg = CX88_MPEG_DVB,
2121 }, 2122 },
2123 [CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
2124 .name = "Leadtek WinFast DTV1800 H (XC4000)",
2125 .tuner_type = TUNER_XC4000,
2126 .radio_type = TUNER_XC4000,
2127 .tuner_addr = 0x61,
2128 .radio_addr = 0x61,
2129 /*
2130 * GPIO setting
2131 *
2132 * 2: mute (0=off,1=on)
2133 * 12: tuner reset pin
2134 * 13: audio source (0=tuner audio,1=line in)
2135 * 14: FM (0=on,1=off ???)
2136 */
2137 .input = {{
2138 .type = CX88_VMUX_TELEVISION,
2139 .vmux = 0,
2140 .gpio0 = 0x0400, /* pin 2 = 0 */
2141 .gpio1 = 0x6040, /* pin 13 = 0, pin 14 = 1 */
2142 .gpio2 = 0x0000,
2143 }, {
2144 .type = CX88_VMUX_COMPOSITE1,
2145 .vmux = 1,
2146 .gpio0 = 0x0400, /* pin 2 = 0 */
2147 .gpio1 = 0x6060, /* pin 13 = 1, pin 14 = 1 */
2148 .gpio2 = 0x0000,
2149 }, {
2150 .type = CX88_VMUX_SVIDEO,
2151 .vmux = 2,
2152 .gpio0 = 0x0400, /* pin 2 = 0 */
2153 .gpio1 = 0x6060, /* pin 13 = 1, pin 14 = 1 */
2154 .gpio2 = 0x0000,
2155 }},
2156 .radio = {
2157 .type = CX88_RADIO,
2158 .gpio0 = 0x0400, /* pin 2 = 0 */
2159 .gpio1 = 0x6000, /* pin 13 = 0, pin 14 = 0 */
2160 .gpio2 = 0x0000,
2161 },
2162 .mpeg = CX88_MPEG_DVB,
2163 },
2164 [CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
2165 .name = "Leadtek WinFast DTV2000 H PLUS",
2166 .tuner_type = TUNER_XC4000,
2167 .radio_type = TUNER_XC4000,
2168 .tuner_addr = 0x61,
2169 .radio_addr = 0x61,
2170 /*
2171 * GPIO
2172 * 2: 1: mute audio
2173 * 12: 0: reset XC4000
2174 * 13: 1: audio input is line in (0: tuner)
2175 * 14: 0: FM radio
2176 * 16: 0: RF input is cable
2177 */
2178 .input = {{
2179 .type = CX88_VMUX_TELEVISION,
2180 .vmux = 0,
2181 .gpio0 = 0x0403,
2182 .gpio1 = 0xF0D7,
2183 .gpio2 = 0x0101,
2184 .gpio3 = 0x0000,
2185 }, {
2186 .type = CX88_VMUX_CABLE,
2187 .vmux = 0,
2188 .gpio0 = 0x0403,
2189 .gpio1 = 0xF0D7,
2190 .gpio2 = 0x0100,
2191 .gpio3 = 0x0000,
2192 }, {
2193 .type = CX88_VMUX_COMPOSITE1,
2194 .vmux = 1,
2195 .gpio0 = 0x0403, /* was 0x0407 */
2196 .gpio1 = 0xF0F7,
2197 .gpio2 = 0x0101,
2198 .gpio3 = 0x0000,
2199 }, {
2200 .type = CX88_VMUX_SVIDEO,
2201 .vmux = 2,
2202 .gpio0 = 0x0403, /* was 0x0407 */
2203 .gpio1 = 0xF0F7,
2204 .gpio2 = 0x0101,
2205 .gpio3 = 0x0000,
2206 }},
2207 .radio = {
2208 .type = CX88_RADIO,
2209 .gpio0 = 0x0403,
2210 .gpio1 = 0xF097,
2211 .gpio2 = 0x0100,
2212 .gpio3 = 0x0000,
2213 },
2214 .mpeg = CX88_MPEG_DVB,
2215 },
2122 [CX88_BOARD_PROF_7301] = { 2216 [CX88_BOARD_PROF_7301] = {
2123 .name = "Prof 7301 DVB-S/S2", 2217 .name = "Prof 7301 DVB-S/S2",
2124 .tuner_type = UNSET, 2218 .tuner_type = UNSET,
@@ -2581,6 +2675,15 @@ static const struct cx88_subid cx88_subids[] = {
2581 .subdevice = 0x6654, 2675 .subdevice = 0x6654,
2582 .card = CX88_BOARD_WINFAST_DTV1800H, 2676 .card = CX88_BOARD_WINFAST_DTV1800H,
2583 }, { 2677 }, {
2678 /* WinFast DTV1800 H with XC4000 tuner */
2679 .subvendor = 0x107d,
2680 .subdevice = 0x6f38,
2681 .card = CX88_BOARD_WINFAST_DTV1800H_XC4000,
2682 }, {
2683 .subvendor = 0x107d,
2684 .subdevice = 0x6f42,
2685 .card = CX88_BOARD_WINFAST_DTV2000H_PLUS,
2686 }, {
2584 /* PVR2000 PAL Model [107d:6630] */ 2687 /* PVR2000 PAL Model [107d:6630] */
2585 .subvendor = 0x107d, 2688 .subvendor = 0x107d,
2586 .subdevice = 0x6630, 2689 .subdevice = 0x6630,
@@ -2846,6 +2949,23 @@ static int cx88_xc3028_winfast1800h_callback(struct cx88_core *core,
2846 return -EINVAL; 2949 return -EINVAL;
2847} 2950}
2848 2951
2952static int cx88_xc4000_winfast2000h_plus_callback(struct cx88_core *core,
2953 int command, int arg)
2954{
2955 switch (command) {
2956 case XC4000_TUNER_RESET:
2957 /* GPIO 12 (xc4000 tuner reset) */
2958 cx_set(MO_GP1_IO, 0x1010);
2959 mdelay(50);
2960 cx_clear(MO_GP1_IO, 0x10);
2961 mdelay(75);
2962 cx_set(MO_GP1_IO, 0x10);
2963 mdelay(75);
2964 return 0;
2965 }
2966 return -EINVAL;
2967}
2968
2849/* ------------------------------------------------------------------- */ 2969/* ------------------------------------------------------------------- */
2850/* some Divco specific stuff */ 2970/* some Divco specific stuff */
2851static int cx88_pv_8000gt_callback(struct cx88_core *core, 2971static int cx88_pv_8000gt_callback(struct cx88_core *core,
@@ -2948,6 +3068,19 @@ static int cx88_xc2028_tuner_callback(struct cx88_core *core,
2948 return -EINVAL; 3068 return -EINVAL;
2949} 3069}
2950 3070
3071static int cx88_xc4000_tuner_callback(struct cx88_core *core,
3072 int command, int arg)
3073{
3074 /* Board-specific callbacks */
3075 switch (core->boardnr) {
3076 case CX88_BOARD_WINFAST_DTV1800H_XC4000:
3077 case CX88_BOARD_WINFAST_DTV2000H_PLUS:
3078 return cx88_xc4000_winfast2000h_plus_callback(core,
3079 command, arg);
3080 }
3081 return -EINVAL;
3082}
3083
2951/* ----------------------------------------------------------------------- */ 3084/* ----------------------------------------------------------------------- */
2952/* Tuner callback function. Currently only needed for the Pinnacle * 3085/* Tuner callback function. Currently only needed for the Pinnacle *
2953 * PCTV HD 800i with an xc5000 sillicon tuner. This is used for both * 3086 * PCTV HD 800i with an xc5000 sillicon tuner. This is used for both *
@@ -3022,6 +3155,9 @@ int cx88_tuner_callback(void *priv, int component, int command, int arg)
3022 case TUNER_XC2028: 3155 case TUNER_XC2028:
3023 info_printk(core, "Calling XC2028/3028 callback\n"); 3156 info_printk(core, "Calling XC2028/3028 callback\n");
3024 return cx88_xc2028_tuner_callback(core, command, arg); 3157 return cx88_xc2028_tuner_callback(core, command, arg);
3158 case TUNER_XC4000:
3159 info_printk(core, "Calling XC4000 callback\n");
3160 return cx88_xc4000_tuner_callback(core, command, arg);
3025 case TUNER_XC5000: 3161 case TUNER_XC5000:
3026 info_printk(core, "Calling XC5000 callback\n"); 3162 info_printk(core, "Calling XC5000 callback\n");
3027 return cx88_xc5000_tuner_callback(core, command, arg); 3163 return cx88_xc5000_tuner_callback(core, command, arg);
@@ -3109,13 +3245,13 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
3109 3245
3110 case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL: 3246 case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
3111 case CX88_BOARD_WINFAST_DTV1800H: 3247 case CX88_BOARD_WINFAST_DTV1800H:
3112 /* GPIO 12 (xc3028 tuner reset) */ 3248 cx88_xc3028_winfast1800h_callback(core, XC2028_TUNER_RESET, 0);
3113 cx_set(MO_GP1_IO, 0x1010); 3249 break;
3114 mdelay(50); 3250
3115 cx_clear(MO_GP1_IO, 0x10); 3251 case CX88_BOARD_WINFAST_DTV1800H_XC4000:
3116 mdelay(50); 3252 case CX88_BOARD_WINFAST_DTV2000H_PLUS:
3117 cx_set(MO_GP1_IO, 0x10); 3253 cx88_xc4000_winfast2000h_plus_callback(core,
3118 mdelay(50); 3254 XC4000_TUNER_RESET, 0);
3119 break; 3255 break;
3120 3256
3121 case CX88_BOARD_TWINHAN_VP1027_DVBS: 3257 case CX88_BOARD_TWINHAN_VP1027_DVBS:
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index 2e145f0a5fd9..fbcaa1c5b09d 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -636,6 +636,9 @@ int cx88_reset(struct cx88_core *core)
636 cx_write(MO_PCI_INTSTAT, 0xFFFFFFFF); // Clear PCI int 636 cx_write(MO_PCI_INTSTAT, 0xFFFFFFFF); // Clear PCI int
637 cx_write(MO_INT1_STAT, 0xFFFFFFFF); // Clear RISC int 637 cx_write(MO_INT1_STAT, 0xFFFFFFFF); // Clear RISC int
638 638
639 /* set default notch filter */
640 cx_andor(MO_HTOTAL, 0x1800, (HLNotchFilter4xFsc << 11));
641
639 /* Reset on-board parts */ 642 /* Reset on-board parts */
640 cx_write(MO_SRST_IO, 0); 643 cx_write(MO_SRST_IO, 0);
641 msleep(10); 644 msleep(10);
@@ -759,8 +762,8 @@ int cx88_set_scale(struct cx88_core *core, unsigned int width, unsigned int heig
759 if (nocomb) 762 if (nocomb)
760 value |= (3 << 5); // disable comb filter 763 value |= (3 << 5); // disable comb filter
761 764
762 cx_write(MO_FILTER_EVEN, value); 765 cx_andor(MO_FILTER_EVEN, 0x7ffc7f, value); /* preserve PEAKEN, PSEL */
763 cx_write(MO_FILTER_ODD, value); 766 cx_andor(MO_FILTER_ODD, 0x7ffc7f, value);
764 dprintk(1,"set_scale: filter 0x%04x\n", value); 767 dprintk(1,"set_scale: filter 0x%04x\n", value);
765 768
766 return 0; 769 return 0;
@@ -994,10 +997,10 @@ int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
994 // htotal 997 // htotal
995 tmp64 = norm_htotal(norm) * (u64)vdec_clock; 998 tmp64 = norm_htotal(norm) * (u64)vdec_clock;
996 do_div(tmp64, fsc8); 999 do_div(tmp64, fsc8);
997 htotal = (u32)tmp64 | (HLNotchFilter4xFsc << 11); 1000 htotal = (u32)tmp64;
998 dprintk(1,"set_tvnorm: MO_HTOTAL 0x%08x [old=0x%08x,htotal=%d]\n", 1001 dprintk(1,"set_tvnorm: MO_HTOTAL 0x%08x [old=0x%08x,htotal=%d]\n",
999 htotal, cx_read(MO_HTOTAL), (u32)tmp64); 1002 htotal, cx_read(MO_HTOTAL), (u32)tmp64);
1000 cx_write(MO_HTOTAL, htotal); 1003 cx_andor(MO_HTOTAL, 0x07ff, htotal);
1001 1004
1002 // vbi stuff, set vbi offset to 10 (for 20 Clk*2 pixels), this makes 1005 // vbi stuff, set vbi offset to 10 (for 20 Clk*2 pixels), this makes
1003 // the effective vbi offset ~244 samples, the same as the Bt8x8 1006 // the effective vbi offset ~244 samples, the same as the Bt8x8
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index c69df7ebb6a7..cf3d33ab541b 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -41,6 +41,7 @@
41#include "or51132.h" 41#include "or51132.h"
42#include "lgdt330x.h" 42#include "lgdt330x.h"
43#include "s5h1409.h" 43#include "s5h1409.h"
44#include "xc4000.h"
44#include "xc5000.h" 45#include "xc5000.h"
45#include "nxt200x.h" 46#include "nxt200x.h"
46#include "cx24123.h" 47#include "cx24123.h"
@@ -63,6 +64,7 @@ MODULE_DESCRIPTION("driver for cx2388x based DVB cards");
63MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>"); 64MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
64MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); 65MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
65MODULE_LICENSE("GPL"); 66MODULE_LICENSE("GPL");
67MODULE_VERSION(CX88_VERSION);
66 68
67static unsigned int debug; 69static unsigned int debug;
68module_param(debug, int, 0644); 70module_param(debug, int, 0644);
@@ -605,6 +607,39 @@ static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
605 return 0; 607 return 0;
606} 608}
607 609
610static int attach_xc4000(struct cx8802_dev *dev, struct xc4000_config *cfg)
611{
612 struct dvb_frontend *fe;
613 struct videobuf_dvb_frontend *fe0 = NULL;
614
615 /* Get the first frontend */
616 fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
617 if (!fe0)
618 return -EINVAL;
619
620 if (!fe0->dvb.frontend) {
621 printk(KERN_ERR "%s/2: dvb frontend not attached. "
622 "Can't attach xc4000\n",
623 dev->core->name);
624 return -EINVAL;
625 }
626
627 fe = dvb_attach(xc4000_attach, fe0->dvb.frontend, &dev->core->i2c_adap,
628 cfg);
629 if (!fe) {
630 printk(KERN_ERR "%s/2: xc4000 attach failed\n",
631 dev->core->name);
632 dvb_frontend_detach(fe0->dvb.frontend);
633 dvb_unregister_frontend(fe0->dvb.frontend);
634 fe0->dvb.frontend = NULL;
635 return -EINVAL;
636 }
637
638 printk(KERN_INFO "%s/2: xc4000 attached\n", dev->core->name);
639
640 return 0;
641}
642
608static int cx24116_set_ts_param(struct dvb_frontend *fe, 643static int cx24116_set_ts_param(struct dvb_frontend *fe,
609 int is_punctured) 644 int is_punctured)
610{ 645{
@@ -1294,7 +1329,25 @@ static int dvb_register(struct cx8802_dev *dev)
1294 goto frontend_detach; 1329 goto frontend_detach;
1295 } 1330 }
1296 break; 1331 break;
1297 case CX88_BOARD_GENIATECH_X8000_MT: 1332 case CX88_BOARD_WINFAST_DTV1800H_XC4000:
1333 case CX88_BOARD_WINFAST_DTV2000H_PLUS:
1334 fe0->dvb.frontend = dvb_attach(zl10353_attach,
1335 &cx88_pinnacle_hybrid_pctv,
1336 &core->i2c_adap);
1337 if (fe0->dvb.frontend) {
1338 struct xc4000_config cfg = {
1339 .i2c_address = 0x61,
1340 .default_pm = 0,
1341 .dvb_amplitude = 134,
1342 .set_smoothedcvbs = 1,
1343 .if_khz = 4560
1344 };
1345 fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
1346 if (attach_xc4000(dev, &cfg) < 0)
1347 goto frontend_detach;
1348 }
1349 break;
1350 case CX88_BOARD_GENIATECH_X8000_MT:
1298 dev->ts_gen_cntrl = 0x00; 1351 dev->ts_gen_cntrl = 0x00;
1299 1352
1300 fe0->dvb.frontend = dvb_attach(zl10353_attach, 1353 fe0->dvb.frontend = dvb_attach(zl10353_attach,
@@ -1577,6 +1630,11 @@ static int cx8802_dvb_advise_acquire(struct cx8802_driver *drv)
1577 udelay(1000); 1630 udelay(1000);
1578 break; 1631 break;
1579 1632
1633 case CX88_BOARD_WINFAST_DTV2000H_PLUS:
1634 /* set RF input to AIR for DVB-T (GPIO 16) */
1635 cx_write(MO_GP2_IO, 0x0101);
1636 break;
1637
1580 default: 1638 default:
1581 err = -ENODEV; 1639 err = -ENODEV;
1582 } 1640 }
@@ -1692,14 +1750,8 @@ static struct cx8802_driver cx8802_dvb_driver = {
1692 1750
1693static int __init dvb_init(void) 1751static int __init dvb_init(void)
1694{ 1752{
1695 printk(KERN_INFO "cx88/2: cx2388x dvb driver version %d.%d.%d loaded\n", 1753 printk(KERN_INFO "cx88/2: cx2388x dvb driver version %s loaded\n",
1696 (CX88_VERSION_CODE >> 16) & 0xff, 1754 CX88_VERSION);
1697 (CX88_VERSION_CODE >> 8) & 0xff,
1698 CX88_VERSION_CODE & 0xff);
1699#ifdef SNAPSHOT
1700 printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
1701 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
1702#endif
1703 return cx8802_register_driver(&cx8802_dvb_driver); 1755 return cx8802_register_driver(&cx8802_dvb_driver);
1704} 1756}
1705 1757
@@ -1710,10 +1762,3 @@ static void __exit dvb_fini(void)
1710 1762
1711module_init(dvb_init); 1763module_init(dvb_init);
1712module_exit(dvb_fini); 1764module_exit(dvb_fini);
1713
1714/*
1715 * Local variables:
1716 * c-basic-offset: 8
1717 * compile-command: "make DVB=1"
1718 * End:
1719 */
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 3f442003623d..e614201b5ed3 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -100,6 +100,8 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
100 break; 100 break;
101 case CX88_BOARD_WINFAST_DTV1000: 101 case CX88_BOARD_WINFAST_DTV1000:
102 case CX88_BOARD_WINFAST_DTV1800H: 102 case CX88_BOARD_WINFAST_DTV1800H:
103 case CX88_BOARD_WINFAST_DTV1800H_XC4000:
104 case CX88_BOARD_WINFAST_DTV2000H_PLUS:
103 case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL: 105 case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
104 gpio = (gpio & 0x6ff) | ((cx_read(MO_GP1_IO) << 8) & 0x900); 106 gpio = (gpio & 0x6ff) | ((cx_read(MO_GP1_IO) << 8) & 0x900);
105 auxgpio = gpio; 107 auxgpio = gpio;
@@ -289,6 +291,8 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
289 case CX88_BOARD_WINFAST_DTV2000H: 291 case CX88_BOARD_WINFAST_DTV2000H:
290 case CX88_BOARD_WINFAST_DTV2000H_J: 292 case CX88_BOARD_WINFAST_DTV2000H_J:
291 case CX88_BOARD_WINFAST_DTV1800H: 293 case CX88_BOARD_WINFAST_DTV1800H:
294 case CX88_BOARD_WINFAST_DTV1800H_XC4000:
295 case CX88_BOARD_WINFAST_DTV2000H_PLUS:
292 ir_codes = RC_MAP_WINFAST; 296 ir_codes = RC_MAP_WINFAST;
293 ir->gpio_addr = MO_GP0_IO; 297 ir->gpio_addr = MO_GP0_IO;
294 ir->mask_keycode = 0x8f8; 298 ir->mask_keycode = 0x8f8;
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 1a7b983f8297..cd5386ee210c 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -39,6 +39,7 @@ MODULE_AUTHOR("Jelle Foks <jelle@foks.us>");
39MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>"); 39MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
40MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); 40MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
41MODULE_LICENSE("GPL"); 41MODULE_LICENSE("GPL");
42MODULE_VERSION(CX88_VERSION);
42 43
43static unsigned int debug; 44static unsigned int debug;
44module_param(debug,int,0644); 45module_param(debug,int,0644);
@@ -613,13 +614,17 @@ static int cx8802_request_acquire(struct cx8802_driver *drv)
613 core->active_type_id != drv->type_id) 614 core->active_type_id != drv->type_id)
614 return -EBUSY; 615 return -EBUSY;
615 616
616 core->input = 0; 617 if (drv->type_id == CX88_MPEG_DVB) {
617 for (i = 0; 618 /* When switching to DVB, always set the input to the tuner */
618 i < (sizeof(core->board.input) / sizeof(struct cx88_input)); 619 core->last_analog_input = core->input;
619 i++) { 620 core->input = 0;
620 if (core->board.input[i].type == CX88_VMUX_DVB) { 621 for (i = 0;
621 core->input = i; 622 i < (sizeof(core->board.input) / sizeof(struct cx88_input));
622 break; 623 i++) {
624 if (core->board.input[i].type == CX88_VMUX_DVB) {
625 core->input = i;
626 break;
627 }
623 } 628 }
624 } 629 }
625 630
@@ -644,6 +649,12 @@ static int cx8802_request_release(struct cx8802_driver *drv)
644 649
645 if (drv->advise_release && --core->active_ref == 0) 650 if (drv->advise_release && --core->active_ref == 0)
646 { 651 {
652 if (drv->type_id == CX88_MPEG_DVB) {
653 /* If the DVB driver is releasing, reset the input
654 state to the last configured analog input */
655 core->input = core->last_analog_input;
656 }
657
647 drv->advise_release(drv); 658 drv->advise_release(drv);
648 core->active_type_id = CX88_BOARD_NONE; 659 core->active_type_id = CX88_BOARD_NONE;
649 mpeg_dbg(1,"%s() Post release GPIO=%x\n", __func__, cx_read(MO_GP0_IO)); 660 mpeg_dbg(1,"%s() Post release GPIO=%x\n", __func__, cx_read(MO_GP0_IO));
@@ -890,14 +901,8 @@ static struct pci_driver cx8802_pci_driver = {
890 901
891static int __init cx8802_init(void) 902static int __init cx8802_init(void)
892{ 903{
893 printk(KERN_INFO "cx88/2: cx2388x MPEG-TS Driver Manager version %d.%d.%d loaded\n", 904 printk(KERN_INFO "cx88/2: cx2388x MPEG-TS Driver Manager version %s loaded\n",
894 (CX88_VERSION_CODE >> 16) & 0xff, 905 CX88_VERSION);
895 (CX88_VERSION_CODE >> 8) & 0xff,
896 CX88_VERSION_CODE & 0xff);
897#ifdef SNAPSHOT
898 printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
899 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
900#endif
901 return pci_register_driver(&cx8802_pci_driver); 906 return pci_register_driver(&cx8802_pci_driver);
902} 907}
903 908
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index cef4f282e5aa..60d28fdd7791 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -45,6 +45,7 @@
45MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards"); 45MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards");
46MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); 46MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
47MODULE_LICENSE("GPL"); 47MODULE_LICENSE("GPL");
48MODULE_VERSION(CX88_VERSION);
48 49
49/* ------------------------------------------------------------------ */ 50/* ------------------------------------------------------------------ */
50 51
@@ -220,7 +221,23 @@ static const struct cx88_ctrl cx8800_ctls[] = {
220 .reg = MO_UV_SATURATION, 221 .reg = MO_UV_SATURATION,
221 .mask = 0x00ff, 222 .mask = 0x00ff,
222 .shift = 0, 223 .shift = 0,
223 },{ 224 }, {
225 .v = {
226 .id = V4L2_CID_SHARPNESS,
227 .name = "Sharpness",
228 .minimum = 0,
229 .maximum = 4,
230 .step = 1,
231 .default_value = 0x0,
232 .type = V4L2_CTRL_TYPE_INTEGER,
233 },
234 .off = 0,
235 /* NOTE: the value is converted and written to both even
236 and odd registers in the code */
237 .reg = MO_FILTER_ODD,
238 .mask = 7 << 7,
239 .shift = 7,
240 }, {
224 .v = { 241 .v = {
225 .id = V4L2_CID_CHROMA_AGC, 242 .id = V4L2_CID_CHROMA_AGC,
226 .name = "Chroma AGC", 243 .name = "Chroma AGC",
@@ -245,6 +262,20 @@ static const struct cx88_ctrl cx8800_ctls[] = {
245 .mask = 1 << 9, 262 .mask = 1 << 9,
246 .shift = 9, 263 .shift = 9,
247 }, { 264 }, {
265 .v = {
266 .id = V4L2_CID_BAND_STOP_FILTER,
267 .name = "Notch filter",
268 .minimum = 0,
269 .maximum = 3,
270 .step = 1,
271 .default_value = 0x0,
272 .type = V4L2_CTRL_TYPE_INTEGER,
273 },
274 .off = 0,
275 .reg = MO_HTOTAL,
276 .mask = 3 << 11,
277 .shift = 11,
278 }, {
248 /* --- audio --- */ 279 /* --- audio --- */
249 .v = { 280 .v = {
250 .id = V4L2_CID_AUDIO_MUTE, 281 .id = V4L2_CID_AUDIO_MUTE,
@@ -300,8 +331,10 @@ const u32 cx88_user_ctrls[] = {
300 V4L2_CID_AUDIO_VOLUME, 331 V4L2_CID_AUDIO_VOLUME,
301 V4L2_CID_AUDIO_BALANCE, 332 V4L2_CID_AUDIO_BALANCE,
302 V4L2_CID_AUDIO_MUTE, 333 V4L2_CID_AUDIO_MUTE,
334 V4L2_CID_SHARPNESS,
303 V4L2_CID_CHROMA_AGC, 335 V4L2_CID_CHROMA_AGC,
304 V4L2_CID_COLOR_KILLER, 336 V4L2_CID_COLOR_KILLER,
337 V4L2_CID_BAND_STOP_FILTER,
305 0 338 0
306}; 339};
307EXPORT_SYMBOL(cx88_user_ctrls); 340EXPORT_SYMBOL(cx88_user_ctrls);
@@ -962,6 +995,10 @@ int cx88_get_control (struct cx88_core *core, struct v4l2_control *ctl)
962 case V4L2_CID_AUDIO_VOLUME: 995 case V4L2_CID_AUDIO_VOLUME:
963 ctl->value = 0x3f - (value & 0x3f); 996 ctl->value = 0x3f - (value & 0x3f);
964 break; 997 break;
998 case V4L2_CID_SHARPNESS:
999 ctl->value = ((value & 0x0200) ? (((value & 0x0180) >> 7) + 1)
1000 : 0);
1001 break;
965 default: 1002 default:
966 ctl->value = ((value + (c->off << c->shift)) & c->mask) >> c->shift; 1003 ctl->value = ((value + (c->off << c->shift)) & c->mask) >> c->shift;
967 break; 1004 break;
@@ -1039,6 +1076,12 @@ int cx88_set_control(struct cx88_core *core, struct v4l2_control *ctl)
1039 } 1076 }
1040 mask=0xffff; 1077 mask=0xffff;
1041 break; 1078 break;
1079 case V4L2_CID_SHARPNESS:
1080 /* 0b000, 0b100, 0b101, 0b110, or 0b111 */
1081 value = (ctl->value < 1 ? 0 : ((ctl->value + 3) << 7));
1082 /* needs to be set for both fields */
1083 cx_andor(MO_FILTER_EVEN, mask, value);
1084 break;
1042 case V4L2_CID_CHROMA_AGC: 1085 case V4L2_CID_CHROMA_AGC:
1043 /* Do not allow chroma AGC to be enabled for SECAM */ 1086 /* Do not allow chroma AGC to be enabled for SECAM */
1044 value = ((ctl->value - c->off) << c->shift) & c->mask; 1087 value = ((ctl->value - c->off) << c->shift) & c->mask;
@@ -1161,7 +1204,6 @@ static int vidioc_querycap (struct file *file, void *priv,
1161 strcpy(cap->driver, "cx8800"); 1204 strcpy(cap->driver, "cx8800");
1162 strlcpy(cap->card, core->board.name, sizeof(cap->card)); 1205 strlcpy(cap->card, core->board.name, sizeof(cap->card));
1163 sprintf(cap->bus_info,"PCI:%s",pci_name(dev->pci)); 1206 sprintf(cap->bus_info,"PCI:%s",pci_name(dev->pci));
1164 cap->version = CX88_VERSION_CODE;
1165 cap->capabilities = 1207 cap->capabilities =
1166 V4L2_CAP_VIDEO_CAPTURE | 1208 V4L2_CAP_VIDEO_CAPTURE |
1167 V4L2_CAP_READWRITE | 1209 V4L2_CAP_READWRITE |
@@ -1480,7 +1522,6 @@ static int radio_querycap (struct file *file, void *priv,
1480 strcpy(cap->driver, "cx8800"); 1522 strcpy(cap->driver, "cx8800");
1481 strlcpy(cap->card, core->board.name, sizeof(cap->card)); 1523 strlcpy(cap->card, core->board.name, sizeof(cap->card));
1482 sprintf(cap->bus_info,"PCI:%s", pci_name(dev->pci)); 1524 sprintf(cap->bus_info,"PCI:%s", pci_name(dev->pci));
1483 cap->version = CX88_VERSION_CODE;
1484 cap->capabilities = V4L2_CAP_TUNER; 1525 cap->capabilities = V4L2_CAP_TUNER;
1485 return 0; 1526 return 0;
1486} 1527}
@@ -2139,14 +2180,8 @@ static struct pci_driver cx8800_pci_driver = {
2139 2180
2140static int __init cx8800_init(void) 2181static int __init cx8800_init(void)
2141{ 2182{
2142 printk(KERN_INFO "cx88/0: cx2388x v4l2 driver version %d.%d.%d loaded\n", 2183 printk(KERN_INFO "cx88/0: cx2388x v4l2 driver version %s loaded\n",
2143 (CX88_VERSION_CODE >> 16) & 0xff, 2184 CX88_VERSION);
2144 (CX88_VERSION_CODE >> 8) & 0xff,
2145 CX88_VERSION_CODE & 0xff);
2146#ifdef SNAPSHOT
2147 printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
2148 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
2149#endif
2150 return pci_register_driver(&cx8800_pci_driver); 2185 return pci_register_driver(&cx8800_pci_driver);
2151} 2186}
2152 2187
@@ -2157,11 +2192,3 @@ static void __exit cx8800_fini(void)
2157 2192
2158module_init(cx8800_init); 2193module_init(cx8800_init);
2159module_exit(cx8800_fini); 2194module_exit(cx8800_fini);
2160
2161/* ----------------------------------------------------------- */
2162/*
2163 * Local variables:
2164 * c-basic-offset: 8
2165 * End:
2166 * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
2167 */
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index a399a8b086ba..fa8d307e1a3d 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -39,9 +39,9 @@
39#include "cx88-reg.h" 39#include "cx88-reg.h"
40#include "tuner-xc2028.h" 40#include "tuner-xc2028.h"
41 41
42#include <linux/version.h>
43#include <linux/mutex.h> 42#include <linux/mutex.h>
44#define CX88_VERSION_CODE KERNEL_VERSION(0, 0, 8) 43
44#define CX88_VERSION "0.0.9"
45 45
46#define UNSET (-1U) 46#define UNSET (-1U)
47 47
@@ -242,6 +242,8 @@ extern const struct sram_channel const cx88_sram_channels[];
242#define CX88_BOARD_SAMSUNG_SMT_7020 84 242#define CX88_BOARD_SAMSUNG_SMT_7020 84
243#define CX88_BOARD_TWINHAN_VP1027_DVBS 85 243#define CX88_BOARD_TWINHAN_VP1027_DVBS 85
244#define CX88_BOARD_TEVII_S464 86 244#define CX88_BOARD_TEVII_S464 86
245#define CX88_BOARD_WINFAST_DTV2000H_PLUS 87
246#define CX88_BOARD_WINFAST_DTV1800H_XC4000 88
245 247
246enum cx88_itype { 248enum cx88_itype {
247 CX88_VMUX_COMPOSITE1 = 1, 249 CX88_VMUX_COMPOSITE1 = 1,
@@ -375,6 +377,7 @@ struct cx88_core {
375 u32 audiomode_manual; 377 u32 audiomode_manual;
376 u32 audiomode_current; 378 u32 audiomode_current;
377 u32 input; 379 u32 input;
380 u32 last_analog_input;
378 u32 astat; 381 u32 astat;
379 u32 use_nicam; 382 u32 use_nicam;
380 unsigned long last_change; 383 unsigned long last_change;
diff --git a/drivers/media/video/davinci/Kconfig b/drivers/media/video/davinci/Kconfig
index 6b1954035649..60a456ebdc7c 100644
--- a/drivers/media/video/davinci/Kconfig
+++ b/drivers/media/video/davinci/Kconfig
@@ -91,3 +91,26 @@ config VIDEO_ISIF
91 91
92 To compile this driver as a module, choose M here: the 92 To compile this driver as a module, choose M here: the
93 module will be called vpfe. 93 module will be called vpfe.
94
95config VIDEO_DM644X_VPBE
96 tristate "DM644X VPBE HW module"
97 depends on ARCH_DAVINCI_DM644x
98 select VIDEO_VPSS_SYSTEM
99 select VIDEOBUF_DMA_CONTIG
100 help
101 Enables VPBE modules used for display on a DM644x
102 SoC.
103
104 To compile this driver as a module, choose M here: the
105 module will be called vpbe.
106
107
108config VIDEO_VPBE_DISPLAY
109 tristate "VPBE V4L2 Display driver"
110 depends on ARCH_DAVINCI_DM644x
111 select VIDEO_DM644X_VPBE
112 help
113 Enables VPBE V4L2 Display driver on a DM644x device
114
115 To compile this driver as a module, choose M here: the
116 module will be called vpbe_display.
diff --git a/drivers/media/video/davinci/Makefile b/drivers/media/video/davinci/Makefile
index a37955745aaa..ae7dafb689ab 100644
--- a/drivers/media/video/davinci/Makefile
+++ b/drivers/media/video/davinci/Makefile
@@ -16,3 +16,5 @@ obj-$(CONFIG_VIDEO_VPFE_CAPTURE) += vpfe_capture.o
16obj-$(CONFIG_VIDEO_DM6446_CCDC) += dm644x_ccdc.o 16obj-$(CONFIG_VIDEO_DM6446_CCDC) += dm644x_ccdc.o
17obj-$(CONFIG_VIDEO_DM355_CCDC) += dm355_ccdc.o 17obj-$(CONFIG_VIDEO_DM355_CCDC) += dm355_ccdc.o
18obj-$(CONFIG_VIDEO_ISIF) += isif.o 18obj-$(CONFIG_VIDEO_ISIF) += isif.o
19obj-$(CONFIG_VIDEO_DM644X_VPBE) += vpbe.o vpbe_osd.o vpbe_venc.o
20obj-$(CONFIG_VIDEO_VPBE_DISPLAY) += vpbe_display.o
diff --git a/drivers/media/video/davinci/vpbe.c b/drivers/media/video/davinci/vpbe.c
new file mode 100644
index 000000000000..d773d30de221
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe.c
@@ -0,0 +1,864 @@
1/*
2 * Copyright (C) 2010 Texas Instruments Inc
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/errno.h>
21#include <linux/fs.h>
22#include <linux/string.h>
23#include <linux/wait.h>
24#include <linux/time.h>
25#include <linux/platform_device.h>
26#include <linux/io.h>
27#include <linux/slab.h>
28#include <linux/clk.h>
29#include <linux/err.h>
30
31#include <media/v4l2-device.h>
32#include <media/davinci/vpbe_types.h>
33#include <media/davinci/vpbe.h>
34#include <media/davinci/vpss.h>
35#include <media/davinci/vpbe_venc.h>
36
37#define VPBE_DEFAULT_OUTPUT "Composite"
38#define VPBE_DEFAULT_MODE "ntsc"
39
40static char *def_output = VPBE_DEFAULT_OUTPUT;
41static char *def_mode = VPBE_DEFAULT_MODE;
42static int debug;
43
44module_param(def_output, charp, S_IRUGO);
45module_param(def_mode, charp, S_IRUGO);
46module_param(debug, int, 0644);
47
48MODULE_PARM_DESC(def_output, "vpbe output name (default:Composite)");
49MODULE_PARM_DESC(def_mode, "vpbe output mode name (default:ntsc");
50MODULE_PARM_DESC(debug, "Debug level 0-1");
51
52MODULE_DESCRIPTION("TI DMXXX VPBE Display controller");
53MODULE_LICENSE("GPL");
54MODULE_AUTHOR("Texas Instruments");
55
56/**
57 * vpbe_current_encoder_info - Get config info for current encoder
58 * @vpbe_dev - vpbe device ptr
59 *
60 * Return ptr to current encoder config info
61 */
62static struct encoder_config_info*
63vpbe_current_encoder_info(struct vpbe_device *vpbe_dev)
64{
65 struct vpbe_config *cfg = vpbe_dev->cfg;
66 int index = vpbe_dev->current_sd_index;
67
68 return ((index == 0) ? &cfg->venc :
69 &cfg->ext_encoders[index-1]);
70}
71
72/**
73 * vpbe_find_encoder_sd_index - Given a name find encoder sd index
74 *
75 * @vpbe_config - ptr to vpbe cfg
76 * @output_index - index used by application
77 *
78 * Return sd index of the encoder
79 */
80static int vpbe_find_encoder_sd_index(struct vpbe_config *cfg,
81 int index)
82{
83 char *encoder_name = cfg->outputs[index].subdev_name;
84 int i;
85
86 /* Venc is always first */
87 if (!strcmp(encoder_name, cfg->venc.module_name))
88 return 0;
89
90 for (i = 0; i < cfg->num_ext_encoders; i++) {
91 if (!strcmp(encoder_name,
92 cfg->ext_encoders[i].module_name))
93 return i+1;
94 }
95
96 return -EINVAL;
97}
98
99/**
100 * vpbe_g_cropcap - Get crop capabilities of the display
101 * @vpbe_dev - vpbe device ptr
102 * @cropcap - cropcap is a ptr to struct v4l2_cropcap
103 *
104 * Update the crop capabilities in crop cap for current
105 * mode
106 */
107static int vpbe_g_cropcap(struct vpbe_device *vpbe_dev,
108 struct v4l2_cropcap *cropcap)
109{
110 if (NULL == cropcap)
111 return -EINVAL;
112 cropcap->bounds.left = 0;
113 cropcap->bounds.top = 0;
114 cropcap->bounds.width = vpbe_dev->current_timings.xres;
115 cropcap->bounds.height = vpbe_dev->current_timings.yres;
116 cropcap->defrect = cropcap->bounds;
117
118 return 0;
119}
120
121/**
122 * vpbe_enum_outputs - enumerate outputs
123 * @vpbe_dev - vpbe device ptr
124 * @output - ptr to v4l2_output structure
125 *
126 * Enumerates the outputs available at the vpbe display
127 * returns the status, -EINVAL if end of output list
128 */
129static int vpbe_enum_outputs(struct vpbe_device *vpbe_dev,
130 struct v4l2_output *output)
131{
132 struct vpbe_config *cfg = vpbe_dev->cfg;
133 int temp_index = output->index;
134
135 if (temp_index >= cfg->num_outputs)
136 return -EINVAL;
137
138 *output = cfg->outputs[temp_index].output;
139 output->index = temp_index;
140
141 return 0;
142}
143
144static int vpbe_get_mode_info(struct vpbe_device *vpbe_dev, char *mode)
145{
146 struct vpbe_config *cfg = vpbe_dev->cfg;
147 struct vpbe_enc_mode_info var;
148 int curr_output = vpbe_dev->current_out_index;
149 int i;
150
151 if (NULL == mode)
152 return -EINVAL;
153
154 for (i = 0; i < cfg->outputs[curr_output].num_modes; i++) {
155 var = cfg->outputs[curr_output].modes[i];
156 if (!strcmp(mode, var.name)) {
157 vpbe_dev->current_timings = var;
158 return 0;
159 }
160 }
161
162 return -EINVAL;
163}
164
165static int vpbe_get_current_mode_info(struct vpbe_device *vpbe_dev,
166 struct vpbe_enc_mode_info *mode_info)
167{
168 if (NULL == mode_info)
169 return -EINVAL;
170
171 *mode_info = vpbe_dev->current_timings;
172
173 return 0;
174}
175
176static int vpbe_get_dv_preset_info(struct vpbe_device *vpbe_dev,
177 unsigned int dv_preset)
178{
179 struct vpbe_config *cfg = vpbe_dev->cfg;
180 struct vpbe_enc_mode_info var;
181 int curr_output = vpbe_dev->current_out_index;
182 int i;
183
184 for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
185 var = cfg->outputs[curr_output].modes[i];
186 if ((var.timings_type & VPBE_ENC_DV_PRESET) &&
187 (var.timings.dv_preset == dv_preset)) {
188 vpbe_dev->current_timings = var;
189 return 0;
190 }
191 }
192
193 return -EINVAL;
194}
195
196/* Get std by std id */
197static int vpbe_get_std_info(struct vpbe_device *vpbe_dev,
198 v4l2_std_id std_id)
199{
200 struct vpbe_config *cfg = vpbe_dev->cfg;
201 struct vpbe_enc_mode_info var;
202 int curr_output = vpbe_dev->current_out_index;
203 int i;
204
205 for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
206 var = cfg->outputs[curr_output].modes[i];
207 if ((var.timings_type & VPBE_ENC_STD) &&
208 (var.timings.std_id & std_id)) {
209 vpbe_dev->current_timings = var;
210 return 0;
211 }
212 }
213
214 return -EINVAL;
215}
216
217static int vpbe_get_std_info_by_name(struct vpbe_device *vpbe_dev,
218 char *std_name)
219{
220 struct vpbe_config *cfg = vpbe_dev->cfg;
221 struct vpbe_enc_mode_info var;
222 int curr_output = vpbe_dev->current_out_index;
223 int i;
224
225 for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
226 var = cfg->outputs[curr_output].modes[i];
227 if (!strcmp(var.name, std_name)) {
228 vpbe_dev->current_timings = var;
229 return 0;
230 }
231 }
232
233 return -EINVAL;
234}
235
236/**
237 * vpbe_set_output - Set output
238 * @vpbe_dev - vpbe device ptr
239 * @index - index of output
240 *
241 * Set vpbe output to the output specified by the index
242 */
243static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
244{
245 struct encoder_config_info *curr_enc_info =
246 vpbe_current_encoder_info(vpbe_dev);
247 struct vpbe_config *cfg = vpbe_dev->cfg;
248 int enc_out_index;
249 int sd_index;
250 int ret = 0;
251
252 if (index >= cfg->num_outputs)
253 return -EINVAL;
254
255 mutex_lock(&vpbe_dev->lock);
256
257 sd_index = vpbe_dev->current_sd_index;
258 enc_out_index = cfg->outputs[index].output.index;
259 /*
260 * Currently we switch the encoder based on output selected
261 * by the application. If media controller is implemented later
262 * there is will be an API added to setup_link between venc
263 * and external encoder. So in that case below comparison always
264 * match and encoder will not be switched. But if application
265 * chose not to use media controller, then this provides current
266 * way of switching encoder at the venc output.
267 */
268 if (strcmp(curr_enc_info->module_name,
269 cfg->outputs[index].subdev_name)) {
270 /* Need to switch the encoder at the output */
271 sd_index = vpbe_find_encoder_sd_index(cfg, index);
272 if (sd_index < 0) {
273 ret = -EINVAL;
274 goto out;
275 }
276
277 if (ret)
278 goto out;
279 }
280
281 /* Set output at the encoder */
282 ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
283 s_routing, 0, enc_out_index, 0);
284 if (ret)
285 goto out;
286
287 /*
288 * It is assumed that venc or extenal encoder will set a default
289 * mode in the sub device. For external encoder or LCD pannel output,
290 * we also need to set up the lcd port for the required mode. So setup
291 * the lcd port for the default mode that is configured in the board
292 * arch/arm/mach-davinci/board-dm355-evm.setup file for the external
293 * encoder.
294 */
295 ret = vpbe_get_mode_info(vpbe_dev,
296 cfg->outputs[index].default_mode);
297 if (!ret) {
298 struct osd_state *osd_device = vpbe_dev->osd_device;
299
300 osd_device->ops.set_left_margin(osd_device,
301 vpbe_dev->current_timings.left_margin);
302 osd_device->ops.set_top_margin(osd_device,
303 vpbe_dev->current_timings.upper_margin);
304 vpbe_dev->current_sd_index = sd_index;
305 vpbe_dev->current_out_index = index;
306 }
307out:
308 mutex_unlock(&vpbe_dev->lock);
309 return ret;
310}
311
312static int vpbe_set_default_output(struct vpbe_device *vpbe_dev)
313{
314 struct vpbe_config *cfg = vpbe_dev->cfg;
315 int ret = 0;
316 int i;
317
318 for (i = 0; i < cfg->num_outputs; i++) {
319 if (!strcmp(def_output,
320 cfg->outputs[i].output.name)) {
321 ret = vpbe_set_output(vpbe_dev, i);
322 if (!ret)
323 vpbe_dev->current_out_index = i;
324 return ret;
325 }
326 }
327 return ret;
328}
329
330/**
331 * vpbe_get_output - Get output
332 * @vpbe_dev - vpbe device ptr
333 *
334 * return current vpbe output to the the index
335 */
336static unsigned int vpbe_get_output(struct vpbe_device *vpbe_dev)
337{
338 return vpbe_dev->current_out_index;
339}
340
341/**
342 * vpbe_s_dv_preset - Set the given preset timings in the encoder
343 *
344 * Sets the preset if supported by the current encoder. Return the status.
345 * 0 - success & -EINVAL on error
346 */
347static int vpbe_s_dv_preset(struct vpbe_device *vpbe_dev,
348 struct v4l2_dv_preset *dv_preset)
349{
350 struct vpbe_config *cfg = vpbe_dev->cfg;
351 int out_index = vpbe_dev->current_out_index;
352 int sd_index = vpbe_dev->current_sd_index;
353 int ret;
354
355
356 if (!(cfg->outputs[out_index].output.capabilities &
357 V4L2_OUT_CAP_PRESETS))
358 return -EINVAL;
359
360 ret = vpbe_get_dv_preset_info(vpbe_dev, dv_preset->preset);
361
362 if (ret)
363 return ret;
364
365 mutex_lock(&vpbe_dev->lock);
366
367
368 ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
369 s_dv_preset, dv_preset);
370 /* set the lcd controller output for the given mode */
371 if (!ret) {
372 struct osd_state *osd_device = vpbe_dev->osd_device;
373
374 osd_device->ops.set_left_margin(osd_device,
375 vpbe_dev->current_timings.left_margin);
376 osd_device->ops.set_top_margin(osd_device,
377 vpbe_dev->current_timings.upper_margin);
378 }
379 mutex_unlock(&vpbe_dev->lock);
380
381 return ret;
382}
383
384/**
385 * vpbe_g_dv_preset - Get the preset in the current encoder
386 *
387 * Get the preset in the current encoder. Return the status. 0 - success
388 * -EINVAL on error
389 */
390static int vpbe_g_dv_preset(struct vpbe_device *vpbe_dev,
391 struct v4l2_dv_preset *dv_preset)
392{
393 if (vpbe_dev->current_timings.timings_type &
394 VPBE_ENC_DV_PRESET) {
395 dv_preset->preset = vpbe_dev->current_timings.timings.dv_preset;
396 return 0;
397 }
398
399 return -EINVAL;
400}
401
402/**
403 * vpbe_enum_dv_presets - Enumerate the dv presets in the current encoder
404 *
405 * Get the preset in the current encoder. Return the status. 0 - success
406 * -EINVAL on error
407 */
408static int vpbe_enum_dv_presets(struct vpbe_device *vpbe_dev,
409 struct v4l2_dv_enum_preset *preset_info)
410{
411 struct vpbe_config *cfg = vpbe_dev->cfg;
412 int out_index = vpbe_dev->current_out_index;
413 struct vpbe_output *output = &cfg->outputs[out_index];
414 int j = 0;
415 int i;
416
417 if (!(output->output.capabilities & V4L2_OUT_CAP_PRESETS))
418 return -EINVAL;
419
420 for (i = 0; i < output->num_modes; i++) {
421 if (output->modes[i].timings_type == VPBE_ENC_DV_PRESET) {
422 if (j == preset_info->index)
423 break;
424 j++;
425 }
426 }
427
428 if (i == output->num_modes)
429 return -EINVAL;
430
431 return v4l_fill_dv_preset_info(output->modes[i].timings.dv_preset,
432 preset_info);
433}
434
435/**
436 * vpbe_s_std - Set the given standard in the encoder
437 *
438 * Sets the standard if supported by the current encoder. Return the status.
439 * 0 - success & -EINVAL on error
440 */
441static int vpbe_s_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id)
442{
443 struct vpbe_config *cfg = vpbe_dev->cfg;
444 int out_index = vpbe_dev->current_out_index;
445 int sd_index = vpbe_dev->current_sd_index;
446 int ret;
447
448 if (!(cfg->outputs[out_index].output.capabilities &
449 V4L2_OUT_CAP_STD))
450 return -EINVAL;
451
452 ret = vpbe_get_std_info(vpbe_dev, *std_id);
453 if (ret)
454 return ret;
455
456 mutex_lock(&vpbe_dev->lock);
457
458 ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
459 s_std_output, *std_id);
460 /* set the lcd controller output for the given mode */
461 if (!ret) {
462 struct osd_state *osd_device = vpbe_dev->osd_device;
463
464 osd_device->ops.set_left_margin(osd_device,
465 vpbe_dev->current_timings.left_margin);
466 osd_device->ops.set_top_margin(osd_device,
467 vpbe_dev->current_timings.upper_margin);
468 }
469 mutex_unlock(&vpbe_dev->lock);
470
471 return ret;
472}
473
474/**
475 * vpbe_g_std - Get the standard in the current encoder
476 *
477 * Get the standard in the current encoder. Return the status. 0 - success
478 * -EINVAL on error
479 */
480static int vpbe_g_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id)
481{
482 struct vpbe_enc_mode_info cur_timings = vpbe_dev->current_timings;
483
484 if (cur_timings.timings_type & VPBE_ENC_STD) {
485 *std_id = cur_timings.timings.std_id;
486 return 0;
487 }
488
489 return -EINVAL;
490}
491
492/**
493 * vpbe_set_mode - Set mode in the current encoder using mode info
494 *
495 * Use the mode string to decide what timings to set in the encoder
496 * This is typically useful when fbset command is used to change the current
497 * timings by specifying a string to indicate the timings.
498 */
499static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
500 struct vpbe_enc_mode_info *mode_info)
501{
502 struct vpbe_enc_mode_info *preset_mode = NULL;
503 struct vpbe_config *cfg = vpbe_dev->cfg;
504 struct v4l2_dv_preset dv_preset;
505 struct osd_state *osd_device;
506 int out_index = vpbe_dev->current_out_index;
507 int ret = 0;
508 int i;
509
510 if ((NULL == mode_info) || (NULL == mode_info->name))
511 return -EINVAL;
512
513 for (i = 0; i < cfg->outputs[out_index].num_modes; i++) {
514 if (!strcmp(mode_info->name,
515 cfg->outputs[out_index].modes[i].name)) {
516 preset_mode = &cfg->outputs[out_index].modes[i];
517 /*
518 * it may be one of the 3 timings type. Check and
519 * invoke right API
520 */
521 if (preset_mode->timings_type & VPBE_ENC_STD)
522 return vpbe_s_std(vpbe_dev,
523 &preset_mode->timings.std_id);
524 if (preset_mode->timings_type & VPBE_ENC_DV_PRESET) {
525 dv_preset.preset =
526 preset_mode->timings.dv_preset;
527 return vpbe_s_dv_preset(vpbe_dev, &dv_preset);
528 }
529 }
530 }
531
532 /* Only custom timing should reach here */
533 if (preset_mode == NULL)
534 return -EINVAL;
535
536 mutex_lock(&vpbe_dev->lock);
537
538 osd_device = vpbe_dev->osd_device;
539 vpbe_dev->current_timings = *preset_mode;
540 osd_device->ops.set_left_margin(osd_device,
541 vpbe_dev->current_timings.left_margin);
542 osd_device->ops.set_top_margin(osd_device,
543 vpbe_dev->current_timings.upper_margin);
544
545 mutex_unlock(&vpbe_dev->lock);
546
547 return ret;
548}
549
550static int vpbe_set_default_mode(struct vpbe_device *vpbe_dev)
551{
552 int ret;
553
554 ret = vpbe_get_std_info_by_name(vpbe_dev, def_mode);
555 if (ret)
556 return ret;
557
558 /* set the default mode in the encoder */
559 return vpbe_set_mode(vpbe_dev, &vpbe_dev->current_timings);
560}
561
562static int platform_device_get(struct device *dev, void *data)
563{
564 struct platform_device *pdev = to_platform_device(dev);
565 struct vpbe_device *vpbe_dev = data;
566
567 if (strcmp("vpbe-osd", pdev->name) == 0)
568 vpbe_dev->osd_device = platform_get_drvdata(pdev);
569
570 return 0;
571}
572
573/**
574 * vpbe_initialize() - Initialize the vpbe display controller
575 * @vpbe_dev - vpbe device ptr
576 *
577 * Master frame buffer device drivers calls this to initialize vpbe
578 * display controller. This will then registers v4l2 device and the sub
579 * devices and sets a current encoder sub device for display. v4l2 display
580 * device driver is the master and frame buffer display device driver is
581 * the slave. Frame buffer display driver checks the initialized during
582 * probe and exit if not initialized. Returns status.
583 */
584static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
585{
586 struct encoder_config_info *enc_info;
587 struct v4l2_subdev **enc_subdev;
588 struct osd_state *osd_device;
589 struct i2c_adapter *i2c_adap;
590 int output_index;
591 int num_encoders;
592 int ret = 0;
593 int err;
594 int i;
595
596 /*
597 * v4l2 abd FBDev frame buffer devices will get the vpbe_dev pointer
598 * from the platform device by iteration of platform drivers and
599 * matching with device name
600 */
601 if (NULL == vpbe_dev || NULL == dev) {
602 printk(KERN_ERR "Null device pointers.\n");
603 return -ENODEV;
604 }
605
606 if (vpbe_dev->initialized)
607 return 0;
608
609 mutex_lock(&vpbe_dev->lock);
610
611 if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
612 /* We have dac clock available for platform */
613 vpbe_dev->dac_clk = clk_get(vpbe_dev->pdev, "vpss_dac");
614 if (IS_ERR(vpbe_dev->dac_clk)) {
615 ret = PTR_ERR(vpbe_dev->dac_clk);
616 goto vpbe_unlock;
617 }
618 if (clk_enable(vpbe_dev->dac_clk)) {
619 ret = -ENODEV;
620 goto vpbe_unlock;
621 }
622 }
623
624 /* first enable vpss clocks */
625 vpss_enable_clock(VPSS_VPBE_CLOCK, 1);
626
627 /* First register a v4l2 device */
628 ret = v4l2_device_register(dev, &vpbe_dev->v4l2_dev);
629 if (ret) {
630 v4l2_err(dev->driver,
631 "Unable to register v4l2 device.\n");
632 goto vpbe_fail_clock;
633 }
634 v4l2_info(&vpbe_dev->v4l2_dev, "vpbe v4l2 device registered\n");
635
636 err = bus_for_each_dev(&platform_bus_type, NULL, vpbe_dev,
637 platform_device_get);
638 if (err < 0)
639 return err;
640
641 vpbe_dev->venc = venc_sub_dev_init(&vpbe_dev->v4l2_dev,
642 vpbe_dev->cfg->venc.module_name);
643 /* register venc sub device */
644 if (vpbe_dev->venc == NULL) {
645 v4l2_err(&vpbe_dev->v4l2_dev,
646 "vpbe unable to init venc sub device\n");
647 ret = -ENODEV;
648 goto vpbe_fail_v4l2_device;
649 }
650 /* initialize osd device */
651 osd_device = vpbe_dev->osd_device;
652
653 if (NULL != osd_device->ops.initialize) {
654 err = osd_device->ops.initialize(osd_device);
655 if (err) {
656 v4l2_err(&vpbe_dev->v4l2_dev,
657 "unable to initialize the OSD device");
658 err = -ENOMEM;
659 goto vpbe_fail_v4l2_device;
660 }
661 }
662
663 /*
664 * Register any external encoders that are configured. At index 0 we
665 * store venc sd index.
666 */
667 num_encoders = vpbe_dev->cfg->num_ext_encoders + 1;
668 vpbe_dev->encoders = kmalloc(
669 sizeof(struct v4l2_subdev *)*num_encoders,
670 GFP_KERNEL);
671 if (NULL == vpbe_dev->encoders) {
672 v4l2_err(&vpbe_dev->v4l2_dev,
673 "unable to allocate memory for encoders sub devices");
674 ret = -ENOMEM;
675 goto vpbe_fail_v4l2_device;
676 }
677
678 i2c_adap = i2c_get_adapter(vpbe_dev->cfg->i2c_adapter_id);
679 for (i = 0; i < (vpbe_dev->cfg->num_ext_encoders + 1); i++) {
680 if (i == 0) {
681 /* venc is at index 0 */
682 enc_subdev = &vpbe_dev->encoders[i];
683 *enc_subdev = vpbe_dev->venc;
684 continue;
685 }
686 enc_info = &vpbe_dev->cfg->ext_encoders[i];
687 if (enc_info->is_i2c) {
688 enc_subdev = &vpbe_dev->encoders[i];
689 *enc_subdev = v4l2_i2c_new_subdev_board(
690 &vpbe_dev->v4l2_dev, i2c_adap,
691 &enc_info->board_info, NULL);
692 if (*enc_subdev)
693 v4l2_info(&vpbe_dev->v4l2_dev,
694 "v4l2 sub device %s registered\n",
695 enc_info->module_name);
696 else {
697 v4l2_err(&vpbe_dev->v4l2_dev, "encoder %s"
698 " failed to register",
699 enc_info->module_name);
700 ret = -ENODEV;
701 goto vpbe_fail_sd_register;
702 }
703 } else
704 v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders"
705 " currently not supported");
706 }
707
708 /* set the current encoder and output to that of venc by default */
709 vpbe_dev->current_sd_index = 0;
710 vpbe_dev->current_out_index = 0;
711 output_index = 0;
712
713 mutex_unlock(&vpbe_dev->lock);
714
715 printk(KERN_NOTICE "Setting default output to %s\n", def_output);
716 ret = vpbe_set_default_output(vpbe_dev);
717 if (ret) {
718 v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s",
719 def_output);
720 return ret;
721 }
722
723 printk(KERN_NOTICE "Setting default mode to %s\n", def_mode);
724 ret = vpbe_set_default_mode(vpbe_dev);
725 if (ret) {
726 v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s",
727 def_mode);
728 return ret;
729 }
730 vpbe_dev->initialized = 1;
731 /* TBD handling of bootargs for default output and mode */
732 return 0;
733
734vpbe_fail_sd_register:
735 kfree(vpbe_dev->encoders);
736vpbe_fail_v4l2_device:
737 v4l2_device_unregister(&vpbe_dev->v4l2_dev);
738vpbe_fail_clock:
739 if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
740 clk_put(vpbe_dev->dac_clk);
741vpbe_unlock:
742 mutex_unlock(&vpbe_dev->lock);
743 return ret;
744}
745
746/**
747 * vpbe_deinitialize() - de-initialize the vpbe display controller
748 * @dev - Master and slave device ptr
749 *
750 * vpbe_master and slave frame buffer devices calls this to de-initialize
751 * the display controller. It is called when master and slave device
752 * driver modules are removed and no longer requires the display controller.
753 */
754static void vpbe_deinitialize(struct device *dev, struct vpbe_device *vpbe_dev)
755{
756 v4l2_device_unregister(&vpbe_dev->v4l2_dev);
757 if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
758 clk_put(vpbe_dev->dac_clk);
759
760 kfree(vpbe_dev->encoders);
761 vpbe_dev->initialized = 0;
762 /* disable vpss clocks */
763 vpss_enable_clock(VPSS_VPBE_CLOCK, 0);
764}
765
766static struct vpbe_device_ops vpbe_dev_ops = {
767 .g_cropcap = vpbe_g_cropcap,
768 .enum_outputs = vpbe_enum_outputs,
769 .set_output = vpbe_set_output,
770 .get_output = vpbe_get_output,
771 .s_dv_preset = vpbe_s_dv_preset,
772 .g_dv_preset = vpbe_g_dv_preset,
773 .enum_dv_presets = vpbe_enum_dv_presets,
774 .s_std = vpbe_s_std,
775 .g_std = vpbe_g_std,
776 .initialize = vpbe_initialize,
777 .deinitialize = vpbe_deinitialize,
778 .get_mode_info = vpbe_get_current_mode_info,
779 .set_mode = vpbe_set_mode,
780};
781
782static __devinit int vpbe_probe(struct platform_device *pdev)
783{
784 struct vpbe_device *vpbe_dev;
785 struct vpbe_config *cfg;
786 int ret = -EINVAL;
787
788 if (pdev->dev.platform_data == NULL) {
789 v4l2_err(pdev->dev.driver, "No platform data\n");
790 return -ENODEV;
791 }
792 cfg = pdev->dev.platform_data;
793
794 if (!cfg->module_name[0] ||
795 !cfg->osd.module_name[0] ||
796 !cfg->venc.module_name[0]) {
797 v4l2_err(pdev->dev.driver, "vpbe display module names not"
798 " defined\n");
799 return ret;
800 }
801
802 vpbe_dev = kzalloc(sizeof(*vpbe_dev), GFP_KERNEL);
803 if (vpbe_dev == NULL) {
804 v4l2_err(pdev->dev.driver, "Unable to allocate memory"
805 " for vpbe_device\n");
806 return -ENOMEM;
807 }
808 vpbe_dev->cfg = cfg;
809 vpbe_dev->ops = vpbe_dev_ops;
810 vpbe_dev->pdev = &pdev->dev;
811
812 if (cfg->outputs->num_modes > 0)
813 vpbe_dev->current_timings = vpbe_dev->cfg->outputs[0].modes[0];
814 else
815 return -ENODEV;
816
817 /* set the driver data in platform device */
818 platform_set_drvdata(pdev, vpbe_dev);
819 mutex_init(&vpbe_dev->lock);
820
821 return 0;
822}
823
824static int vpbe_remove(struct platform_device *device)
825{
826 struct vpbe_device *vpbe_dev = platform_get_drvdata(device);
827
828 kfree(vpbe_dev);
829
830 return 0;
831}
832
833static struct platform_driver vpbe_driver = {
834 .driver = {
835 .name = "vpbe_controller",
836 .owner = THIS_MODULE,
837 },
838 .probe = vpbe_probe,
839 .remove = vpbe_remove,
840};
841
842/**
843 * vpbe_init: initialize the vpbe driver
844 *
845 * This function registers device and driver to the kernel
846 */
847static __init int vpbe_init(void)
848{
849 return platform_driver_register(&vpbe_driver);
850}
851
852/**
853 * vpbe_cleanup : cleanup function for vpbe driver
854 *
855 * This will un-registers the device and driver to the kernel
856 */
857static void vpbe_cleanup(void)
858{
859 platform_driver_unregister(&vpbe_driver);
860}
861
862/* Function for module initialization and cleanup */
863module_init(vpbe_init);
864module_exit(vpbe_cleanup);
diff --git a/drivers/media/video/davinci/vpbe_display.c b/drivers/media/video/davinci/vpbe_display.c
new file mode 100644
index 000000000000..7f1d83a6d575
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe_display.c
@@ -0,0 +1,1860 @@
1/*
2 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation version 2.
7 *
8 * This program is distributed WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/errno.h>
17#include <linux/interrupt.h>
18#include <linux/string.h>
19#include <linux/wait.h>
20#include <linux/time.h>
21#include <linux/platform_device.h>
22#include <linux/irq.h>
23#include <linux/mm.h>
24#include <linux/mutex.h>
25#include <linux/videodev2.h>
26#include <linux/slab.h>
27
28#include <asm/pgtable.h>
29#include <mach/cputype.h>
30
31#include <media/v4l2-dev.h>
32#include <media/v4l2-common.h>
33#include <media/v4l2-ioctl.h>
34#include <media/v4l2-device.h>
35#include <media/davinci/vpbe_display.h>
36#include <media/davinci/vpbe_types.h>
37#include <media/davinci/vpbe.h>
38#include <media/davinci/vpbe_venc.h>
39#include <media/davinci/vpbe_osd.h>
40#include "vpbe_venc_regs.h"
41
42#define VPBE_DISPLAY_DRIVER "vpbe-v4l2"
43
44static int debug;
45
46#define VPBE_DISPLAY_SD_BUF_SIZE (720*576*2)
47#define VPBE_DEFAULT_NUM_BUFS 3
48
49module_param(debug, int, 0644);
50
51static int venc_is_second_field(struct vpbe_display *disp_dev)
52{
53 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
54 int ret;
55 int val;
56
57 ret = v4l2_subdev_call(vpbe_dev->venc,
58 core,
59 ioctl,
60 VENC_GET_FLD,
61 &val);
62 if (ret < 0) {
63 v4l2_err(&vpbe_dev->v4l2_dev,
64 "Error in getting Field ID 0\n");
65 }
66 return val;
67}
68
/*
 * vpbe_isr_even_field - complete the displayed buffer at even-field time
 *
 * If a new frame was queued (cur_frm != next_frm), timestamp the frame
 * that just finished displaying, mark it DONE, wake any waiter, and
 * advance cur_frm.  When no new frame is pending the current one keeps
 * displaying and nothing is done.
 */
static void vpbe_isr_even_field(struct vpbe_display *disp_obj,
				struct vpbe_layer *layer)
{
	struct timespec timevalue;

	/* Nothing queued behind the current frame: keep displaying it */
	if (layer->cur_frm == layer->next_frm)
		return;
	ktime_get_ts(&timevalue);
	layer->cur_frm->ts.tv_sec = timevalue.tv_sec;
	/* v4l2 timestamps are in microseconds; convert from ns */
	layer->cur_frm->ts.tv_usec = timevalue.tv_nsec / NSEC_PER_USEC;
	layer->cur_frm->state = VIDEOBUF_DONE;
	wake_up_interruptible(&layer->cur_frm->done);
	/* Make cur_frm pointing to next_frm */
	layer->cur_frm = layer->next_frm;
}
84
/*
 * vpbe_isr_odd_field - dequeue the next buffer and program it into the OSD
 *
 * Called from interrupt context (spin_lock, not _irqsave, is sufficient
 * here).  If the DMA queue has a buffer and the previous one has been
 * retired (cur_frm == next_frm), pop the head of the queue, mark it
 * ACTIVE and hand its DMA address to the OSD so it is displayed on the
 * next frame; otherwise the current frame is held.
 */
static void vpbe_isr_odd_field(struct vpbe_display *disp_obj,
			       struct vpbe_layer *layer)
{
	struct osd_state *osd_device = disp_obj->osd_device;
	unsigned long addr;

	spin_lock(&disp_obj->dma_queue_lock);
	if (list_empty(&layer->dma_queue) ||
	    (layer->cur_frm != layer->next_frm)) {
		spin_unlock(&disp_obj->dma_queue_lock);
		return;
	}
	/*
	 * one field is displayed configure
	 * the next frame if it is available
	 * otherwise hold on current frame
	 * Get next from the buffer queue
	 */
	layer->next_frm = list_entry(
				layer->dma_queue.next,
				struct videobuf_buffer,
				queue);
	/* Remove that from the buffer queue */
	list_del(&layer->next_frm->queue);
	spin_unlock(&disp_obj->dma_queue_lock);
	/* Mark state of the frame to active */
	layer->next_frm->state = VIDEOBUF_ACTIVE;
	addr = videobuf_to_dma_contig(layer->next_frm);
	/* Program the buffer's DMA address into the display hardware */
	osd_device->ops.start_layer(osd_device,
				    layer->layer_info.id,
				    addr,
				    disp_obj->cbcr_ofst);
}
118
/*
 * venc_isr - VENC vertical-sync interrupt service routine
 *
 * Derives first/second-field and end-of-frame events from the VENC
 * field ID, then for every streaming layer either completes/queues
 * buffers once per frame (progressive) or keeps a software field
 * counter in sync with the hardware and services even/odd fields
 * separately (interlaced).
 *
 * NOTE: 'last_event' is function-static state shared across invocations;
 * this assumes a single VENC interrupt source per system.
 */
static irqreturn_t venc_isr(int irq, void *arg)
{
	struct vpbe_display *disp_dev = (struct vpbe_display *)arg;
	struct vpbe_layer *layer;
	static unsigned last_event;
	unsigned event = 0;
	int fid;
	int i;

	if ((NULL == arg) || (NULL == disp_dev->dev[0]))
		return IRQ_HANDLED;

	if (venc_is_second_field(disp_dev))
		event |= VENC_SECOND_FIELD;
	else
		event |= VENC_FIRST_FIELD;

	if (event == (last_event & ~VENC_END_OF_FRAME)) {
		/*
		 * If the display is non-interlaced, then we need to flag the
		 * end-of-frame event at every interrupt regardless of the
		 * value of the FIDST bit. We can conclude that the display is
		 * non-interlaced if the value of the FIDST bit is unchanged
		 * from the previous interrupt.
		 */
		event |= VENC_END_OF_FRAME;
	} else if (event == VENC_SECOND_FIELD) {
		/* end-of-frame for interlaced display */
		event |= VENC_END_OF_FRAME;
	}
	last_event = event;

	for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
		layer = disp_dev->dev[i];
		/* If streaming is started in this layer */
		if (!layer->started)
			continue;

		/* Skip the very first interrupt after STREAMON */
		if (layer->layer_first_int) {
			layer->layer_first_int = 0;
			continue;
		}
		/* Check the field format */
		if ((V4L2_FIELD_NONE == layer->pix_fmt.field) &&
		    (event & VENC_END_OF_FRAME)) {
			/* Progressive mode */

			vpbe_isr_even_field(disp_dev, layer);
			vpbe_isr_odd_field(disp_dev, layer);
		} else {
			/* Interlaced mode */

			layer->field_id ^= 1;
			if (event & VENC_FIRST_FIELD)
				fid = 0;
			else
				fid = 1;

			/*
			 * If field id does not match with store
			 * field id
			 */
			if (fid != layer->field_id) {
				/* Make them in sync */
				layer->field_id = fid;
				continue;
			}
			/*
			 * device field id and local field id are
			 * in sync. If this is even field
			 */
			if (0 == fid)
				vpbe_isr_even_field(disp_dev, layer);
			else /* odd field */
				vpbe_isr_odd_field(disp_dev, layer);
		}
	}

	return IRQ_HANDLED;
}
200
201/*
202 * vpbe_buffer_prepare()
203 * This is the callback function called from videobuf_qbuf() function
204 * the buffer is prepared and user space virtual address is converted into
205 * physical address
206 */
207static int vpbe_buffer_prepare(struct videobuf_queue *q,
208 struct videobuf_buffer *vb,
209 enum v4l2_field field)
210{
211 struct vpbe_fh *fh = q->priv_data;
212 struct vpbe_layer *layer = fh->layer;
213 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
214 unsigned long addr;
215 int ret;
216
217 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
218 "vpbe_buffer_prepare\n");
219
220 /* If buffer is not initialized, initialize it */
221 if (VIDEOBUF_NEEDS_INIT == vb->state) {
222 vb->width = layer->pix_fmt.width;
223 vb->height = layer->pix_fmt.height;
224 vb->size = layer->pix_fmt.sizeimage;
225 vb->field = field;
226
227 ret = videobuf_iolock(q, vb, NULL);
228 if (ret < 0) {
229 v4l2_err(&vpbe_dev->v4l2_dev, "Failed to map \
230 user address\n");
231 return -EINVAL;
232 }
233
234 addr = videobuf_to_dma_contig(vb);
235
236 if (q->streaming) {
237 if (!IS_ALIGNED(addr, 8)) {
238 v4l2_err(&vpbe_dev->v4l2_dev,
239 "buffer_prepare:offset is \
240 not aligned to 32 bytes\n");
241 return -EINVAL;
242 }
243 }
244 vb->state = VIDEOBUF_PREPARED;
245 }
246 return 0;
247}
248
249/*
250 * vpbe_buffer_setup()
251 * This function allocates memory for the buffers
252 */
253static int vpbe_buffer_setup(struct videobuf_queue *q,
254 unsigned int *count,
255 unsigned int *size)
256{
257 /* Get the file handle object and layer object */
258 struct vpbe_fh *fh = q->priv_data;
259 struct vpbe_layer *layer = fh->layer;
260 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
261
262 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_setup\n");
263
264 *size = layer->pix_fmt.sizeimage;
265
266 /* Store number of buffers allocated in numbuffer member */
267 if (*count < VPBE_DEFAULT_NUM_BUFS)
268 *count = layer->numbuffers = VPBE_DEFAULT_NUM_BUFS;
269
270 return 0;
271}
272
273/*
274 * vpbe_buffer_queue()
275 * This function adds the buffer to DMA queue
276 */
277static void vpbe_buffer_queue(struct videobuf_queue *q,
278 struct videobuf_buffer *vb)
279{
280 /* Get the file handle object and layer object */
281 struct vpbe_fh *fh = q->priv_data;
282 struct vpbe_layer *layer = fh->layer;
283 struct vpbe_display *disp = fh->disp_dev;
284 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
285 unsigned long flags;
286
287 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
288 "vpbe_buffer_queue\n");
289
290 /* add the buffer to the DMA queue */
291 spin_lock_irqsave(&disp->dma_queue_lock, flags);
292 list_add_tail(&vb->queue, &layer->dma_queue);
293 spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
294 /* Change state of the buffer */
295 vb->state = VIDEOBUF_QUEUED;
296}
297
298/*
299 * vpbe_buffer_release()
300 * This function is called from the videobuf layer to free memory allocated to
301 * the buffers
302 */
303static void vpbe_buffer_release(struct videobuf_queue *q,
304 struct videobuf_buffer *vb)
305{
306 /* Get the file handle object and layer object */
307 struct vpbe_fh *fh = q->priv_data;
308 struct vpbe_layer *layer = fh->layer;
309 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
310
311 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
312 "vpbe_buffer_release\n");
313
314 if (V4L2_MEMORY_USERPTR != layer->memory)
315 videobuf_dma_contig_free(q, vb);
316
317 vb->state = VIDEOBUF_NEEDS_INIT;
318}
319
/* videobuf operations wired into videobuf_queue_init() for each layer */
static struct videobuf_queue_ops video_qops = {
	.buf_setup = vpbe_buffer_setup,
	.buf_prepare = vpbe_buffer_prepare,
	.buf_queue = vpbe_buffer_queue,
	.buf_release = vpbe_buffer_release,
};
326
327static
328struct vpbe_layer*
329_vpbe_display_get_other_win_layer(struct vpbe_display *disp_dev,
330 struct vpbe_layer *layer)
331{
332 enum vpbe_display_device_id thiswin, otherwin;
333 thiswin = layer->device_id;
334
335 otherwin = (thiswin == VPBE_DISPLAY_DEVICE_0) ?
336 VPBE_DISPLAY_DEVICE_1 : VPBE_DISPLAY_DEVICE_0;
337 return disp_dev->dev[otherwin];
338}
339
340static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
341 struct vpbe_layer *layer)
342{
343 struct osd_layer_config *cfg = &layer->layer_info.config;
344 struct osd_state *osd_device = disp_dev->osd_device;
345 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
346 unsigned long addr;
347 int ret;
348
349 addr = videobuf_to_dma_contig(layer->cur_frm);
350 /* Set address in the display registers */
351 osd_device->ops.start_layer(osd_device,
352 layer->layer_info.id,
353 addr,
354 disp_dev->cbcr_ofst);
355
356 ret = osd_device->ops.enable_layer(osd_device,
357 layer->layer_info.id, 0);
358 if (ret < 0) {
359 v4l2_err(&vpbe_dev->v4l2_dev,
360 "Error in enabling osd window layer 0\n");
361 return -1;
362 }
363
364 /* Enable the window */
365 layer->layer_info.enable = 1;
366 if (cfg->pixfmt == PIXFMT_NV12) {
367 struct vpbe_layer *otherlayer =
368 _vpbe_display_get_other_win_layer(disp_dev, layer);
369
370 ret = osd_device->ops.enable_layer(osd_device,
371 otherlayer->layer_info.id, 1);
372 if (ret < 0) {
373 v4l2_err(&vpbe_dev->v4l2_dev,
374 "Error in enabling osd window layer 1\n");
375 return -1;
376 }
377 otherlayer->layer_info.enable = 1;
378 }
379 return 0;
380}
381
382static void
383vpbe_disp_calculate_scale_factor(struct vpbe_display *disp_dev,
384 struct vpbe_layer *layer,
385 int expected_xsize, int expected_ysize)
386{
387 struct display_layer_info *layer_info = &layer->layer_info;
388 struct v4l2_pix_format *pixfmt = &layer->pix_fmt;
389 struct osd_layer_config *cfg = &layer->layer_info.config;
390 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
391 int calculated_xsize;
392 int h_exp = 0;
393 int v_exp = 0;
394 int h_scale;
395 int v_scale;
396
397 v4l2_std_id standard_id = vpbe_dev->current_timings.timings.std_id;
398
399 /*
400 * Application initially set the image format. Current display
401 * size is obtained from the vpbe display controller. expected_xsize
402 * and expected_ysize are set through S_CROP ioctl. Based on this,
403 * driver will calculate the scale factors for vertical and
404 * horizontal direction so that the image is displayed scaled
405 * and expanded. Application uses expansion to display the image
406 * in a square pixel. Otherwise it is displayed using displays
407 * pixel aspect ratio.It is expected that application chooses
408 * the crop coordinates for cropped or scaled display. if crop
409 * size is less than the image size, it is displayed cropped or
410 * it is displayed scaled and/or expanded.
411 *
412 * to begin with, set the crop window same as expected. Later we
413 * will override with scaled window size
414 */
415
416 cfg->xsize = pixfmt->width;
417 cfg->ysize = pixfmt->height;
418 layer_info->h_zoom = ZOOM_X1; /* no horizontal zoom */
419 layer_info->v_zoom = ZOOM_X1; /* no horizontal zoom */
420 layer_info->h_exp = H_EXP_OFF; /* no horizontal zoom */
421 layer_info->v_exp = V_EXP_OFF; /* no horizontal zoom */
422
423 if (pixfmt->width < expected_xsize) {
424 h_scale = vpbe_dev->current_timings.xres / pixfmt->width;
425 if (h_scale < 2)
426 h_scale = 1;
427 else if (h_scale >= 4)
428 h_scale = 4;
429 else
430 h_scale = 2;
431 cfg->xsize *= h_scale;
432 if (cfg->xsize < expected_xsize) {
433 if ((standard_id & V4L2_STD_525_60) ||
434 (standard_id & V4L2_STD_625_50)) {
435 calculated_xsize = (cfg->xsize *
436 VPBE_DISPLAY_H_EXP_RATIO_N) /
437 VPBE_DISPLAY_H_EXP_RATIO_D;
438 if (calculated_xsize <= expected_xsize) {
439 h_exp = 1;
440 cfg->xsize = calculated_xsize;
441 }
442 }
443 }
444 if (h_scale == 2)
445 layer_info->h_zoom = ZOOM_X2;
446 else if (h_scale == 4)
447 layer_info->h_zoom = ZOOM_X4;
448 if (h_exp)
449 layer_info->h_exp = H_EXP_9_OVER_8;
450 } else {
451 /* no scaling, only cropping. Set display area to crop area */
452 cfg->xsize = expected_xsize;
453 }
454
455 if (pixfmt->height < expected_ysize) {
456 v_scale = expected_ysize / pixfmt->height;
457 if (v_scale < 2)
458 v_scale = 1;
459 else if (v_scale >= 4)
460 v_scale = 4;
461 else
462 v_scale = 2;
463 cfg->ysize *= v_scale;
464 if (cfg->ysize < expected_ysize) {
465 if ((standard_id & V4L2_STD_625_50)) {
466 calculated_xsize = (cfg->ysize *
467 VPBE_DISPLAY_V_EXP_RATIO_N) /
468 VPBE_DISPLAY_V_EXP_RATIO_D;
469 if (calculated_xsize <= expected_ysize) {
470 v_exp = 1;
471 cfg->ysize = calculated_xsize;
472 }
473 }
474 }
475 if (v_scale == 2)
476 layer_info->v_zoom = ZOOM_X2;
477 else if (v_scale == 4)
478 layer_info->v_zoom = ZOOM_X4;
479 if (v_exp)
480 layer_info->h_exp = V_EXP_6_OVER_5;
481 } else {
482 /* no scaling, only cropping. Set display area to crop area */
483 cfg->ysize = expected_ysize;
484 }
485 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
486 "crop display xsize = %d, ysize = %d\n",
487 cfg->xsize, cfg->ysize);
488}
489
/*
 * vpbe_disp_adj_position - clamp the window position to the display
 *
 * Limits xpos/ypos so the configured window stays inside the current
 * display timings.  The min() operates on unsigned values; this relies
 * on cfg->xsize/ysize already being <= xres/yres (enforced by
 * vpbe_disp_check_window_params / scale calculation), otherwise the
 * subtraction would wrap — TODO confirm callers always guarantee that.
 */
static void vpbe_disp_adj_position(struct vpbe_display *disp_dev,
				   struct vpbe_layer *layer,
				   int top, int left)
{
	struct osd_layer_config *cfg = &layer->layer_info.config;
	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;

	cfg->xpos = min((unsigned int)left,
			vpbe_dev->current_timings.xres - cfg->xsize);
	cfg->ypos = min((unsigned int)top,
			vpbe_dev->current_timings.yres - cfg->ysize);

	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
		 "new xpos = %d, ypos = %d\n",
		 cfg->xpos, cfg->ypos);
}
506
507static void vpbe_disp_check_window_params(struct vpbe_display *disp_dev,
508 struct v4l2_rect *c)
509{
510 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
511
512 if ((c->width == 0) ||
513 ((c->width + c->left) > vpbe_dev->current_timings.xres))
514 c->width = vpbe_dev->current_timings.xres - c->left;
515
516 if ((c->height == 0) || ((c->height + c->top) >
517 vpbe_dev->current_timings.yres))
518 c->height = vpbe_dev->current_timings.yres - c->top;
519
520 /* window height must be even for interlaced display */
521 if (vpbe_dev->current_timings.interlaced)
522 c->height &= (~0x01);
523
524}
525
526/**
527 * vpbe_try_format()
528 * If user application provides width and height, and have bytesperline set
529 * to zero, driver calculates bytesperline and sizeimage based on hardware
530 * limits.
531 */
532static int vpbe_try_format(struct vpbe_display *disp_dev,
533 struct v4l2_pix_format *pixfmt, int check)
534{
535 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
536 int min_height = 1;
537 int min_width = 32;
538 int max_height;
539 int max_width;
540 int bpp;
541
542 if ((pixfmt->pixelformat != V4L2_PIX_FMT_UYVY) &&
543 (pixfmt->pixelformat != V4L2_PIX_FMT_NV12))
544 /* choose default as V4L2_PIX_FMT_UYVY */
545 pixfmt->pixelformat = V4L2_PIX_FMT_UYVY;
546
547 /* Check the field format */
548 if ((pixfmt->field != V4L2_FIELD_INTERLACED) &&
549 (pixfmt->field != V4L2_FIELD_NONE)) {
550 if (vpbe_dev->current_timings.interlaced)
551 pixfmt->field = V4L2_FIELD_INTERLACED;
552 else
553 pixfmt->field = V4L2_FIELD_NONE;
554 }
555
556 if (pixfmt->field == V4L2_FIELD_INTERLACED)
557 min_height = 2;
558
559 if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
560 bpp = 1;
561 else
562 bpp = 2;
563
564 max_width = vpbe_dev->current_timings.xres;
565 max_height = vpbe_dev->current_timings.yres;
566
567 min_width /= bpp;
568
569 if (!pixfmt->width || (pixfmt->width < min_width) ||
570 (pixfmt->width > max_width)) {
571 pixfmt->width = vpbe_dev->current_timings.xres;
572 }
573
574 if (!pixfmt->height || (pixfmt->height < min_height) ||
575 (pixfmt->height > max_height)) {
576 pixfmt->height = vpbe_dev->current_timings.yres;
577 }
578
579 if (pixfmt->bytesperline < (pixfmt->width * bpp))
580 pixfmt->bytesperline = pixfmt->width * bpp;
581
582 /* Make the bytesperline 32 byte aligned */
583 pixfmt->bytesperline = ((pixfmt->width * bpp + 31) & ~31);
584
585 if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
586 pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height +
587 (pixfmt->bytesperline * pixfmt->height >> 1);
588 else
589 pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
590
591 return 0;
592}
593
594static int vpbe_display_g_priority(struct file *file, void *priv,
595 enum v4l2_priority *p)
596{
597 struct vpbe_fh *fh = file->private_data;
598 struct vpbe_layer *layer = fh->layer;
599
600 *p = v4l2_prio_max(&layer->prio);
601
602 return 0;
603}
604
605static int vpbe_display_s_priority(struct file *file, void *priv,
606 enum v4l2_priority p)
607{
608 struct vpbe_fh *fh = file->private_data;
609 struct vpbe_layer *layer = fh->layer;
610 int ret;
611
612 ret = v4l2_prio_change(&layer->prio, &fh->prio, p);
613
614 return ret;
615}
616
617static int vpbe_display_querycap(struct file *file, void *priv,
618 struct v4l2_capability *cap)
619{
620 struct vpbe_fh *fh = file->private_data;
621 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
622
623 cap->version = VPBE_DISPLAY_VERSION_CODE;
624 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
625 strlcpy(cap->driver, VPBE_DISPLAY_DRIVER, sizeof(cap->driver));
626 strlcpy(cap->bus_info, "platform", sizeof(cap->bus_info));
627 strlcpy(cap->card, vpbe_dev->cfg->module_name, sizeof(cap->card));
628
629 return 0;
630}
631
/*
 * VIDIOC_S_CROP handler.  Sanitizes the requested rectangle, derives the
 * zoom/expansion factors, clamps the position, then programs the OSD in
 * order: layer config, zoom, video expansion, interpolation filter.
 * The ordering of the osd ops calls is significant.
 */
static int vpbe_display_s_crop(struct file *file, void *priv,
			       struct v4l2_crop *crop)
{
	struct vpbe_fh *fh = file->private_data;
	struct vpbe_layer *layer = fh->layer;
	struct vpbe_display *disp_dev = fh->disp_dev;
	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
	struct osd_layer_config *cfg = &layer->layer_info.config;
	struct osd_state *osd_device = disp_dev->osd_device;
	struct v4l2_rect *rect = &crop->c;
	int ret;

	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
		 "VIDIOC_S_CROP, layer id = %d\n", layer->device_id);

	if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
		return -EINVAL;
	}

	/* Negative offsets are clamped to the display origin */
	if (rect->top < 0)
		rect->top = 0;
	if (rect->left < 0)
		rect->left = 0;

	vpbe_disp_check_window_params(disp_dev, rect);

	/* Refresh the cached layer config before modifying it */
	osd_device->ops.get_layer_config(osd_device,
					 layer->layer_info.id, cfg);

	vpbe_disp_calculate_scale_factor(disp_dev, layer,
					 rect->width,
					 rect->height);
	vpbe_disp_adj_position(disp_dev, layer, rect->top,
			       rect->left);
	ret = osd_device->ops.set_layer_config(osd_device,
					       layer->layer_info.id, cfg);
	if (ret < 0) {
		v4l2_err(&vpbe_dev->v4l2_dev,
			 "Error in set layer config:\n");
		return -EINVAL;
	}

	/* apply zooming and h or v expansion */
	osd_device->ops.set_zoom(osd_device,
				 layer->layer_info.id,
				 layer->layer_info.h_zoom,
				 layer->layer_info.v_zoom);
	ret = osd_device->ops.set_vid_expansion(osd_device,
						layer->layer_info.h_exp,
						layer->layer_info.v_exp);
	if (ret < 0) {
		v4l2_err(&vpbe_dev->v4l2_dev,
			 "Error in set vid expansion:\n");
		return -EINVAL;
	}

	/* Interpolation filter is needed whenever any scaling is active */
	if ((layer->layer_info.h_zoom != ZOOM_X1) ||
	    (layer->layer_info.v_zoom != ZOOM_X1) ||
	    (layer->layer_info.h_exp != H_EXP_OFF) ||
	    (layer->layer_info.v_exp != V_EXP_OFF))
		/* Enable expansion filter */
		osd_device->ops.set_interpolation_filter(osd_device, 1);
	else
		osd_device->ops.set_interpolation_filter(osd_device, 0);

	return 0;
}
700
701static int vpbe_display_g_crop(struct file *file, void *priv,
702 struct v4l2_crop *crop)
703{
704 struct vpbe_fh *fh = file->private_data;
705 struct vpbe_layer *layer = fh->layer;
706 struct osd_layer_config *cfg = &layer->layer_info.config;
707 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
708 struct osd_state *osd_device = fh->disp_dev->osd_device;
709 struct v4l2_rect *rect = &crop->c;
710 int ret;
711
712 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
713 "VIDIOC_G_CROP, layer id = %d\n",
714 layer->device_id);
715
716 if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
717 v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
718 ret = -EINVAL;
719 }
720 osd_device->ops.get_layer_config(osd_device,
721 layer->layer_info.id, cfg);
722 rect->top = cfg->ypos;
723 rect->left = cfg->xpos;
724 rect->width = cfg->xsize;
725 rect->height = cfg->ysize;
726
727 return 0;
728}
729
730static int vpbe_display_cropcap(struct file *file, void *priv,
731 struct v4l2_cropcap *cropcap)
732{
733 struct vpbe_fh *fh = file->private_data;
734 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
735
736 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_CROPCAP ioctl\n");
737
738 cropcap->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
739 cropcap->bounds.left = 0;
740 cropcap->bounds.top = 0;
741 cropcap->bounds.width = vpbe_dev->current_timings.xres;
742 cropcap->bounds.height = vpbe_dev->current_timings.yres;
743 cropcap->pixelaspect = vpbe_dev->current_timings.aspect;
744 cropcap->defrect = cropcap->bounds;
745 return 0;
746}
747
748static int vpbe_display_g_fmt(struct file *file, void *priv,
749 struct v4l2_format *fmt)
750{
751 struct vpbe_fh *fh = file->private_data;
752 struct vpbe_layer *layer = fh->layer;
753 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
754
755 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
756 "VIDIOC_G_FMT, layer id = %d\n",
757 layer->device_id);
758
759 /* If buffer type is video output */
760 if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
761 v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n");
762 return -EINVAL;
763 }
764 /* Fill in the information about format */
765 fmt->fmt.pix = layer->pix_fmt;
766
767 return 0;
768}
769
770static int vpbe_display_enum_fmt(struct file *file, void *priv,
771 struct v4l2_fmtdesc *fmt)
772{
773 struct vpbe_fh *fh = file->private_data;
774 struct vpbe_layer *layer = fh->layer;
775 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
776 unsigned int index = 0;
777
778 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
779 "VIDIOC_ENUM_FMT, layer id = %d\n",
780 layer->device_id);
781 if (fmt->index > 1) {
782 v4l2_err(&vpbe_dev->v4l2_dev, "Invalid format index\n");
783 return -EINVAL;
784 }
785
786 /* Fill in the information about format */
787 index = fmt->index;
788 memset(fmt, 0, sizeof(*fmt));
789 fmt->index = index;
790 fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
791 if (index == 0) {
792 strcpy(fmt->description, "YUV 4:2:2 - UYVY");
793 fmt->pixelformat = V4L2_PIX_FMT_UYVY;
794 } else {
795 strcpy(fmt->description, "Y/CbCr 4:2:0");
796 fmt->pixelformat = V4L2_PIX_FMT_NV12;
797 }
798
799 return 0;
800}
801
/*
 * VIDIOC_S_FMT handler.  Validates the requested format, stores it on
 * the layer, translates it into an OSD layer config and programs it,
 * then reads the config back so the cached copy reflects what the
 * hardware accepted.  Rejected while streaming.
 */
static int vpbe_display_s_fmt(struct file *file, void *priv,
			      struct v4l2_format *fmt)
{
	struct vpbe_fh *fh = file->private_data;
	struct vpbe_layer *layer = fh->layer;
	struct vpbe_display *disp_dev = fh->disp_dev;
	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
	struct osd_layer_config *cfg = &layer->layer_info.config;
	struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
	struct osd_state *osd_device = disp_dev->osd_device;
	int ret;

	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
		 "VIDIOC_S_FMT, layer id = %d\n",
		 layer->device_id);

	/* If streaming is started, return error */
	if (layer->started) {
		v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
		return -EBUSY;
	}
	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
		v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "invalid type\n");
		return -EINVAL;
	}
	/* Check for valid pixel format */
	ret = vpbe_try_format(disp_dev, pixfmt, 1);
	if (ret)
		return ret;

	/* YUV420 is requested, check availability of the
	other video window */

	layer->pix_fmt = *pixfmt;

	/* Get osd layer config */
	osd_device->ops.get_layer_config(osd_device,
					 layer->layer_info.id, cfg);
	/* Store the pixel format in the layer object */
	cfg->xsize = pixfmt->width;
	cfg->ysize = pixfmt->height;
	cfg->line_length = pixfmt->bytesperline;
	cfg->ypos = 0;
	cfg->xpos = 0;
	cfg->interlaced = vpbe_dev->current_timings.interlaced;

	if (V4L2_PIX_FMT_UYVY == pixfmt->pixelformat)
		cfg->pixfmt = PIXFMT_YCbCrI;

	/* Change of the default pixel format for both video windows */
	if (V4L2_PIX_FMT_NV12 == pixfmt->pixelformat) {
		struct vpbe_layer *otherlayer;
		cfg->pixfmt = PIXFMT_NV12;
		/* NV12 needs both windows: luma here, chroma on the peer */
		otherlayer = _vpbe_display_get_other_win_layer(disp_dev,
							       layer);
		otherlayer->layer_info.config.pixfmt = PIXFMT_NV12;
	}

	/* Set the layer config in the osd window */
	ret = osd_device->ops.set_layer_config(osd_device,
					       layer->layer_info.id, cfg);
	if (ret < 0) {
		v4l2_err(&vpbe_dev->v4l2_dev,
			 "Error in S_FMT params:\n");
		return -EINVAL;
	}

	/* Readback and fill the local copy of current pix format */
	osd_device->ops.get_layer_config(osd_device,
					 layer->layer_info.id, cfg);

	return 0;
}
875
876static int vpbe_display_try_fmt(struct file *file, void *priv,
877 struct v4l2_format *fmt)
878{
879 struct vpbe_fh *fh = file->private_data;
880 struct vpbe_display *disp_dev = fh->disp_dev;
881 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
882 struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
883
884 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_TRY_FMT\n");
885
886 if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
887 v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n");
888 return -EINVAL;
889 }
890
891 /* Check for valid field format */
892 return vpbe_try_format(disp_dev, pixfmt, 0);
893
894}
895
896/**
897 * vpbe_display_s_std - Set the given standard in the encoder
898 *
899 * Sets the standard if supported by the current encoder. Return the status.
900 * 0 - success & -EINVAL on error
901 */
902static int vpbe_display_s_std(struct file *file, void *priv,
903 v4l2_std_id *std_id)
904{
905 struct vpbe_fh *fh = priv;
906 struct vpbe_layer *layer = fh->layer;
907 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
908 int ret;
909
910 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_STD\n");
911
912 /* If streaming is started, return error */
913 if (layer->started) {
914 v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
915 return -EBUSY;
916 }
917 if (NULL != vpbe_dev->ops.s_std) {
918 ret = vpbe_dev->ops.s_std(vpbe_dev, std_id);
919 if (ret) {
920 v4l2_err(&vpbe_dev->v4l2_dev,
921 "Failed to set standard for sub devices\n");
922 return -EINVAL;
923 }
924 } else {
925 return -EINVAL;
926 }
927
928 return 0;
929}
930
931/**
932 * vpbe_display_g_std - Get the standard in the current encoder
933 *
934 * Get the standard in the current encoder. Return the status. 0 - success
935 * -EINVAL on error
936 */
937static int vpbe_display_g_std(struct file *file, void *priv,
938 v4l2_std_id *std_id)
939{
940 struct vpbe_fh *fh = priv;
941 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
942
943 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_STD\n");
944
945 /* Get the standard from the current encoder */
946 if (vpbe_dev->current_timings.timings_type & VPBE_ENC_STD) {
947 *std_id = vpbe_dev->current_timings.timings.std_id;
948 return 0;
949 }
950
951 return -EINVAL;
952}
953
954/**
955 * vpbe_display_enum_output - enumerate outputs
956 *
957 * Enumerates the outputs available at the vpbe display
958 * returns the status, -EINVAL if end of output list
959 */
960static int vpbe_display_enum_output(struct file *file, void *priv,
961 struct v4l2_output *output)
962{
963 struct vpbe_fh *fh = priv;
964 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
965 int ret;
966
967 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_OUTPUT\n");
968
969 /* Enumerate outputs */
970
971 if (NULL == vpbe_dev->ops.enum_outputs)
972 return -EINVAL;
973
974 ret = vpbe_dev->ops.enum_outputs(vpbe_dev, output);
975 if (ret) {
976 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
977 "Failed to enumerate outputs\n");
978 return -EINVAL;
979 }
980
981 return 0;
982}
983
984/**
985 * vpbe_display_s_output - Set output to
986 * the output specified by the index
987 */
988static int vpbe_display_s_output(struct file *file, void *priv,
989 unsigned int i)
990{
991 struct vpbe_fh *fh = priv;
992 struct vpbe_layer *layer = fh->layer;
993 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
994 int ret;
995
996 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_OUTPUT\n");
997 /* If streaming is started, return error */
998 if (layer->started) {
999 v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
1000 return -EBUSY;
1001 }
1002 if (NULL == vpbe_dev->ops.set_output)
1003 return -EINVAL;
1004
1005 ret = vpbe_dev->ops.set_output(vpbe_dev, i);
1006 if (ret) {
1007 v4l2_err(&vpbe_dev->v4l2_dev,
1008 "Failed to set output for sub devices\n");
1009 return -EINVAL;
1010 }
1011
1012 return 0;
1013}
1014
1015/**
1016 * vpbe_display_g_output - Get output from subdevice
1017 * for a given by the index
1018 */
1019static int vpbe_display_g_output(struct file *file, void *priv,
1020 unsigned int *i)
1021{
1022 struct vpbe_fh *fh = priv;
1023 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1024
1025 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_OUTPUT\n");
1026 /* Get the standard from the current encoder */
1027 *i = vpbe_dev->current_out_index;
1028
1029 return 0;
1030}
1031
1032/**
1033 * vpbe_display_enum_dv_presets - Enumerate the dv presets
1034 *
1035 * enum the preset in the current encoder. Return the status. 0 - success
1036 * -EINVAL on error
1037 */
1038static int
1039vpbe_display_enum_dv_presets(struct file *file, void *priv,
1040 struct v4l2_dv_enum_preset *preset)
1041{
1042 struct vpbe_fh *fh = priv;
1043 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1044 int ret;
1045
1046 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_PRESETS\n");
1047
1048 /* Enumerate outputs */
1049 if (NULL == vpbe_dev->ops.enum_dv_presets)
1050 return -EINVAL;
1051
1052 ret = vpbe_dev->ops.enum_dv_presets(vpbe_dev, preset);
1053 if (ret) {
1054 v4l2_err(&vpbe_dev->v4l2_dev,
1055 "Failed to enumerate dv presets info\n");
1056 return -EINVAL;
1057 }
1058
1059 return 0;
1060}
1061
1062/**
1063 * vpbe_display_s_dv_preset - Set the dv presets
1064 *
1065 * Set the preset in the current encoder. Return the status. 0 - success
1066 * -EINVAL on error
1067 */
1068static int
1069vpbe_display_s_dv_preset(struct file *file, void *priv,
1070 struct v4l2_dv_preset *preset)
1071{
1072 struct vpbe_fh *fh = priv;
1073 struct vpbe_layer *layer = fh->layer;
1074 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1075 int ret;
1076
1077 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_DV_PRESETS\n");
1078
1079
1080 /* If streaming is started, return error */
1081 if (layer->started) {
1082 v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
1083 return -EBUSY;
1084 }
1085
1086 /* Set the given standard in the encoder */
1087 if (NULL != vpbe_dev->ops.s_dv_preset)
1088 return -EINVAL;
1089
1090 ret = vpbe_dev->ops.s_dv_preset(vpbe_dev, preset);
1091 if (ret) {
1092 v4l2_err(&vpbe_dev->v4l2_dev,
1093 "Failed to set the dv presets info\n");
1094 return -EINVAL;
1095 }
1096 /* set the current norm to zero to be consistent. If STD is used
1097 * v4l2 layer will set the norm properly on successful s_std call
1098 */
1099 layer->video_dev.current_norm = 0;
1100
1101 return 0;
1102}
1103
1104/**
1105 * vpbe_display_g_dv_preset - Set the dv presets
1106 *
1107 * Get the preset in the current encoder. Return the status. 0 - success
1108 * -EINVAL on error
1109 */
1110static int
1111vpbe_display_g_dv_preset(struct file *file, void *priv,
1112 struct v4l2_dv_preset *dv_preset)
1113{
1114 struct vpbe_fh *fh = priv;
1115 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1116
1117 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_DV_PRESETS\n");
1118
1119 /* Get the given standard in the encoder */
1120
1121 if (vpbe_dev->current_timings.timings_type &
1122 VPBE_ENC_DV_PRESET) {
1123 dv_preset->preset =
1124 vpbe_dev->current_timings.timings.dv_preset;
1125 } else {
1126 return -EINVAL;
1127 }
1128
1129 return 0;
1130}
1131
1132static int vpbe_display_streamoff(struct file *file, void *priv,
1133 enum v4l2_buf_type buf_type)
1134{
1135 struct vpbe_fh *fh = file->private_data;
1136 struct vpbe_layer *layer = fh->layer;
1137 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1138 struct osd_state *osd_device = fh->disp_dev->osd_device;
1139 int ret;
1140
1141 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
1142 "VIDIOC_STREAMOFF,layer id = %d\n",
1143 layer->device_id);
1144
1145 if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
1146 v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
1147 return -EINVAL;
1148 }
1149
1150 /* If io is allowed for this file handle, return error */
1151 if (!fh->io_allowed) {
1152 v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
1153 return -EACCES;
1154 }
1155
1156 /* If streaming is not started, return error */
1157 if (!layer->started) {
1158 v4l2_err(&vpbe_dev->v4l2_dev, "streaming not started in layer"
1159 " id = %d\n", layer->device_id);
1160 return -EINVAL;
1161 }
1162
1163 osd_device->ops.disable_layer(osd_device,
1164 layer->layer_info.id);
1165 layer->started = 0;
1166 ret = videobuf_streamoff(&layer->buffer_queue);
1167
1168 return ret;
1169}
1170
/*
 * vpbe_display_streamon() - VIDIOC_STREAMON handler
 *
 * Starts streaming (video output only): kicks videobuf, pulls the first
 * queued buffer and programs the OSD/VENC with it. Returns 0 on success
 * or a negative error code; on mid-start failure videobuf is unwound.
 */
static int vpbe_display_streamon(struct file *file, void *priv,
				enum v4l2_buf_type buf_type)
{
	struct vpbe_fh *fh = file->private_data;
	struct vpbe_layer *layer = fh->layer;
	struct vpbe_display *disp_dev = fh->disp_dev;
	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
	struct osd_state *osd_device = disp_dev->osd_device;
	int ret;

	/*
	 * Turn the OSD window off before (re)programming it; it is switched
	 * back on via vpbe_set_osd_display_params() below.
	 */
	osd_device->ops.disable_layer(osd_device,
			layer->layer_info.id);

	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_STREAMON, layerid=%d\n",
						layer->device_id);

	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
		v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
		return -EINVAL;
	}

	/* If file handle is not allowed IO, return error */
	if (!fh->io_allowed) {
		v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
		return -EACCES;
	}
	/* If Streaming is already started, return error */
	if (layer->started) {
		v4l2_err(&vpbe_dev->v4l2_dev, "layer is already streaming\n");
		return -EBUSY;
	}

	/*
	 * Call videobuf_streamon to start streaming
	 * in videobuf
	 */
	ret = videobuf_streamon(&layer->buffer_queue);
	if (ret) {
		v4l2_err(&vpbe_dev->v4l2_dev,
		"error in videobuf_streamon\n");
		return ret;
	}
	/* If buffer queue is empty, return error */
	if (list_empty(&layer->dma_queue)) {
		v4l2_err(&vpbe_dev->v4l2_dev, "buffer queue is empty\n");
		goto streamoff;
	}
	/* Get the next frame from the buffer queue */
	layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
				struct videobuf_buffer, queue);
	/* Remove buffer from the buffer queue */
	list_del(&layer->cur_frm->queue);
	/* Mark state of the current frame to active */
	layer->cur_frm->state = VIDEOBUF_ACTIVE;
	/* Initialize field_id and started member */
	layer->field_id = 0;

	/* Set parameters in OSD and VENC */
	ret = vpbe_set_osd_display_params(disp_dev, layer);
	if (ret < 0)
		goto streamoff;

	/*
	 * if request format is yuv420 semiplanar, need to
	 * enable both video windows
	 */
	layer->started = 1;

	/* First interrupt after start needs special buffer handling */
	layer->layer_first_int = 1;

	return ret;
streamoff:
	/* Unwind the videobuf state; any dequeued buffer is dropped with it */
	ret = videobuf_streamoff(&layer->buffer_queue);
	return ret;
}
1246
1247static int vpbe_display_dqbuf(struct file *file, void *priv,
1248 struct v4l2_buffer *buf)
1249{
1250 struct vpbe_fh *fh = file->private_data;
1251 struct vpbe_layer *layer = fh->layer;
1252 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1253 int ret;
1254
1255 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
1256 "VIDIOC_DQBUF, layer id = %d\n",
1257 layer->device_id);
1258
1259 if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
1260 v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
1261 return -EINVAL;
1262 }
1263 /* If this file handle is not allowed to do IO, return error */
1264 if (!fh->io_allowed) {
1265 v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
1266 return -EACCES;
1267 }
1268 if (file->f_flags & O_NONBLOCK)
1269 /* Call videobuf_dqbuf for non blocking mode */
1270 ret = videobuf_dqbuf(&layer->buffer_queue, buf, 1);
1271 else
1272 /* Call videobuf_dqbuf for blocking mode */
1273 ret = videobuf_dqbuf(&layer->buffer_queue, buf, 0);
1274
1275 return ret;
1276}
1277
1278static int vpbe_display_qbuf(struct file *file, void *priv,
1279 struct v4l2_buffer *p)
1280{
1281 struct vpbe_fh *fh = file->private_data;
1282 struct vpbe_layer *layer = fh->layer;
1283 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1284
1285 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
1286 "VIDIOC_QBUF, layer id = %d\n",
1287 layer->device_id);
1288
1289 if (V4L2_BUF_TYPE_VIDEO_OUTPUT != p->type) {
1290 v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
1291 return -EINVAL;
1292 }
1293
1294 /* If this file handle is not allowed to do IO, return error */
1295 if (!fh->io_allowed) {
1296 v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
1297 return -EACCES;
1298 }
1299
1300 return videobuf_qbuf(&layer->buffer_queue, p);
1301}
1302
1303static int vpbe_display_querybuf(struct file *file, void *priv,
1304 struct v4l2_buffer *buf)
1305{
1306 struct vpbe_fh *fh = file->private_data;
1307 struct vpbe_layer *layer = fh->layer;
1308 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1309 int ret;
1310
1311 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
1312 "VIDIOC_QUERYBUF, layer id = %d\n",
1313 layer->device_id);
1314
1315 if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
1316 v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
1317 return -EINVAL;
1318 }
1319
1320 /* Call videobuf_querybuf to get information */
1321 ret = videobuf_querybuf(&layer->buffer_queue, buf);
1322
1323 return ret;
1324}
1325
/*
 * vpbe_display_reqbufs() - VIDIOC_REQBUFS handler
 *
 * Initializes the videobuf DMA-contig queue for this layer, grants
 * exclusive IO ownership to the calling file handle and allocates the
 * requested buffers. Fails with -EBUSY if a handle already owns the IO.
 */
static int vpbe_display_reqbufs(struct file *file, void *priv,
			struct v4l2_requestbuffers *req_buf)
{
	struct vpbe_fh *fh = file->private_data;
	struct vpbe_layer *layer = fh->layer;
	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
	int ret;

	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_reqbufs\n");

	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) {
		v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
		return -EINVAL;
	}

	/* If io users of the layer is not zero, return error */
	/* NOTE(review): the log text is misleading -- this path means the
	 * layer is already owned by another handle, not "not IO user". */
	if (0 != layer->io_usrs) {
		v4l2_err(&vpbe_dev->v4l2_dev, "not IO user\n");
		return -EBUSY;
	}
	/* Initialize videobuf queue as per the buffer type */
	videobuf_queue_dma_contig_init(&layer->buffer_queue,
						&video_qops,
						vpbe_dev->pdev,
						&layer->irqlock,
						V4L2_BUF_TYPE_VIDEO_OUTPUT,
						layer->pix_fmt.field,
						sizeof(struct videobuf_buffer),
						fh, NULL);

	/* Set io allowed member of file handle to TRUE */
	fh->io_allowed = 1;
	/* Increment io usrs member of layer object to 1 */
	layer->io_usrs = 1;
	/* Store type of memory requested in layer object */
	layer->memory = req_buf->memory;
	/* Initialize buffer queue */
	INIT_LIST_HEAD(&layer->dma_queue);
	/* Allocate buffers */
	ret = videobuf_reqbufs(&layer->buffer_queue, req_buf);

	return ret;
}
1369
1370/*
1371 * vpbe_display_mmap()
1372 * It is used to map kernel space buffers into user spaces
1373 */
1374static int vpbe_display_mmap(struct file *filep, struct vm_area_struct *vma)
1375{
1376 /* Get the layer object and file handle object */
1377 struct vpbe_fh *fh = filep->private_data;
1378 struct vpbe_layer *layer = fh->layer;
1379 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1380
1381 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_mmap\n");
1382
1383 return videobuf_mmap_mapper(&layer->buffer_queue, vma);
1384}
1385
1386/* vpbe_display_poll(): It is used for select/poll system call
1387 */
1388static unsigned int vpbe_display_poll(struct file *filep, poll_table *wait)
1389{
1390 struct vpbe_fh *fh = filep->private_data;
1391 struct vpbe_layer *layer = fh->layer;
1392 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1393 unsigned int err = 0;
1394
1395 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_poll\n");
1396 if (layer->started)
1397 err = videobuf_poll_stream(filep, &layer->buffer_queue, wait);
1398 return err;
1399}
1400
1401/*
1402 * vpbe_display_open()
1403 * It creates object of file handle structure and stores it in private_data
1404 * member of filepointer
1405 */
1406static int vpbe_display_open(struct file *file)
1407{
1408 struct vpbe_fh *fh = NULL;
1409 struct vpbe_layer *layer = video_drvdata(file);
1410 struct vpbe_display *disp_dev = layer->disp_dev;
1411 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
1412 struct osd_state *osd_device = disp_dev->osd_device;
1413 int err;
1414
1415 /* Allocate memory for the file handle object */
1416 fh = kmalloc(sizeof(struct vpbe_fh), GFP_KERNEL);
1417 if (fh == NULL) {
1418 v4l2_err(&vpbe_dev->v4l2_dev,
1419 "unable to allocate memory for file handle object\n");
1420 return -ENOMEM;
1421 }
1422 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
1423 "vpbe display open plane = %d\n",
1424 layer->device_id);
1425
1426 /* store pointer to fh in private_data member of filep */
1427 file->private_data = fh;
1428 fh->layer = layer;
1429 fh->disp_dev = disp_dev;
1430
1431 if (!layer->usrs) {
1432
1433 /* First claim the layer for this device */
1434 err = osd_device->ops.request_layer(osd_device,
1435 layer->layer_info.id);
1436 if (err < 0) {
1437 /* Couldn't get layer */
1438 v4l2_err(&vpbe_dev->v4l2_dev,
1439 "Display Manager failed to allocate layer\n");
1440 kfree(fh);
1441 return -EINVAL;
1442 }
1443 }
1444 /* Increment layer usrs counter */
1445 layer->usrs++;
1446 /* Set io_allowed member to false */
1447 fh->io_allowed = 0;
1448 /* Initialize priority of this instance to default priority */
1449 fh->prio = V4L2_PRIORITY_UNSET;
1450 v4l2_prio_open(&layer->prio, &fh->prio);
1451 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
1452 "vpbe display device opened successfully\n");
1453 return 0;
1454}
1455
/*
 * vpbe_display_release()
 * This function deletes buffer queue, frees the buffers and the davinci
 * display file handle
 */
static int vpbe_display_release(struct file *file)
{
	/* Get the layer object and file handle object */
	struct vpbe_fh *fh = file->private_data;
	struct vpbe_layer *layer = fh->layer;
	struct osd_layer_config *cfg = &layer->layer_info.config;
	struct vpbe_display *disp_dev = fh->disp_dev;
	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
	struct osd_state *osd_device = disp_dev->osd_device;

	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_release\n");

	/* if this instance is doing IO */
	if (fh->io_allowed) {
		/* Reset io_usrs member of layer object */
		layer->io_usrs = 0;

		/* stop the hardware window before tearing the queue down */
		osd_device->ops.disable_layer(osd_device,
				layer->layer_info.id);
		layer->started = 0;
		/* Free buffers allocated */
		videobuf_queue_cancel(&layer->buffer_queue);
		videobuf_mmap_free(&layer->buffer_queue);
	}

	/* Decrement layer usrs counter */
	layer->usrs--;
	/* If this file handle has initialize encoder device, reset it */
	if (!layer->usrs) {
		/* NV12 drives both video windows, so release the pair too */
		if (cfg->pixfmt == PIXFMT_NV12) {
			struct vpbe_layer *otherlayer;
			otherlayer =
			_vpbe_display_get_other_win_layer(disp_dev, layer);
			osd_device->ops.disable_layer(osd_device,
					otherlayer->layer_info.id);
			osd_device->ops.release_layer(osd_device,
					otherlayer->layer_info.id);
		}
		osd_device->ops.disable_layer(osd_device,
				layer->layer_info.id);
		osd_device->ops.release_layer(osd_device,
				layer->layer_info.id);
	}
	/* Close the priority */
	v4l2_prio_close(&layer->prio, fh->prio);
	file->private_data = NULL;

	/* Free memory allocated to file handle object */
	kfree(fh);

	/* reset the chroma offset shared across handles of this device */
	disp_dev->cbcr_ofst = 0;

	return 0;
}
1515
1516#ifdef CONFIG_VIDEO_ADV_DEBUG
1517static int vpbe_display_g_register(struct file *file, void *priv,
1518 struct v4l2_dbg_register *reg)
1519{
1520 struct v4l2_dbg_match *match = &reg->match;
1521
1522 if (match->type >= 2) {
1523 v4l2_subdev_call(vpbe_dev->venc,
1524 core,
1525 g_register,
1526 reg);
1527 }
1528
1529 return 0;
1530}
1531
/* VIDIOC_DBG_S_REGISTER handler: register writes are not implemented;
 * returns success as a deliberate no-op so debug tools do not error out. */
static int vpbe_display_s_register(struct file *file, void *priv,
			struct v4l2_dbg_register *reg)
{
	return 0;
}
1537#endif
1538
/* vpbe display ioctl operations (the old comment said "capture") */
static const struct v4l2_ioctl_ops vpbe_ioctl_ops = {
	.vidioc_querycap	 = vpbe_display_querycap,
	.vidioc_g_fmt_vid_out    = vpbe_display_g_fmt,
	.vidioc_enum_fmt_vid_out = vpbe_display_enum_fmt,
	.vidioc_s_fmt_vid_out    = vpbe_display_s_fmt,
	.vidioc_try_fmt_vid_out  = vpbe_display_try_fmt,
	.vidioc_reqbufs		 = vpbe_display_reqbufs,
	.vidioc_querybuf	 = vpbe_display_querybuf,
	.vidioc_qbuf		 = vpbe_display_qbuf,
	.vidioc_dqbuf		 = vpbe_display_dqbuf,
	.vidioc_streamon	 = vpbe_display_streamon,
	.vidioc_streamoff	 = vpbe_display_streamoff,
	.vidioc_cropcap		 = vpbe_display_cropcap,
	.vidioc_g_crop		 = vpbe_display_g_crop,
	.vidioc_s_crop		 = vpbe_display_s_crop,
	.vidioc_g_priority	 = vpbe_display_g_priority,
	.vidioc_s_priority	 = vpbe_display_s_priority,
	.vidioc_s_std		 = vpbe_display_s_std,
	.vidioc_g_std		 = vpbe_display_g_std,
	.vidioc_enum_output	 = vpbe_display_enum_output,
	.vidioc_s_output	 = vpbe_display_s_output,
	.vidioc_g_output	 = vpbe_display_g_output,
	.vidioc_s_dv_preset	 = vpbe_display_s_dv_preset,
	.vidioc_g_dv_preset	 = vpbe_display_g_dv_preset,
	.vidioc_enum_dv_presets	 = vpbe_display_enum_dv_presets,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.vidioc_g_register	 = vpbe_display_g_register,
	.vidioc_s_register	 = vpbe_display_s_register,
#endif
};
1570
/* File operations for the vpbe display device nodes */
static struct v4l2_file_operations vpbe_fops = {
	.owner = THIS_MODULE,
	.open = vpbe_display_open,
	.release = vpbe_display_release,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vpbe_display_mmap,
	.poll = vpbe_display_poll
};
1579
1580static int vpbe_device_get(struct device *dev, void *data)
1581{
1582 struct platform_device *pdev = to_platform_device(dev);
1583 struct vpbe_display *vpbe_disp = data;
1584
1585 if (strcmp("vpbe_controller", pdev->name) == 0)
1586 vpbe_disp->vpbe_dev = platform_get_drvdata(pdev);
1587
1588 if (strcmp("vpbe-osd", pdev->name) == 0)
1589 vpbe_disp->osd_device = platform_get_drvdata(pdev);
1590
1591 return 0;
1592}
1593
/*
 * init_vpbe_layer() - allocate and initialize one display layer object
 *
 * Sets up the layer's video_device (fops, ioctl ops, lock, tvnorms),
 * its locks, device id and OSD window id. Returns 0 on success or
 * -ENOMEM on allocation failure.
 */
static __devinit int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
				     struct platform_device *pdev)
{
	struct vpbe_layer *vpbe_display_layer = NULL;
	struct video_device *vbd = NULL;

	/* Allocate memory for four plane display objects */

	disp_dev->dev[i] =
		kzalloc(sizeof(struct vpbe_layer), GFP_KERNEL);

	/* If memory allocation fails, return error */
	if (!disp_dev->dev[i]) {
		printk(KERN_ERR "ran out of memory\n");
		return -ENOMEM;
	}
	spin_lock_init(&disp_dev->dev[i]->irqlock);
	mutex_init(&disp_dev->dev[i]->opslock);

	/* Get the pointer to the layer object */
	vpbe_display_layer = disp_dev->dev[i];
	vbd = &vpbe_display_layer->video_dev;
	/* Initialize field of video device */
	vbd->release = video_device_release_empty;
	vbd->fops = &vpbe_fops;
	vbd->ioctl_ops = &vpbe_ioctl_ops;
	vbd->minor = -1;
	vbd->v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
	vbd->lock = &vpbe_display_layer->opslock;

	/* Seed tvnorms/current_norm only when the encoder is STD based */
	if (disp_dev->vpbe_dev->current_timings.timings_type &
			VPBE_ENC_STD) {
		vbd->tvnorms = (V4L2_STD_525_60 | V4L2_STD_625_50);
		vbd->current_norm =
			disp_dev->vpbe_dev->
				current_timings.timings.std_id;
	} else
		vbd->current_norm = 0;

	snprintf(vbd->name, sizeof(vbd->name),
		 "DaVinci_VPBE Display_DRIVER_V%d.%d.%d",
		 (VPBE_DISPLAY_VERSION_CODE >> 16) & 0xff,
		 (VPBE_DISPLAY_VERSION_CODE >> 8) & 0xff,
		 (VPBE_DISPLAY_VERSION_CODE) & 0xff);

	vpbe_display_layer->device_id = i;

	/* display devices 0/1 map onto OSD video windows 0/1 */
	vpbe_display_layer->layer_info.id =
		((i == VPBE_DISPLAY_DEVICE_0) ? WIN_VID0 : WIN_VID1);

	/* Initialize prio member of layer object */
	v4l2_prio_init(&vpbe_display_layer->prio);

	return 0;
}
1649
1650static __devinit int register_device(struct vpbe_layer *vpbe_display_layer,
1651 struct vpbe_display *disp_dev,
1652 struct platform_device *pdev) {
1653 int err;
1654
1655 v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
1656 "Trying to register VPBE display device.\n");
1657 v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
1658 "layer=%x,layer->video_dev=%x\n",
1659 (int)vpbe_display_layer,
1660 (int)&vpbe_display_layer->video_dev);
1661
1662 err = video_register_device(&vpbe_display_layer->video_dev,
1663 VFL_TYPE_GRABBER,
1664 -1);
1665 if (err)
1666 return -ENODEV;
1667
1668 vpbe_display_layer->disp_dev = disp_dev;
1669 /* set the driver data in platform device */
1670 platform_set_drvdata(pdev, disp_dev);
1671 video_set_drvdata(&vpbe_display_layer->video_dev,
1672 vpbe_display_layer);
1673
1674 return 0;
1675}
1676
1677
1678
1679/*
1680 * vpbe_display_probe()
1681 * This function creates device entries by register itself to the V4L2 driver
1682 * and initializes fields of each layer objects
1683 */
1684static __devinit int vpbe_display_probe(struct platform_device *pdev)
1685{
1686 struct vpbe_layer *vpbe_display_layer;
1687 struct vpbe_display *disp_dev;
1688 struct resource *res = NULL;
1689 int k;
1690 int i;
1691 int err;
1692 int irq;
1693
1694 printk(KERN_DEBUG "vpbe_display_probe\n");
1695 /* Allocate memory for vpbe_display */
1696 disp_dev = kzalloc(sizeof(struct vpbe_display), GFP_KERNEL);
1697 if (!disp_dev) {
1698 printk(KERN_ERR "ran out of memory\n");
1699 return -ENOMEM;
1700 }
1701
1702 spin_lock_init(&disp_dev->dma_queue_lock);
1703 /*
1704 * Scan all the platform devices to find the vpbe
1705 * controller device and get the vpbe_dev object
1706 */
1707 err = bus_for_each_dev(&platform_bus_type, NULL, disp_dev,
1708 vpbe_device_get);
1709 if (err < 0)
1710 return err;
1711 /* Initialize the vpbe display controller */
1712 if (NULL != disp_dev->vpbe_dev->ops.initialize) {
1713 err = disp_dev->vpbe_dev->ops.initialize(&pdev->dev,
1714 disp_dev->vpbe_dev);
1715 if (err) {
1716 v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
1717 "Error initing vpbe\n");
1718 err = -ENOMEM;
1719 goto probe_out;
1720 }
1721 }
1722
1723 for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
1724 if (init_vpbe_layer(i, disp_dev, pdev)) {
1725 err = -ENODEV;
1726 goto probe_out;
1727 }
1728 }
1729
1730 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1731 if (!res) {
1732 v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
1733 "Unable to get VENC interrupt resource\n");
1734 err = -ENODEV;
1735 goto probe_out;
1736 }
1737
1738 irq = res->start;
1739 if (request_irq(irq, venc_isr, IRQF_DISABLED, VPBE_DISPLAY_DRIVER,
1740 disp_dev)) {
1741 v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
1742 "Unable to request interrupt\n");
1743 err = -ENODEV;
1744 goto probe_out;
1745 }
1746
1747 for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
1748 if (register_device(disp_dev->dev[i], disp_dev, pdev)) {
1749 err = -ENODEV;
1750 goto probe_out;
1751 }
1752 }
1753
1754 printk(KERN_DEBUG "Successfully completed the probing of vpbe v4l2 device\n");
1755 return 0;
1756
1757probe_out:
1758 free_irq(res->start, disp_dev);
1759 for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
1760 /* Get the pointer to the layer object */
1761 vpbe_display_layer = disp_dev->dev[k];
1762 /* Unregister video device */
1763 if (vpbe_display_layer) {
1764 video_unregister_device(
1765 &vpbe_display_layer->video_dev);
1766 kfree(disp_dev->dev[k]);
1767 }
1768 }
1769 kfree(disp_dev);
1770 return err;
1771}
1772
1773/*
1774 * vpbe_display_remove()
1775 * It un-register hardware layer from V4L2 driver
1776 */
1777static int vpbe_display_remove(struct platform_device *pdev)
1778{
1779 struct vpbe_layer *vpbe_display_layer;
1780 struct vpbe_display *disp_dev = platform_get_drvdata(pdev);
1781 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
1782 struct resource *res;
1783 int i;
1784
1785 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_remove\n");
1786
1787 /* unregister irq */
1788 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1789 free_irq(res->start, disp_dev);
1790
1791 /* deinitialize the vpbe display controller */
1792 if (NULL != vpbe_dev->ops.deinitialize)
1793 vpbe_dev->ops.deinitialize(&pdev->dev, vpbe_dev);
1794 /* un-register device */
1795 for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
1796 /* Get the pointer to the layer object */
1797 vpbe_display_layer = disp_dev->dev[i];
1798 /* Unregister video device */
1799 video_unregister_device(&vpbe_display_layer->video_dev);
1800
1801 }
1802 for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
1803 kfree(disp_dev->dev[i]);
1804 disp_dev->dev[i] = NULL;
1805 }
1806
1807 return 0;
1808}
1809
/* Platform driver glue for the VPBE V4L2 display devices */
static struct platform_driver vpbe_display_driver = {
	.driver = {
		.name = VPBE_DISPLAY_DRIVER,
		.owner = THIS_MODULE,
		/* NOTE(review): .bus is normally filled in by the platform
		 * core on registration; setting it here looks redundant --
		 * confirm before removing. */
		.bus = &platform_bus_type,
	},
	.probe = vpbe_display_probe,
	.remove = __devexit_p(vpbe_display_remove),
};
1819
1820/*
1821 * vpbe_display_init()
1822 * This function registers device and driver to the kernel, requests irq
1823 * handler and allocates memory for layer objects
1824 */
1825static __devinit int vpbe_display_init(void)
1826{
1827 int err;
1828
1829 printk(KERN_DEBUG "vpbe_display_init\n");
1830
1831 /* Register driver to the kernel */
1832 err = platform_driver_register(&vpbe_display_driver);
1833 if (0 != err)
1834 return err;
1835
1836 printk(KERN_DEBUG "vpbe_display_init:"
1837 "VPBE V4L2 Display Driver V1.0 loaded\n");
1838 return 0;
1839}
1840
1841/*
1842 * vpbe_display_cleanup()
1843 * This function un-registers device and driver to the kernel, frees requested
1844 * irq handler and de-allocates memory allocated for layer objects.
1845 */
1846static void vpbe_display_cleanup(void)
1847{
1848 printk(KERN_DEBUG "vpbe_display_cleanup\n");
1849
1850 /* platform driver unregister */
1851 platform_driver_unregister(&vpbe_display_driver);
1852}
1853
/* Module entry/exit hooks and metadata */
module_init(vpbe_display_init);
module_exit(vpbe_display_cleanup);

MODULE_DESCRIPTION("TI DM644x/DM355/DM365 VPBE Display controller");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/video/davinci/vpbe_osd.c b/drivers/media/video/davinci/vpbe_osd.c
new file mode 100644
index 000000000000..5352884998f5
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe_osd.c
@@ -0,0 +1,1231 @@
1/*
2 * Copyright (C) 2007-2010 Texas Instruments Inc
3 * Copyright (C) 2007 MontaVista Software, Inc.
4 *
5 * Andy Lowe (alowe@mvista.com), MontaVista Software
6 * - Initial version
7 * Murali Karicheri (mkaricheri@gmail.com), Texas Instruments Ltd.
8 * - ported to sub device interface
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation version 2.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/interrupt.h>
27#include <linux/platform_device.h>
28#include <linux/clk.h>
29#include <linux/slab.h>
30
31#include <mach/io.h>
32#include <mach/cputype.h>
33#include <mach/hardware.h>
34
35#include <media/davinci/vpss.h>
36#include <media/v4l2-device.h>
37#include <media/davinci/vpbe_types.h>
38#include <media/davinci/vpbe_osd.h>
39
40#include <linux/io.h>
41#include "vpbe_osd_regs.h"
42
43#define MODULE_NAME VPBE_OSD_SUBDEV_NAME
44
45/* register access routines */
46static inline u32 osd_read(struct osd_state *sd, u32 offset)
47{
48 struct osd_state *osd = sd;
49
50 return readl(osd->osd_base + offset);
51}
52
53static inline u32 osd_write(struct osd_state *sd, u32 val, u32 offset)
54{
55 struct osd_state *osd = sd;
56
57 writel(val, osd->osd_base + offset);
58
59 return val;
60}
61
62static inline u32 osd_set(struct osd_state *sd, u32 mask, u32 offset)
63{
64 struct osd_state *osd = sd;
65
66 u32 addr = osd->osd_base + offset;
67 u32 val = readl(addr) | mask;
68
69 writel(val, addr);
70
71 return val;
72}
73
74static inline u32 osd_clear(struct osd_state *sd, u32 mask, u32 offset)
75{
76 struct osd_state *osd = sd;
77
78 u32 addr = osd->osd_base + offset;
79 u32 val = readl(addr) & ~mask;
80
81 writel(val, addr);
82
83 return val;
84}
85
86static inline u32 osd_modify(struct osd_state *sd, u32 mask, u32 val,
87 u32 offset)
88{
89 struct osd_state *osd = sd;
90
91 u32 addr = osd->osd_base + offset;
92 u32 new_val = (readl(addr) & ~mask) | (val & mask);
93
94 writel(new_val, addr);
95
96 return new_val;
97}
98
/* define some macros for layer and pixfmt classification */
#define is_osd_win(layer) (((layer) == WIN_OSD0) || ((layer) == WIN_OSD1))
#define is_vid_win(layer) (((layer) == WIN_VID0) || ((layer) == WIN_VID1))
/* RGB pixel formats handled by the video windows */
#define is_rgb_pixfmt(pixfmt) \
	(((pixfmt) == PIXFMT_RGB565) || ((pixfmt) == PIXFMT_RGB888))
/* interleaved / semi-planar YCbCr family */
#define is_yc_pixfmt(pixfmt) \
	(((pixfmt) == PIXFMT_YCbCrI) || ((pixfmt) == PIXFMT_YCrCbI) || \
	((pixfmt) == PIXFMT_NV12))
/* limits derived from register field widths -- see vpbe_osd_regs.h */
#define MAX_WIN_SIZE OSD_VIDWIN0XP_V0X
#define MAX_LINE_LENGTH (OSD_VIDWIN0OFST_V0LO << 5)
109
/**
 * _osd_dm6446_vid0_pingpong() - field inversion fix for DM6446
 * @sd - ptr to struct osd_state
 * @field_inversion - inversion flag
 * @fb_base_phys - frame buffer address
 * @lconfig - ptr to layer config
 *
 * This routine implements a workaround for the field signal inversion silicon
 * erratum described in Advisory 1.3.8 for the DM6446. The fb_base_phys and
 * lconfig parameters apply to the vid0 window. This routine should be called
 * whenever the vid0 layer configuration or start address is modified, or when
 * the OSD field inversion setting is modified.
 * Returns: 1 if the ping-pong buffers need to be toggled in the vsync isr, or
 * 0 otherwise
 */
static int _osd_dm6446_vid0_pingpong(struct osd_state *sd,
				     int field_inversion,
				     unsigned long fb_base_phys,
				     const struct osd_layer_config *lconfig)
{
	struct osd_platform_data *pdata;

	/* workaround is enabled per-board via platform data */
	pdata = (struct osd_platform_data *)sd->dev->platform_data;
	if (pdata->field_inv_wa_enable) {

		if (!field_inversion || !lconfig->interlaced) {
			/* workaround not needed: program both the normal and
			 * ping-pong address registers with the same 32-byte
			 * aligned base and clear the ping-pong control bits */
			osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
			osd_write(sd, fb_base_phys & ~0x1F, OSD_PPVWIN0ADR);
			osd_modify(sd, OSD_MISCCTL_PPSW | OSD_MISCCTL_PPRV, 0,
					OSD_MISCCTL);
			return 0;
		} else {
			unsigned miscctl = OSD_MISCCTL_PPRV;

			/* offset the two buffers by one line in opposite
			 * directions so the vsync isr can toggle between
			 * them to undo the inverted field signal */
			osd_write(sd,
				(fb_base_phys & ~0x1F) - lconfig->line_length,
				OSD_VIDWIN0ADR);
			osd_write(sd,
				(fb_base_phys & ~0x1F) + lconfig->line_length,
				OSD_PPVWIN0ADR);
			osd_modify(sd,
				OSD_MISCCTL_PPSW | OSD_MISCCTL_PPRV, miscctl,
				OSD_MISCCTL);

			return 1;
		}
	}

	return 0;
}
160
161static void _osd_set_field_inversion(struct osd_state *sd, int enable)
162{
163 unsigned fsinv = 0;
164
165 if (enable)
166 fsinv = OSD_MODE_FSINV;
167
168 osd_modify(sd, OSD_MODE_FSINV, fsinv, OSD_MODE);
169}
170
171static void _osd_set_blink_attribute(struct osd_state *sd, int enable,
172 enum osd_blink_interval blink)
173{
174 u32 osdatrmd = 0;
175
176 if (enable) {
177 osdatrmd |= OSD_OSDATRMD_BLNK;
178 osdatrmd |= blink << OSD_OSDATRMD_BLNKINT_SHIFT;
179 }
180 /* caller must ensure that OSD1 is configured in attribute mode */
181 osd_modify(sd, OSD_OSDATRMD_BLNKINT | OSD_OSDATRMD_BLNK, osdatrmd,
182 OSD_OSDATRMD);
183}
184
185static void _osd_set_rom_clut(struct osd_state *sd,
186 enum osd_rom_clut rom_clut)
187{
188 if (rom_clut == ROM_CLUT0)
189 osd_clear(sd, OSD_MISCCTL_RSEL, OSD_MISCCTL);
190 else
191 osd_set(sd, OSD_MISCCTL_RSEL, OSD_MISCCTL);
192}
193
/*
 * _osd_set_palette_map() - map a bitmap pixel value to a CLUT index
 *
 * For 1/2/4 bpp OSD windows the hardware expands each pixel value through
 * the W0BMP/W1BMP palette-map registers; this writes @clut_index into the
 * byte slot corresponding to @pixel_value for window @osdwin.
 */
static void _osd_set_palette_map(struct osd_state *sd,
				 enum osd_win_layer osdwin,
				 unsigned char pixel_value,
				 unsigned char clut_index,
				 enum osd_pix_format pixfmt)
{
	static const int map_2bpp[] = { 0, 5, 10, 15 };
	static const int map_1bpp[] = { 0, 15 };
	int bmp_offset;
	int bmp_shift;
	int bmp_mask;
	int bmp_reg;

	/* select the palette-map entry addressed by this pixel value;
	 * 1bpp/2bpp values are spread across the 16 entries */
	switch (pixfmt) {
	case PIXFMT_1BPP:
		bmp_reg = map_1bpp[pixel_value & 0x1];
		break;
	case PIXFMT_2BPP:
		bmp_reg = map_2bpp[pixel_value & 0x3];
		break;
	case PIXFMT_4BPP:
		bmp_reg = pixel_value & 0xf;
		break;
	default:
		/* palette mapping only applies to bitmap pixel formats */
		return;
	}

	/* two map entries are packed per 32-bit register */
	switch (osdwin) {
	case OSDWIN_OSD0:
		bmp_offset = OSD_W0BMP01 + (bmp_reg >> 1) * sizeof(u32);
		break;
	case OSDWIN_OSD1:
		bmp_offset = OSD_W1BMP01 + (bmp_reg >> 1) * sizeof(u32);
		break;
	default:
		return;
	}

	/* odd entries live in bits 15..8, even entries in bits 7..0 */
	if (bmp_reg & 1) {
		bmp_shift = 8;
		bmp_mask = 0xff << 8;
	} else {
		bmp_shift = 0;
		bmp_mask = 0xff;
	}

	osd_modify(sd, bmp_mask, clut_index << bmp_shift, bmp_offset);
}
242
243static void _osd_set_rec601_attenuation(struct osd_state *sd,
244 enum osd_win_layer osdwin, int enable)
245{
246 switch (osdwin) {
247 case OSDWIN_OSD0:
248 osd_modify(sd, OSD_OSDWIN0MD_ATN0E,
249 enable ? OSD_OSDWIN0MD_ATN0E : 0,
250 OSD_OSDWIN0MD);
251 break;
252 case OSDWIN_OSD1:
253 osd_modify(sd, OSD_OSDWIN1MD_ATN1E,
254 enable ? OSD_OSDWIN1MD_ATN1E : 0,
255 OSD_OSDWIN1MD);
256 break;
257 }
258}
259
260static void _osd_set_blending_factor(struct osd_state *sd,
261 enum osd_win_layer osdwin,
262 enum osd_blending_factor blend)
263{
264 switch (osdwin) {
265 case OSDWIN_OSD0:
266 osd_modify(sd, OSD_OSDWIN0MD_BLND0,
267 blend << OSD_OSDWIN0MD_BLND0_SHIFT, OSD_OSDWIN0MD);
268 break;
269 case OSDWIN_OSD1:
270 osd_modify(sd, OSD_OSDWIN1MD_BLND1,
271 blend << OSD_OSDWIN1MD_BLND1_SHIFT, OSD_OSDWIN1MD);
272 break;
273 }
274}
275
276static void _osd_enable_color_key(struct osd_state *sd,
277 enum osd_win_layer osdwin,
278 unsigned colorkey,
279 enum osd_pix_format pixfmt)
280{
281 switch (pixfmt) {
282 case PIXFMT_RGB565:
283 osd_write(sd, colorkey & OSD_TRANSPVAL_RGBTRANS,
284 OSD_TRANSPVAL);
285 break;
286 default:
287 break;
288 }
289
290 switch (osdwin) {
291 case OSDWIN_OSD0:
292 osd_set(sd, OSD_OSDWIN0MD_TE0, OSD_OSDWIN0MD);
293 break;
294 case OSDWIN_OSD1:
295 osd_set(sd, OSD_OSDWIN1MD_TE1, OSD_OSDWIN1MD);
296 break;
297 }
298}
299
300static void _osd_disable_color_key(struct osd_state *sd,
301 enum osd_win_layer osdwin)
302{
303 switch (osdwin) {
304 case OSDWIN_OSD0:
305 osd_clear(sd, OSD_OSDWIN0MD_TE0, OSD_OSDWIN0MD);
306 break;
307 case OSDWIN_OSD1:
308 osd_clear(sd, OSD_OSDWIN1MD_TE1, OSD_OSDWIN1MD);
309 break;
310 }
311}
312
313static void _osd_set_osd_clut(struct osd_state *sd,
314 enum osd_win_layer osdwin,
315 enum osd_clut clut)
316{
317 u32 winmd = 0;
318
319 switch (osdwin) {
320 case OSDWIN_OSD0:
321 if (clut == RAM_CLUT)
322 winmd |= OSD_OSDWIN0MD_CLUTS0;
323 osd_modify(sd, OSD_OSDWIN0MD_CLUTS0, winmd, OSD_OSDWIN0MD);
324 break;
325 case OSDWIN_OSD1:
326 if (clut == RAM_CLUT)
327 winmd |= OSD_OSDWIN1MD_CLUTS1;
328 osd_modify(sd, OSD_OSDWIN1MD_CLUTS1, winmd, OSD_OSDWIN1MD);
329 break;
330 }
331}
332
333static void _osd_set_zoom(struct osd_state *sd, enum osd_layer layer,
334 enum osd_zoom_factor h_zoom,
335 enum osd_zoom_factor v_zoom)
336{
337 u32 winmd = 0;
338
339 switch (layer) {
340 case WIN_OSD0:
341 winmd |= (h_zoom << OSD_OSDWIN0MD_OHZ0_SHIFT);
342 winmd |= (v_zoom << OSD_OSDWIN0MD_OVZ0_SHIFT);
343 osd_modify(sd, OSD_OSDWIN0MD_OHZ0 | OSD_OSDWIN0MD_OVZ0, winmd,
344 OSD_OSDWIN0MD);
345 break;
346 case WIN_VID0:
347 winmd |= (h_zoom << OSD_VIDWINMD_VHZ0_SHIFT);
348 winmd |= (v_zoom << OSD_VIDWINMD_VVZ0_SHIFT);
349 osd_modify(sd, OSD_VIDWINMD_VHZ0 | OSD_VIDWINMD_VVZ0, winmd,
350 OSD_VIDWINMD);
351 break;
352 case WIN_OSD1:
353 winmd |= (h_zoom << OSD_OSDWIN1MD_OHZ1_SHIFT);
354 winmd |= (v_zoom << OSD_OSDWIN1MD_OVZ1_SHIFT);
355 osd_modify(sd, OSD_OSDWIN1MD_OHZ1 | OSD_OSDWIN1MD_OVZ1, winmd,
356 OSD_OSDWIN1MD);
357 break;
358 case WIN_VID1:
359 winmd |= (h_zoom << OSD_VIDWINMD_VHZ1_SHIFT);
360 winmd |= (v_zoom << OSD_VIDWINMD_VVZ1_SHIFT);
361 osd_modify(sd, OSD_VIDWINMD_VHZ1 | OSD_VIDWINMD_VVZ1, winmd,
362 OSD_VIDWINMD);
363 break;
364 }
365}
366
367static void _osd_disable_layer(struct osd_state *sd, enum osd_layer layer)
368{
369 switch (layer) {
370 case WIN_OSD0:
371 osd_clear(sd, OSD_OSDWIN0MD_OACT0, OSD_OSDWIN0MD);
372 break;
373 case WIN_VID0:
374 osd_clear(sd, OSD_VIDWINMD_ACT0, OSD_VIDWINMD);
375 break;
376 case WIN_OSD1:
377 /* disable attribute mode as well as disabling the window */
378 osd_clear(sd, OSD_OSDWIN1MD_OASW | OSD_OSDWIN1MD_OACT1,
379 OSD_OSDWIN1MD);
380 break;
381 case WIN_VID1:
382 osd_clear(sd, OSD_VIDWINMD_ACT1, OSD_VIDWINMD);
383 break;
384 }
385}
386
387static void osd_disable_layer(struct osd_state *sd, enum osd_layer layer)
388{
389 struct osd_state *osd = sd;
390 struct osd_window_state *win = &osd->win[layer];
391 unsigned long flags;
392
393 spin_lock_irqsave(&osd->lock, flags);
394
395 if (!win->is_enabled) {
396 spin_unlock_irqrestore(&osd->lock, flags);
397 return;
398 }
399 win->is_enabled = 0;
400
401 _osd_disable_layer(sd, layer);
402
403 spin_unlock_irqrestore(&osd->lock, flags);
404}
405
/* Put OSD1 into attribute mode by setting the OASW bit in OSDWIN1MD. */
static void _osd_enable_attribute_mode(struct osd_state *sd)
{
	/* enable attribute mode for OSD1 */
	osd_set(sd, OSD_OSDWIN1MD_OASW, OSD_OSDWIN1MD);
}
411
412static void _osd_enable_layer(struct osd_state *sd, enum osd_layer layer)
413{
414 switch (layer) {
415 case WIN_OSD0:
416 osd_set(sd, OSD_OSDWIN0MD_OACT0, OSD_OSDWIN0MD);
417 break;
418 case WIN_VID0:
419 osd_set(sd, OSD_VIDWINMD_ACT0, OSD_VIDWINMD);
420 break;
421 case WIN_OSD1:
422 /* enable OSD1 and disable attribute mode */
423 osd_modify(sd, OSD_OSDWIN1MD_OASW | OSD_OSDWIN1MD_OACT1,
424 OSD_OSDWIN1MD_OACT1, OSD_OSDWIN1MD);
425 break;
426 case WIN_VID1:
427 osd_set(sd, OSD_VIDWINMD_ACT1, OSD_VIDWINMD);
428 break;
429 }
430}
431
432static int osd_enable_layer(struct osd_state *sd, enum osd_layer layer,
433 int otherwin)
434{
435 struct osd_state *osd = sd;
436 struct osd_window_state *win = &osd->win[layer];
437 struct osd_layer_config *cfg = &win->lconfig;
438 unsigned long flags;
439
440 spin_lock_irqsave(&osd->lock, flags);
441
442 /*
443 * use otherwin flag to know this is the other vid window
444 * in YUV420 mode, if is, skip this check
445 */
446 if (!otherwin && (!win->is_allocated ||
447 !win->fb_base_phys ||
448 !cfg->line_length ||
449 !cfg->xsize ||
450 !cfg->ysize)) {
451 spin_unlock_irqrestore(&osd->lock, flags);
452 return -1;
453 }
454
455 if (win->is_enabled) {
456 spin_unlock_irqrestore(&osd->lock, flags);
457 return 0;
458 }
459 win->is_enabled = 1;
460
461 if (cfg->pixfmt != PIXFMT_OSD_ATTR)
462 _osd_enable_layer(sd, layer);
463 else {
464 _osd_enable_attribute_mode(sd);
465 _osd_set_blink_attribute(sd, osd->is_blinking, osd->blink);
466 }
467
468 spin_unlock_irqrestore(&osd->lock, flags);
469
470 return 0;
471}
472
473static void _osd_start_layer(struct osd_state *sd, enum osd_layer layer,
474 unsigned long fb_base_phys,
475 unsigned long cbcr_ofst)
476{
477 switch (layer) {
478 case WIN_OSD0:
479 osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN0ADR);
480 break;
481 case WIN_VID0:
482 osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
483 break;
484 case WIN_OSD1:
485 osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN1ADR);
486 break;
487 case WIN_VID1:
488 osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN1ADR);
489 break;
490 }
491}
492
493static void osd_start_layer(struct osd_state *sd, enum osd_layer layer,
494 unsigned long fb_base_phys,
495 unsigned long cbcr_ofst)
496{
497 struct osd_state *osd = sd;
498 struct osd_window_state *win = &osd->win[layer];
499 struct osd_layer_config *cfg = &win->lconfig;
500 unsigned long flags;
501
502 spin_lock_irqsave(&osd->lock, flags);
503
504 win->fb_base_phys = fb_base_phys & ~0x1F;
505 _osd_start_layer(sd, layer, fb_base_phys, cbcr_ofst);
506
507 if (layer == WIN_VID0) {
508 osd->pingpong =
509 _osd_dm6446_vid0_pingpong(sd, osd->field_inversion,
510 win->fb_base_phys,
511 cfg);
512 }
513
514 spin_unlock_irqrestore(&osd->lock, flags);
515}
516
517static void osd_get_layer_config(struct osd_state *sd, enum osd_layer layer,
518 struct osd_layer_config *lconfig)
519{
520 struct osd_state *osd = sd;
521 struct osd_window_state *win = &osd->win[layer];
522 unsigned long flags;
523
524 spin_lock_irqsave(&osd->lock, flags);
525
526 *lconfig = win->lconfig;
527
528 spin_unlock_irqrestore(&osd->lock, flags);
529}
530
531/**
532 * try_layer_config() - Try a specific configuration for the layer
533 * @sd - ptr to struct osd_state
534 * @layer - layer to configure
535 * @lconfig - layer configuration to try
536 *
537 * If the requested lconfig is completely rejected and the value of lconfig on
538 * exit is the current lconfig, then try_layer_config() returns 1. Otherwise,
539 * try_layer_config() returns 0. A return value of 0 does not necessarily mean
540 * that the value of lconfig on exit is identical to the value of lconfig on
541 * entry, but merely that it represents a change from the current lconfig.
542 */
543static int try_layer_config(struct osd_state *sd, enum osd_layer layer,
544 struct osd_layer_config *lconfig)
545{
546 struct osd_state *osd = sd;
547 struct osd_window_state *win = &osd->win[layer];
548 int bad_config;
549
550 /* verify that the pixel format is compatible with the layer */
551 switch (lconfig->pixfmt) {
552 case PIXFMT_1BPP:
553 case PIXFMT_2BPP:
554 case PIXFMT_4BPP:
555 case PIXFMT_8BPP:
556 case PIXFMT_RGB565:
557 bad_config = !is_osd_win(layer);
558 break;
559 case PIXFMT_YCbCrI:
560 case PIXFMT_YCrCbI:
561 bad_config = !is_vid_win(layer);
562 break;
563 case PIXFMT_RGB888:
564 bad_config = !is_vid_win(layer);
565 break;
566 case PIXFMT_NV12:
567 bad_config = 1;
568 break;
569 case PIXFMT_OSD_ATTR:
570 bad_config = (layer != WIN_OSD1);
571 break;
572 default:
573 bad_config = 1;
574 break;
575 }
576 if (bad_config) {
577 /*
578 * The requested pixel format is incompatible with the layer,
579 * so keep the current layer configuration.
580 */
581 *lconfig = win->lconfig;
582 return bad_config;
583 }
584
585 /* DM6446: */
586 /* only one OSD window at a time can use RGB pixel formats */
587 if (is_osd_win(layer) && is_rgb_pixfmt(lconfig->pixfmt)) {
588 enum osd_pix_format pixfmt;
589 if (layer == WIN_OSD0)
590 pixfmt = osd->win[WIN_OSD1].lconfig.pixfmt;
591 else
592 pixfmt = osd->win[WIN_OSD0].lconfig.pixfmt;
593
594 if (is_rgb_pixfmt(pixfmt)) {
595 /*
596 * The other OSD window is already configured for an
597 * RGB, so keep the current layer configuration.
598 */
599 *lconfig = win->lconfig;
600 return 1;
601 }
602 }
603
604 /* DM6446: only one video window at a time can use RGB888 */
605 if (is_vid_win(layer) && lconfig->pixfmt == PIXFMT_RGB888) {
606 enum osd_pix_format pixfmt;
607
608 if (layer == WIN_VID0)
609 pixfmt = osd->win[WIN_VID1].lconfig.pixfmt;
610 else
611 pixfmt = osd->win[WIN_VID0].lconfig.pixfmt;
612
613 if (pixfmt == PIXFMT_RGB888) {
614 /*
615 * The other video window is already configured for
616 * RGB888, so keep the current layer configuration.
617 */
618 *lconfig = win->lconfig;
619 return 1;
620 }
621 }
622
623 /* window dimensions must be non-zero */
624 if (!lconfig->line_length || !lconfig->xsize || !lconfig->ysize) {
625 *lconfig = win->lconfig;
626 return 1;
627 }
628
629 /* round line_length up to a multiple of 32 */
630 lconfig->line_length = ((lconfig->line_length + 31) / 32) * 32;
631 lconfig->line_length =
632 min(lconfig->line_length, (unsigned)MAX_LINE_LENGTH);
633 lconfig->xsize = min(lconfig->xsize, (unsigned)MAX_WIN_SIZE);
634 lconfig->ysize = min(lconfig->ysize, (unsigned)MAX_WIN_SIZE);
635 lconfig->xpos = min(lconfig->xpos, (unsigned)MAX_WIN_SIZE);
636 lconfig->ypos = min(lconfig->ypos, (unsigned)MAX_WIN_SIZE);
637 lconfig->interlaced = (lconfig->interlaced != 0);
638 if (lconfig->interlaced) {
639 /* ysize and ypos must be even for interlaced displays */
640 lconfig->ysize &= ~1;
641 lconfig->ypos &= ~1;
642 }
643
644 return 0;
645}
646
/* Turn off RGB888 pixel-format handling for both video windows. */
static void _osd_disable_vid_rgb888(struct osd_state *sd)
{
	/*
	 * The DM6446 supports RGB888 pixel format in a single video window.
	 * This routine disables RGB888 pixel format for both video windows.
	 * The caller must ensure that neither video window is currently
	 * configured for RGB888 pixel format.
	 */
	osd_clear(sd, OSD_MISCCTL_RGBEN, OSD_MISCCTL);
}
657
658static void _osd_enable_vid_rgb888(struct osd_state *sd,
659 enum osd_layer layer)
660{
661 /*
662 * The DM6446 supports RGB888 pixel format in a single video window.
663 * This routine enables RGB888 pixel format for the specified video
664 * window. The caller must ensure that the other video window is not
665 * currently configured for RGB888 pixel format, as this routine will
666 * disable RGB888 pixel format for the other window.
667 */
668 if (layer == WIN_VID0) {
669 osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
670 OSD_MISCCTL_RGBEN, OSD_MISCCTL);
671 } else if (layer == WIN_VID1) {
672 osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
673 OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
674 OSD_MISCCTL);
675 }
676}
677
678static void _osd_set_cbcr_order(struct osd_state *sd,
679 enum osd_pix_format pixfmt)
680{
681 /*
682 * The caller must ensure that all windows using YC pixfmt use the same
683 * Cb/Cr order.
684 */
685 if (pixfmt == PIXFMT_YCbCrI)
686 osd_clear(sd, OSD_MODE_CS, OSD_MODE);
687 else if (pixfmt == PIXFMT_YCrCbI)
688 osd_set(sd, OSD_MODE_CS, OSD_MODE);
689}
690
/*
 * Program the hardware registers of one layer from @lconfig.
 *
 * Sets the global Cb/Cr order first, then per layer: the pixel-format
 * enable bits, bitmap width (OSD windows), field/interlace flags, line
 * offset (programmed in units of 32 bytes, hence the >> 5), and the
 * window position/size registers. For interlaced configurations the
 * vertical position and size are programmed in field lines (halved).
 *
 * Caller must hold osd->lock. For OSD1, the caller must have disabled
 * the window before switching between attribute mode and a normal mode.
 */
static void _osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
				  const struct osd_layer_config *lconfig)
{
	u32 winmd = 0, winmd_mask = 0, bmw = 0;

	_osd_set_cbcr_order(sd, lconfig->pixfmt);

	switch (layer) {
	case WIN_OSD0:
		/* RGB0E selects RGB565 interpretation of OSD0 data */
		winmd_mask |= OSD_OSDWIN0MD_RGB0E;
		if (lconfig->pixfmt == PIXFMT_RGB565)
			winmd |= OSD_OSDWIN0MD_RGB0E;

		winmd_mask |= OSD_OSDWIN0MD_BMW0 | OSD_OSDWIN0MD_OFF0;

		/* map bits-per-pixel onto the 2-bit bitmap-width field */
		switch (lconfig->pixfmt) {
		case PIXFMT_1BPP:
			bmw = 0;
			break;
		case PIXFMT_2BPP:
			bmw = 1;
			break;
		case PIXFMT_4BPP:
			bmw = 2;
			break;
		case PIXFMT_8BPP:
			bmw = 3;
			break;
		default:
			break;
		}
		winmd |= (bmw << OSD_OSDWIN0MD_BMW0_SHIFT);

		/* OFF0 marks the window as interlaced (field mode) */
		if (lconfig->interlaced)
			winmd |= OSD_OSDWIN0MD_OFF0;

		osd_modify(sd, winmd_mask, winmd, OSD_OSDWIN0MD);
		osd_write(sd, lconfig->line_length >> 5, OSD_OSDWIN0OFST);
		osd_write(sd, lconfig->xpos, OSD_OSDWIN0XP);
		osd_write(sd, lconfig->xsize, OSD_OSDWIN0XL);
		if (lconfig->interlaced) {
			/* vertical registers are in field lines when interlaced */
			osd_write(sd, lconfig->ypos >> 1, OSD_OSDWIN0YP);
			osd_write(sd, lconfig->ysize >> 1, OSD_OSDWIN0YL);
		} else {
			osd_write(sd, lconfig->ypos, OSD_OSDWIN0YP);
			osd_write(sd, lconfig->ysize, OSD_OSDWIN0YL);
		}
		break;
	case WIN_VID0:
		/* VFF0 marks VID0 as interlaced (field mode) */
		winmd_mask |= OSD_VIDWINMD_VFF0;
		if (lconfig->interlaced)
			winmd |= OSD_VIDWINMD_VFF0;

		osd_modify(sd, winmd_mask, winmd, OSD_VIDWINMD);
		osd_write(sd, lconfig->line_length >> 5, OSD_VIDWIN0OFST);
		osd_write(sd, lconfig->xpos, OSD_VIDWIN0XP);
		osd_write(sd, lconfig->xsize, OSD_VIDWIN0XL);
		/*
		 * For YUV420P format the register contents are
		 * duplicated in both VID registers
		 */
		if (lconfig->interlaced) {
			osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN0YP);
			osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN0YL);
		} else {
			osd_write(sd, lconfig->ypos, OSD_VIDWIN0YP);
			osd_write(sd, lconfig->ysize, OSD_VIDWIN0YL);
		}
		break;
	case WIN_OSD1:
		/*
		 * The caller must ensure that OSD1 is disabled prior to
		 * switching from a normal mode to attribute mode or from
		 * attribute mode to a normal mode.
		 */
		if (lconfig->pixfmt == PIXFMT_OSD_ATTR) {
			/* attribute mode: clear all normal-mode fields */
			winmd_mask |=
			    OSD_OSDWIN1MD_ATN1E | OSD_OSDWIN1MD_RGB1E |
			    OSD_OSDWIN1MD_CLUTS1 |
			    OSD_OSDWIN1MD_BLND1 | OSD_OSDWIN1MD_TE1;
		} else {
			/* RGB1E selects RGB565 interpretation of OSD1 data */
			winmd_mask |= OSD_OSDWIN1MD_RGB1E;
			if (lconfig->pixfmt == PIXFMT_RGB565)
				winmd |= OSD_OSDWIN1MD_RGB1E;

			winmd_mask |= OSD_OSDWIN1MD_BMW1;
			/* map bits-per-pixel onto the bitmap-width field */
			switch (lconfig->pixfmt) {
			case PIXFMT_1BPP:
				bmw = 0;
				break;
			case PIXFMT_2BPP:
				bmw = 1;
				break;
			case PIXFMT_4BPP:
				bmw = 2;
				break;
			case PIXFMT_8BPP:
				bmw = 3;
				break;
			default:
				break;
			}
			winmd |= (bmw << OSD_OSDWIN1MD_BMW1_SHIFT);
		}

		/* OFF1 marks the window as interlaced (field mode) */
		winmd_mask |= OSD_OSDWIN1MD_OFF1;
		if (lconfig->interlaced)
			winmd |= OSD_OSDWIN1MD_OFF1;

		osd_modify(sd, winmd_mask, winmd, OSD_OSDWIN1MD);
		osd_write(sd, lconfig->line_length >> 5, OSD_OSDWIN1OFST);
		osd_write(sd, lconfig->xpos, OSD_OSDWIN1XP);
		osd_write(sd, lconfig->xsize, OSD_OSDWIN1XL);
		if (lconfig->interlaced) {
			osd_write(sd, lconfig->ypos >> 1, OSD_OSDWIN1YP);
			osd_write(sd, lconfig->ysize >> 1, OSD_OSDWIN1YL);
		} else {
			osd_write(sd, lconfig->ypos, OSD_OSDWIN1YP);
			osd_write(sd, lconfig->ysize, OSD_OSDWIN1YL);
		}
		break;
	case WIN_VID1:
		/* VFF1 marks VID1 as interlaced (field mode) */
		winmd_mask |= OSD_VIDWINMD_VFF1;
		if (lconfig->interlaced)
			winmd |= OSD_VIDWINMD_VFF1;

		osd_modify(sd, winmd_mask, winmd, OSD_VIDWINMD);
		osd_write(sd, lconfig->line_length >> 5, OSD_VIDWIN1OFST);
		osd_write(sd, lconfig->xpos, OSD_VIDWIN1XP);
		osd_write(sd, lconfig->xsize, OSD_VIDWIN1XL);
		/*
		 * For YUV420P format the register contents are
		 * duplicated in both VID registers
		 */
		/*
		 * NOTE(review): passing ~S420D as the value with mask S420D
		 * appears intended to CLEAR the S420D bit (value is masked
		 * inside osd_modify) -- confirm osd_modify semantics.
		 */
		osd_modify(sd, OSD_MISCCTL_S420D, ~OSD_MISCCTL_S420D,
			   OSD_MISCCTL);

		if (lconfig->interlaced) {
			osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN1YP);
			osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN1YL);
		} else {
			osd_write(sd, lconfig->ypos, OSD_VIDWIN1YP);
			osd_write(sd, lconfig->ysize, OSD_VIDWIN1YL);
		}
		break;
	}
}
838
/*
 * Validate and apply a new configuration for @layer.
 *
 * The request is first filtered through try_layer_config(); a non-zero
 * reject code is returned (with *lconfig rewritten to the current config)
 * when the request cannot be applied at all. On success the hardware is
 * reprogrammed, OSD1 attribute-mode transitions are handled, a default
 * palette map is installed when switching to a 1/2/4 bpp bitmap format,
 * the DM6446 RGB888 window selection is refreshed, and the VID0 ping-pong
 * state is updated. Returns 0 on success.
 *
 * Note: on success *lconfig may still have been adjusted (clamped sizes,
 * rounded line_length) by try_layer_config().
 */
static int osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
				struct osd_layer_config *lconfig)
{
	struct osd_state *osd = sd;
	struct osd_window_state *win = &osd->win[layer];
	struct osd_layer_config *cfg = &win->lconfig;
	unsigned long flags;
	int reject_config;

	spin_lock_irqsave(&osd->lock, flags);

	reject_config = try_layer_config(sd, layer, lconfig);
	if (reject_config) {
		spin_unlock_irqrestore(&osd->lock, flags);
		return reject_config;
	}

	/* update the current Cb/Cr order */
	if (is_yc_pixfmt(lconfig->pixfmt))
		osd->yc_pixfmt = lconfig->pixfmt;

	/*
	 * If we are switching OSD1 from normal mode to attribute mode or from
	 * attribute mode to normal mode, then we must disable the window.
	 */
	if (layer == WIN_OSD1) {
		if (((lconfig->pixfmt == PIXFMT_OSD_ATTR) &&
		    (cfg->pixfmt != PIXFMT_OSD_ATTR)) ||
		    ((lconfig->pixfmt != PIXFMT_OSD_ATTR) &&
		    (cfg->pixfmt == PIXFMT_OSD_ATTR))) {
			win->is_enabled = 0;
			_osd_disable_layer(sd, layer);
		}
	}

	_osd_set_layer_config(sd, layer, lconfig);

	if (layer == WIN_OSD1) {
		struct osd_osdwin_state *osdwin_state =
		    &osd->osdwin[OSDWIN_OSD1];

		if ((lconfig->pixfmt != PIXFMT_OSD_ATTR) &&
		    (cfg->pixfmt == PIXFMT_OSD_ATTR)) {
			/*
			 * We just switched OSD1 from attribute mode to normal
			 * mode, so we must initialize the CLUT select, the
			 * blend factor, transparency colorkey enable, and
			 * attenuation enable (DM6446 only) bits in the
			 * OSDWIN1MD register.
			 */
			_osd_set_osd_clut(sd, OSDWIN_OSD1,
					  osdwin_state->clut);
			_osd_set_blending_factor(sd, OSDWIN_OSD1,
						 osdwin_state->blend);
			if (osdwin_state->colorkey_blending) {
				_osd_enable_color_key(sd, OSDWIN_OSD1,
						      osdwin_state->colorkey,
						      lconfig->pixfmt);
			} else
				_osd_disable_color_key(sd, OSDWIN_OSD1);
			_osd_set_rec601_attenuation(sd, OSDWIN_OSD1,
					osdwin_state->rec601_attenuation);
		} else if ((lconfig->pixfmt == PIXFMT_OSD_ATTR) &&
		    (cfg->pixfmt != PIXFMT_OSD_ATTR)) {
			/*
			 * We just switched OSD1 from normal mode to attribute
			 * mode, so we must initialize the blink enable and
			 * blink interval bits in the OSDATRMD register.
			 */
			_osd_set_blink_attribute(sd, osd->is_blinking,
						 osd->blink);
		}
	}

	/*
	 * If we just switched to a 1-, 2-, or 4-bits-per-pixel bitmap format
	 * then configure a default palette map.
	 */
	if ((lconfig->pixfmt != cfg->pixfmt) &&
	    ((lconfig->pixfmt == PIXFMT_1BPP) ||
	     (lconfig->pixfmt == PIXFMT_2BPP) ||
	     (lconfig->pixfmt == PIXFMT_4BPP))) {
		enum osd_win_layer osdwin =
		    ((layer == WIN_OSD0) ? OSDWIN_OSD0 : OSDWIN_OSD1);
		struct osd_osdwin_state *osdwin_state =
		    &osd->osdwin[osdwin];
		unsigned char clut_index;
		unsigned char clut_entries = 0;

		/* number of identity entries needed for the format */
		switch (lconfig->pixfmt) {
		case PIXFMT_1BPP:
			clut_entries = 2;
			break;
		case PIXFMT_2BPP:
			clut_entries = 4;
			break;
		case PIXFMT_4BPP:
			clut_entries = 16;
			break;
		default:
			break;
		}
		/*
		 * The default palette map maps the pixel value to the clut
		 * index, i.e. pixel value 0 maps to clut entry 0, pixel value
		 * 1 maps to clut entry 1, etc.
		 */
		for (clut_index = 0; clut_index < 16; clut_index++) {
			osdwin_state->palette_map[clut_index] = clut_index;
			if (clut_index < clut_entries) {
				_osd_set_palette_map(sd, osdwin, clut_index,
						     clut_index,
						     lconfig->pixfmt);
			}
		}
	}

	/* commit the accepted config to the cached window state */
	*cfg = *lconfig;
	/* DM6446: configure the RGB888 enable and window selection */
	if (osd->win[WIN_VID0].lconfig.pixfmt == PIXFMT_RGB888)
		_osd_enable_vid_rgb888(sd, WIN_VID0);
	else if (osd->win[WIN_VID1].lconfig.pixfmt == PIXFMT_RGB888)
		_osd_enable_vid_rgb888(sd, WIN_VID1);
	else
		_osd_disable_vid_rgb888(sd);

	/* VID0 ping-pong buffering depends on the (possibly new) config */
	if (layer == WIN_VID0) {
		osd->pingpong =
		    _osd_dm6446_vid0_pingpong(sd, osd->field_inversion,
					      win->fb_base_phys,
					      cfg);
	}

	spin_unlock_irqrestore(&osd->lock, flags);

	return 0;
}
978
/*
 * Reset @layer to its power-on defaults: disabled, no zoom, no frame
 * buffer, zeroed geometry, and the default pixel format for its type
 * (8 bpp bitmap for OSD windows, the current Cb/Cr order for video
 * windows). OSD-window blending/CLUT/colorkey state is reset as well.
 */
static void osd_init_layer(struct osd_state *sd, enum osd_layer layer)
{
	struct osd_state *osd = sd;
	struct osd_window_state *win = &osd->win[layer];
	enum osd_win_layer osdwin;
	struct osd_osdwin_state *osdwin_state;
	struct osd_layer_config *cfg = &win->lconfig;
	unsigned long flags;

	spin_lock_irqsave(&osd->lock, flags);

	win->is_enabled = 0;
	_osd_disable_layer(sd, layer);

	win->h_zoom = ZOOM_X1;
	win->v_zoom = ZOOM_X1;
	_osd_set_zoom(sd, layer, win->h_zoom, win->v_zoom);

	win->fb_base_phys = 0;
	_osd_start_layer(sd, layer, win->fb_base_phys, 0);

	cfg->line_length = 0;
	cfg->xsize = 0;
	cfg->ysize = 0;
	cfg->xpos = 0;
	cfg->ypos = 0;
	cfg->interlaced = 0;
	switch (layer) {
	case WIN_OSD0:
	case WIN_OSD1:
		osdwin = (layer == WIN_OSD0) ? OSDWIN_OSD0 : OSDWIN_OSD1;
		osdwin_state = &osd->osdwin[osdwin];
		/*
		 * Other code relies on the fact that OSD windows default to a
		 * bitmap pixel format when they are deallocated, so don't
		 * change this default pixel format.
		 */
		cfg->pixfmt = PIXFMT_8BPP;
		_osd_set_layer_config(sd, layer, cfg);
		osdwin_state->clut = RAM_CLUT;
		_osd_set_osd_clut(sd, osdwin, osdwin_state->clut);
		osdwin_state->colorkey_blending = 0;
		_osd_disable_color_key(sd, osdwin);
		osdwin_state->blend = OSD_8_VID_0;
		_osd_set_blending_factor(sd, osdwin, osdwin_state->blend);
		osdwin_state->rec601_attenuation = 0;
		_osd_set_rec601_attenuation(sd, osdwin,
				osdwin_state->rec601_attenuation);
		/* OSD1 also owns the global blink state */
		if (osdwin == OSDWIN_OSD1) {
			osd->is_blinking = 0;
			osd->blink = BLINK_X1;
		}
		break;
	case WIN_VID0:
	case WIN_VID1:
		/* video windows default to the current Cb/Cr order */
		cfg->pixfmt = osd->yc_pixfmt;
		_osd_set_layer_config(sd, layer, cfg);
		break;
	}

	spin_unlock_irqrestore(&osd->lock, flags);
}
1042
/*
 * Release an allocated layer: reset it to defaults, then mark it free.
 *
 * The lock is dropped around osd_init_layer() because that routine takes
 * the same spinlock itself. NOTE(review): this leaves a window where the
 * layer is still marked allocated but being reinitialized -- presumably
 * acceptable for this driver's single-owner usage; confirm with callers.
 */
static void osd_release_layer(struct osd_state *sd, enum osd_layer layer)
{
	struct osd_state *osd = sd;
	struct osd_window_state *win = &osd->win[layer];
	unsigned long flags;

	spin_lock_irqsave(&osd->lock, flags);

	/* nothing to do if the layer was never allocated */
	if (!win->is_allocated) {
		spin_unlock_irqrestore(&osd->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&osd->lock, flags);
	osd_init_layer(sd, layer);
	spin_lock_irqsave(&osd->lock, flags);

	win->is_allocated = 0;

	spin_unlock_irqrestore(&osd->lock, flags);
}
1064
1065static int osd_request_layer(struct osd_state *sd, enum osd_layer layer)
1066{
1067 struct osd_state *osd = sd;
1068 struct osd_window_state *win = &osd->win[layer];
1069 unsigned long flags;
1070
1071 spin_lock_irqsave(&osd->lock, flags);
1072
1073 if (win->is_allocated) {
1074 spin_unlock_irqrestore(&osd->lock, flags);
1075 return -1;
1076 }
1077 win->is_allocated = 1;
1078
1079 spin_unlock_irqrestore(&osd->lock, flags);
1080
1081 return 0;
1082}
1083
1084static void _osd_init(struct osd_state *sd)
1085{
1086 osd_write(sd, 0, OSD_MODE);
1087 osd_write(sd, 0, OSD_VIDWINMD);
1088 osd_write(sd, 0, OSD_OSDWIN0MD);
1089 osd_write(sd, 0, OSD_OSDWIN1MD);
1090 osd_write(sd, 0, OSD_RECTCUR);
1091 osd_write(sd, 0, OSD_MISCCTL);
1092}
1093
/* Program the base pixel X offset (left margin) for all windows. */
static void osd_set_left_margin(struct osd_state *sd, u32 val)
{
	osd_write(sd, val, OSD_BASEPX);
}
1098
/* Program the base pixel Y offset (top margin) for all windows. */
static void osd_set_top_margin(struct osd_state *sd, u32 val)
{
	osd_write(sd, val, OSD_BASEPY);
}
1103
1104static int osd_initialize(struct osd_state *osd)
1105{
1106 if (osd == NULL)
1107 return -ENODEV;
1108 _osd_init(osd);
1109
1110 /* set default Cb/Cr order */
1111 osd->yc_pixfmt = PIXFMT_YCbCrI;
1112
1113 _osd_set_field_inversion(osd, osd->field_inversion);
1114 _osd_set_rom_clut(osd, osd->rom_clut);
1115
1116 osd_init_layer(osd, WIN_OSD0);
1117 osd_init_layer(osd, WIN_VID0);
1118 osd_init_layer(osd, WIN_OSD1);
1119 osd_init_layer(osd, WIN_VID1);
1120
1121 return 0;
1122}
1123
/* Operations exported to the VPBE framework for this OSD controller. */
static const struct vpbe_osd_ops osd_ops = {
	.initialize = osd_initialize,
	.request_layer = osd_request_layer,
	.release_layer = osd_release_layer,
	.enable_layer = osd_enable_layer,
	.disable_layer = osd_disable_layer,
	.set_layer_config = osd_set_layer_config,
	.get_layer_config = osd_get_layer_config,
	.start_layer = osd_start_layer,
	.set_left_margin = osd_set_left_margin,
	.set_top_margin = osd_set_top_margin,
};
1136
1137static int osd_probe(struct platform_device *pdev)
1138{
1139 struct osd_platform_data *pdata;
1140 struct osd_state *osd;
1141 struct resource *res;
1142 int ret = 0;
1143
1144 osd = kzalloc(sizeof(struct osd_state), GFP_KERNEL);
1145 if (osd == NULL)
1146 return -ENOMEM;
1147
1148 osd->dev = &pdev->dev;
1149 pdata = (struct osd_platform_data *)pdev->dev.platform_data;
1150 osd->vpbe_type = (enum vpbe_version)pdata->vpbe_type;
1151 if (NULL == pdev->dev.platform_data) {
1152 dev_err(osd->dev, "No platform data defined for OSD"
1153 " sub device\n");
1154 ret = -ENOENT;
1155 goto free_mem;
1156 }
1157
1158 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1159 if (!res) {
1160 dev_err(osd->dev, "Unable to get OSD register address map\n");
1161 ret = -ENODEV;
1162 goto free_mem;
1163 }
1164 osd->osd_base_phys = res->start;
1165 osd->osd_size = res->end - res->start + 1;
1166 if (!request_mem_region(osd->osd_base_phys, osd->osd_size,
1167 MODULE_NAME)) {
1168 dev_err(osd->dev, "Unable to reserve OSD MMIO region\n");
1169 ret = -ENODEV;
1170 goto free_mem;
1171 }
1172 osd->osd_base = (unsigned long)ioremap_nocache(res->start,
1173 osd->osd_size);
1174 if (!osd->osd_base) {
1175 dev_err(osd->dev, "Unable to map the OSD region\n");
1176 ret = -ENODEV;
1177 goto release_mem_region;
1178 }
1179 spin_lock_init(&osd->lock);
1180 osd->ops = osd_ops;
1181 platform_set_drvdata(pdev, osd);
1182 dev_notice(osd->dev, "OSD sub device probe success\n");
1183 return ret;
1184
1185release_mem_region:
1186 release_mem_region(osd->osd_base_phys, osd->osd_size);
1187free_mem:
1188 kfree(osd);
1189 return ret;
1190}
1191
/*
 * Platform remove: undo osd_probe() -- unmap the registers, release the
 * MMIO region, and free the state. Always returns 0.
 */
static int osd_remove(struct platform_device *pdev)
{
	struct osd_state *osd = platform_get_drvdata(pdev);

	iounmap((void *)osd->osd_base);
	release_mem_region(osd->osd_base_phys, osd->osd_size);
	kfree(osd);
	return 0;
}
1201
/* Platform driver glue; matched by name against the board's OSD device. */
static struct platform_driver osd_driver = {
	.probe = osd_probe,
	.remove = osd_remove,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
1210
1211static int osd_init(void)
1212{
1213 if (platform_driver_register(&osd_driver)) {
1214 printk(KERN_ERR "Unable to register davinci osd driver\n");
1215 return -ENODEV;
1216 }
1217
1218 return 0;
1219}
1220
/* Module exit: unregister the platform driver. */
static void osd_exit(void)
{
	platform_driver_unregister(&osd_driver);
}
1225
1226module_init(osd_init);
1227module_exit(osd_exit);
1228
1229MODULE_LICENSE("GPL");
1230MODULE_DESCRIPTION("DaVinci OSD Manager Driver");
1231MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/video/davinci/vpbe_osd_regs.h b/drivers/media/video/davinci/vpbe_osd_regs.h
new file mode 100644
index 000000000000..584520f3af60
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe_osd_regs.h
@@ -0,0 +1,364 @@
1/*
2 * Copyright (C) 2006-2010 Texas Instruments Inc
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17#ifndef _VPBE_OSD_REGS_H
18#define _VPBE_OSD_REGS_H
19
20/* VPBE Global Registers */
21#define VPBE_PID 0x0
22#define VPBE_PCR 0x4
23
24/* VPSS CLock Registers */
25#define VPSSCLK_PID 0x00
26#define VPSSCLK_CLKCTRL 0x04
27
28/* VPSS Buffer Logic Registers */
29#define VPSSBL_PID 0x00
30#define VPSSBL_PCR 0x04
31#define VPSSBL_BCR 0x08
32#define VPSSBL_INTSTAT 0x0C
33#define VPSSBL_INTSEL 0x10
34#define VPSSBL_EVTSEL 0x14
35#define VPSSBL_MEMCTRL 0x18
36#define VPSSBL_CCDCMUX 0x1C
37
38/* DM365 ISP5 system configuration */
39#define ISP5_PID 0x0
40#define ISP5_PCCR 0x4
41#define ISP5_BCR 0x8
42#define ISP5_INTSTAT 0xC
43#define ISP5_INTSEL1 0x10
44#define ISP5_INTSEL2 0x14
45#define ISP5_INTSEL3 0x18
46#define ISP5_EVTSEL 0x1c
47#define ISP5_CCDCMUX 0x20
48
49/* VPBE On-Screen Display Subsystem Registers (OSD) */
50#define OSD_MODE 0x00
51#define OSD_VIDWINMD 0x04
52#define OSD_OSDWIN0MD 0x08
53#define OSD_OSDWIN1MD 0x0C
54#define OSD_OSDATRMD 0x0C
55#define OSD_RECTCUR 0x10
56#define OSD_VIDWIN0OFST 0x18
57#define OSD_VIDWIN1OFST 0x1C
58#define OSD_OSDWIN0OFST 0x20
59#define OSD_OSDWIN1OFST 0x24
60#define OSD_VIDWINADH 0x28
61#define OSD_VIDWIN0ADL 0x2C
62#define OSD_VIDWIN0ADR 0x2C
63#define OSD_VIDWIN1ADL 0x30
64#define OSD_VIDWIN1ADR 0x30
65#define OSD_OSDWINADH 0x34
66#define OSD_OSDWIN0ADL 0x38
67#define OSD_OSDWIN0ADR 0x38
68#define OSD_OSDWIN1ADL 0x3C
69#define OSD_OSDWIN1ADR 0x3C
70#define OSD_BASEPX 0x40
71#define OSD_BASEPY 0x44
72#define OSD_VIDWIN0XP 0x48
73#define OSD_VIDWIN0YP 0x4C
74#define OSD_VIDWIN0XL 0x50
75#define OSD_VIDWIN0YL 0x54
76#define OSD_VIDWIN1XP 0x58
77#define OSD_VIDWIN1YP 0x5C
78#define OSD_VIDWIN1XL 0x60
79#define OSD_VIDWIN1YL 0x64
80#define OSD_OSDWIN0XP 0x68
81#define OSD_OSDWIN0YP 0x6C
82#define OSD_OSDWIN0XL 0x70
83#define OSD_OSDWIN0YL 0x74
84#define OSD_OSDWIN1XP 0x78
85#define OSD_OSDWIN1YP 0x7C
86#define OSD_OSDWIN1XL 0x80
87#define OSD_OSDWIN1YL 0x84
88#define OSD_CURXP 0x88
89#define OSD_CURYP 0x8C
90#define OSD_CURXL 0x90
91#define OSD_CURYL 0x94
92#define OSD_W0BMP01 0xA0
93#define OSD_W0BMP23 0xA4
94#define OSD_W0BMP45 0xA8
95#define OSD_W0BMP67 0xAC
96#define OSD_W0BMP89 0xB0
97#define OSD_W0BMPAB 0xB4
98#define OSD_W0BMPCD 0xB8
99#define OSD_W0BMPEF 0xBC
100#define OSD_W1BMP01 0xC0
101#define OSD_W1BMP23 0xC4
102#define OSD_W1BMP45 0xC8
103#define OSD_W1BMP67 0xCC
104#define OSD_W1BMP89 0xD0
105#define OSD_W1BMPAB 0xD4
106#define OSD_W1BMPCD 0xD8
107#define OSD_W1BMPEF 0xDC
108#define OSD_VBNDRY 0xE0
109#define OSD_EXTMODE 0xE4
110#define OSD_MISCCTL 0xE8
111#define OSD_CLUTRAMYCB 0xEC
112#define OSD_CLUTRAMCR 0xF0
113#define OSD_TRANSPVAL 0xF4
114#define OSD_TRANSPVALL 0xF4
115#define OSD_TRANSPVALU 0xF8
116#define OSD_TRANSPBMPIDX 0xFC
117#define OSD_PPVWIN0ADR 0xFC
118
119/* bit definitions */
120#define VPBE_PCR_VENC_DIV (1 << 1)
121#define VPBE_PCR_CLK_OFF (1 << 0)
122
123#define VPSSBL_INTSTAT_HSSIINT (1 << 14)
124#define VPSSBL_INTSTAT_CFALDINT (1 << 13)
125#define VPSSBL_INTSTAT_IPIPE_INT5 (1 << 12)
126#define VPSSBL_INTSTAT_IPIPE_INT4 (1 << 11)
127#define VPSSBL_INTSTAT_IPIPE_INT3 (1 << 10)
128#define VPSSBL_INTSTAT_IPIPE_INT2 (1 << 9)
129#define VPSSBL_INTSTAT_IPIPE_INT1 (1 << 8)
130#define VPSSBL_INTSTAT_IPIPE_INT0 (1 << 7)
131#define VPSSBL_INTSTAT_IPIPEIFINT (1 << 6)
132#define VPSSBL_INTSTAT_OSDINT (1 << 5)
133#define VPSSBL_INTSTAT_VENCINT (1 << 4)
134#define VPSSBL_INTSTAT_H3AINT (1 << 3)
135#define VPSSBL_INTSTAT_CCDC_VDINT2 (1 << 2)
136#define VPSSBL_INTSTAT_CCDC_VDINT1 (1 << 1)
137#define VPSSBL_INTSTAT_CCDC_VDINT0 (1 << 0)
138
139/* DM365 ISP5 bit definitions */
140#define ISP5_INTSTAT_VENCINT (1 << 21)
141#define ISP5_INTSTAT_OSDINT (1 << 20)
142
143/* VMOD TVTYP options for HDMD=0 */
144#define SDTV_NTSC 0
145#define SDTV_PAL 1
146/* VMOD TVTYP options for HDMD=1 */
147#define HDTV_525P 0
148#define HDTV_625P 1
149#define HDTV_1080I 2
150#define HDTV_720P 3
151
152#define OSD_MODE_CS (1 << 15)
153#define OSD_MODE_OVRSZ (1 << 14)
154#define OSD_MODE_OHRSZ (1 << 13)
155#define OSD_MODE_EF (1 << 12)
156#define OSD_MODE_VVRSZ (1 << 11)
157#define OSD_MODE_VHRSZ (1 << 10)
158#define OSD_MODE_FSINV (1 << 9)
159#define OSD_MODE_BCLUT (1 << 8)
160#define OSD_MODE_CABG_SHIFT 0
161#define OSD_MODE_CABG (0xff << 0)
162
163#define OSD_VIDWINMD_VFINV (1 << 15)
164#define OSD_VIDWINMD_V1EFC (1 << 14)
165#define OSD_VIDWINMD_VHZ1_SHIFT 12
166#define OSD_VIDWINMD_VHZ1 (3 << 12)
167#define OSD_VIDWINMD_VVZ1_SHIFT 10
168#define OSD_VIDWINMD_VVZ1 (3 << 10)
169#define OSD_VIDWINMD_VFF1 (1 << 9)
170#define OSD_VIDWINMD_ACT1 (1 << 8)
171#define OSD_VIDWINMD_V0EFC (1 << 6)
172#define OSD_VIDWINMD_VHZ0_SHIFT 4
173#define OSD_VIDWINMD_VHZ0 (3 << 4)
174#define OSD_VIDWINMD_VVZ0_SHIFT 2
175#define OSD_VIDWINMD_VVZ0 (3 << 2)
176#define OSD_VIDWINMD_VFF0 (1 << 1)
177#define OSD_VIDWINMD_ACT0 (1 << 0)
178
179#define OSD_OSDWIN0MD_ATN0E (1 << 14)
180#define OSD_OSDWIN0MD_RGB0E (1 << 13)
181#define OSD_OSDWIN0MD_BMP0MD_SHIFT 13
182#define OSD_OSDWIN0MD_BMP0MD (3 << 13)
183#define OSD_OSDWIN0MD_CLUTS0 (1 << 12)
184#define OSD_OSDWIN0MD_OHZ0_SHIFT 10
185#define OSD_OSDWIN0MD_OHZ0 (3 << 10)
186#define OSD_OSDWIN0MD_OVZ0_SHIFT 8
187#define OSD_OSDWIN0MD_OVZ0 (3 << 8)
188#define OSD_OSDWIN0MD_BMW0_SHIFT 6
189#define OSD_OSDWIN0MD_BMW0 (3 << 6)
190#define OSD_OSDWIN0MD_BLND0_SHIFT 3
191#define OSD_OSDWIN0MD_BLND0 (7 << 3)
192#define OSD_OSDWIN0MD_TE0 (1 << 2)
193#define OSD_OSDWIN0MD_OFF0 (1 << 1)
194#define OSD_OSDWIN0MD_OACT0 (1 << 0)
195
196#define OSD_OSDWIN1MD_OASW (1 << 15)
197#define OSD_OSDWIN1MD_ATN1E (1 << 14)
198#define OSD_OSDWIN1MD_RGB1E (1 << 13)
199#define OSD_OSDWIN1MD_BMP1MD_SHIFT 13
200#define OSD_OSDWIN1MD_BMP1MD (3 << 13)
201#define OSD_OSDWIN1MD_CLUTS1 (1 << 12)
202#define OSD_OSDWIN1MD_OHZ1_SHIFT 10
203#define OSD_OSDWIN1MD_OHZ1 (3 << 10)
204#define OSD_OSDWIN1MD_OVZ1_SHIFT 8
205#define OSD_OSDWIN1MD_OVZ1 (3 << 8)
206#define OSD_OSDWIN1MD_BMW1_SHIFT 6
207#define OSD_OSDWIN1MD_BMW1 (3 << 6)
208#define OSD_OSDWIN1MD_BLND1_SHIFT 3
209#define OSD_OSDWIN1MD_BLND1 (7 << 3)
210#define OSD_OSDWIN1MD_TE1 (1 << 2)
211#define OSD_OSDWIN1MD_OFF1 (1 << 1)
212#define OSD_OSDWIN1MD_OACT1 (1 << 0)
213
214#define OSD_OSDATRMD_OASW (1 << 15)
215#define OSD_OSDATRMD_OHZA_SHIFT 10
216#define OSD_OSDATRMD_OHZA (3 << 10)
217#define OSD_OSDATRMD_OVZA_SHIFT 8
218#define OSD_OSDATRMD_OVZA (3 << 8)
219#define OSD_OSDATRMD_BLNKINT_SHIFT 6
220#define OSD_OSDATRMD_BLNKINT (3 << 6)
221#define OSD_OSDATRMD_OFFA (1 << 1)
222#define OSD_OSDATRMD_BLNK (1 << 0)
223
224#define OSD_RECTCUR_RCAD_SHIFT 8
225#define OSD_RECTCUR_RCAD (0xff << 8)
226#define OSD_RECTCUR_CLUTSR (1 << 7)
227#define OSD_RECTCUR_RCHW_SHIFT 4
228#define OSD_RECTCUR_RCHW (7 << 4)
229#define OSD_RECTCUR_RCVW_SHIFT 1
230#define OSD_RECTCUR_RCVW (7 << 1)
231#define OSD_RECTCUR_RCACT (1 << 0)
232
233#define OSD_VIDWIN0OFST_V0LO (0x1ff << 0)
234
235#define OSD_VIDWIN1OFST_V1LO (0x1ff << 0)
236
237#define OSD_OSDWIN0OFST_O0LO (0x1ff << 0)
238
239#define OSD_OSDWIN1OFST_O1LO (0x1ff << 0)
240
241#define OSD_WINOFST_AH_SHIFT 9
242
243#define OSD_VIDWIN0OFST_V0AH (0xf << 9)
244#define OSD_VIDWIN1OFST_V1AH (0xf << 9)
245#define OSD_OSDWIN0OFST_O0AH (0xf << 9)
246#define OSD_OSDWIN1OFST_O1AH (0xf << 9)
247
248#define OSD_VIDWINADH_V1AH_SHIFT 8
249#define OSD_VIDWINADH_V1AH (0x7f << 8)
250#define OSD_VIDWINADH_V0AH_SHIFT 0
251#define OSD_VIDWINADH_V0AH (0x7f << 0)
252
253#define OSD_VIDWIN0ADL_V0AL (0xffff << 0)
254
255#define OSD_VIDWIN1ADL_V1AL (0xffff << 0)
256
257#define OSD_OSDWINADH_O1AH_SHIFT 8
258#define OSD_OSDWINADH_O1AH (0x7f << 8)
259#define OSD_OSDWINADH_O0AH_SHIFT 0
260#define OSD_OSDWINADH_O0AH (0x7f << 0)
261
262#define OSD_OSDWIN0ADL_O0AL (0xffff << 0)
263
264#define OSD_OSDWIN1ADL_O1AL (0xffff << 0)
265
266#define OSD_BASEPX_BPX (0x3ff << 0)
267
268#define OSD_BASEPY_BPY (0x1ff << 0)
269
270#define OSD_VIDWIN0XP_V0X (0x7ff << 0)
271
272#define OSD_VIDWIN0YP_V0Y (0x7ff << 0)
273
274#define OSD_VIDWIN0XL_V0W (0x7ff << 0)
275
276#define OSD_VIDWIN0YL_V0H (0x7ff << 0)
277
278#define OSD_VIDWIN1XP_V1X (0x7ff << 0)
279
280#define OSD_VIDWIN1YP_V1Y (0x7ff << 0)
281
282#define OSD_VIDWIN1XL_V1W (0x7ff << 0)
283
284#define OSD_VIDWIN1YL_V1H (0x7ff << 0)
285
286#define OSD_OSDWIN0XP_W0X (0x7ff << 0)
287
288#define OSD_OSDWIN0YP_W0Y (0x7ff << 0)
289
290#define OSD_OSDWIN0XL_W0W (0x7ff << 0)
291
292#define OSD_OSDWIN0YL_W0H (0x7ff << 0)
293
294#define OSD_OSDWIN1XP_W1X (0x7ff << 0)
295
296#define OSD_OSDWIN1YP_W1Y (0x7ff << 0)
297
298#define OSD_OSDWIN1XL_W1W (0x7ff << 0)
299
300#define OSD_OSDWIN1YL_W1H (0x7ff << 0)
301
302#define OSD_CURXP_RCSX (0x7ff << 0)
303
304#define OSD_CURYP_RCSY (0x7ff << 0)
305
306#define OSD_CURXL_RCSW (0x7ff << 0)
307
308#define OSD_CURYL_RCSH (0x7ff << 0)
309
310#define OSD_EXTMODE_EXPMDSEL (1 << 15)
311#define OSD_EXTMODE_SCRNHEXP_SHIFT 13
312#define OSD_EXTMODE_SCRNHEXP (3 << 13)
313#define OSD_EXTMODE_SCRNVEXP (1 << 12)
314#define OSD_EXTMODE_OSD1BLDCHR (1 << 11)
315#define OSD_EXTMODE_OSD0BLDCHR (1 << 10)
316#define OSD_EXTMODE_ATNOSD1EN (1 << 9)
317#define OSD_EXTMODE_ATNOSD0EN (1 << 8)
318#define OSD_EXTMODE_OSDHRSZ15 (1 << 7)
319#define OSD_EXTMODE_VIDHRSZ15 (1 << 6)
320#define OSD_EXTMODE_ZMFILV1HEN (1 << 5)
321#define OSD_EXTMODE_ZMFILV1VEN (1 << 4)
322#define OSD_EXTMODE_ZMFILV0HEN (1 << 3)
323#define OSD_EXTMODE_ZMFILV0VEN (1 << 2)
324#define OSD_EXTMODE_EXPFILHEN (1 << 1)
325#define OSD_EXTMODE_EXPFILVEN (1 << 0)
326
327#define OSD_MISCCTL_BLDSEL (1 << 15)
328#define OSD_MISCCTL_S420D (1 << 14)
329#define OSD_MISCCTL_BMAPT (1 << 13)
330#define OSD_MISCCTL_DM365M (1 << 12)
331#define OSD_MISCCTL_RGBEN (1 << 7)
332#define OSD_MISCCTL_RGBWIN (1 << 6)
333#define OSD_MISCCTL_DMANG (1 << 6)
334#define OSD_MISCCTL_TMON (1 << 5)
335#define OSD_MISCCTL_RSEL (1 << 4)
336#define OSD_MISCCTL_CPBSY (1 << 3)
337#define OSD_MISCCTL_PPSW (1 << 2)
338#define OSD_MISCCTL_PPRV (1 << 1)
339
340#define OSD_CLUTRAMYCB_Y_SHIFT 8
341#define OSD_CLUTRAMYCB_Y (0xff << 8)
342#define OSD_CLUTRAMYCB_CB_SHIFT 0
343#define OSD_CLUTRAMYCB_CB (0xff << 0)
344
345#define OSD_CLUTRAMCR_CR_SHIFT 8
346#define OSD_CLUTRAMCR_CR (0xff << 8)
347#define OSD_CLUTRAMCR_CADDR_SHIFT 0
348#define OSD_CLUTRAMCR_CADDR (0xff << 0)
349
350#define OSD_TRANSPVAL_RGBTRANS (0xffff << 0)
351
352#define OSD_TRANSPVALL_RGBL (0xffff << 0)
353
354#define OSD_TRANSPVALU_Y_SHIFT 8
355#define OSD_TRANSPVALU_Y (0xff << 8)
356#define OSD_TRANSPVALU_RGBU_SHIFT 0
357#define OSD_TRANSPVALU_RGBU (0xff << 0)
358
359#define OSD_TRANSPBMPIDX_BMP1_SHIFT 8
360#define OSD_TRANSPBMPIDX_BMP1 (0xff << 8)
361#define OSD_TRANSPBMPIDX_BMP0_SHIFT 0
362#define OSD_TRANSPBMPIDX_BMP0 0xff
363
#endif	/* _VPBE_OSD_REGS_H */
diff --git a/drivers/media/video/davinci/vpbe_venc.c b/drivers/media/video/davinci/vpbe_venc.c
new file mode 100644
index 000000000000..03a3e5c65ee7
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe_venc.c
@@ -0,0 +1,566 @@
1/*
2 * Copyright (C) 2010 Texas Instruments Inc
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/ctype.h>
21#include <linux/delay.h>
22#include <linux/device.h>
23#include <linux/interrupt.h>
24#include <linux/platform_device.h>
25#include <linux/videodev2.h>
26#include <linux/slab.h>
27
28#include <mach/hardware.h>
29#include <mach/mux.h>
30#include <mach/io.h>
31#include <mach/i2c.h>
32
33#include <linux/io.h>
34
35#include <media/davinci/vpbe_types.h>
36#include <media/davinci/vpbe_venc.h>
37#include <media/davinci/vpss.h>
38#include <media/v4l2-device.h>
39
40#include "vpbe_venc_regs.h"
41
42#define MODULE_NAME VPBE_VENC_SUBDEV_NAME
43
44static int debug = 2;
45module_param(debug, int, 0644);
46MODULE_PARM_DESC(debug, "Debug level 0-2");
47
48struct venc_state {
49 struct v4l2_subdev sd;
50 struct venc_callback *callback;
51 struct venc_platform_data *pdata;
52 struct device *pdev;
53 u32 output;
54 v4l2_std_id std;
55 spinlock_t lock;
56 void __iomem *venc_base;
57 void __iomem *vdaccfg_reg;
58};
59
/* Map a struct v4l2_subdev pointer back to its enclosing venc_state. */
static inline struct venc_state *to_state(struct v4l2_subdev *sd)
{
	return container_of(sd, struct venc_state, sd);
}
64
/* Read a 32-bit VENC register at byte @offset from the mapped base. */
static inline u32 venc_read(struct v4l2_subdev *sd, u32 offset)
{
	struct venc_state *venc = to_state(sd);

	return readl(venc->venc_base + offset);
}
71
72static inline u32 venc_write(struct v4l2_subdev *sd, u32 offset, u32 val)
73{
74 struct venc_state *venc = to_state(sd);
75
76 writel(val, (venc->venc_base + offset));
77
78 return val;
79}
80
81static inline u32 venc_modify(struct v4l2_subdev *sd, u32 offset,
82 u32 val, u32 mask)
83{
84 u32 new_val = (venc_read(sd, offset) & ~mask) | (val & mask);
85
86 venc_write(sd, offset, new_val);
87
88 return new_val;
89}
90
91static inline u32 vdaccfg_write(struct v4l2_subdev *sd, u32 val)
92{
93 struct venc_state *venc = to_state(sd);
94
95 writel(val, venc->vdaccfg_reg);
96
97 val = readl(venc->vdaccfg_reg);
98
99 return val;
100}
101
102/* This function sets the dac of the VPBE for various outputs
103 */
104static int venc_set_dac(struct v4l2_subdev *sd, u32 out_index)
105{
106 switch (out_index) {
107 case 0:
108 v4l2_dbg(debug, 1, sd, "Setting output to Composite\n");
109 venc_write(sd, VENC_DACSEL, 0);
110 break;
111 case 1:
112 v4l2_dbg(debug, 1, sd, "Setting output to S-Video\n");
113 venc_write(sd, VENC_DACSEL, 0x210);
114 break;
115 case 2:
116 venc_write(sd, VENC_DACSEL, 0x543);
117 break;
118 default:
119 return -EINVAL;
120 }
121
122 return 0;
123}
124
/*
 * venc_enabledigitaloutput - select the digital or analog output path.
 * @benable: non-zero clears the analog/LCD timing registers for the
 *	     digital path; zero parks the digital path and programs
 *	     defaults for the analog timing generator.
 *
 * NOTE(review): both branches clear VENC_VMOD first, so the encoder is
 * disabled while the remaining registers are reprogrammed; the exact
 * write order follows the hardware programming sequence — do not
 * reorder.
 */
static void venc_enabledigitaloutput(struct v4l2_subdev *sd, int benable)
{
	v4l2_dbg(debug, 2, sd, "venc_enabledigitaloutput\n");

	if (benable) {
		venc_write(sd, VENC_VMOD, 0);
		venc_write(sd, VENC_CVBS, 0);
		venc_write(sd, VENC_LCDOUT, 0);
		venc_write(sd, VENC_HSPLS, 0);
		venc_write(sd, VENC_HSTART, 0);
		venc_write(sd, VENC_HVALID, 0);
		venc_write(sd, VENC_HINT, 0);
		venc_write(sd, VENC_VSPLS, 0);
		venc_write(sd, VENC_VSTART, 0);
		venc_write(sd, VENC_VVALID, 0);
		venc_write(sd, VENC_VINT, 0);
		venc_write(sd, VENC_YCCCTL, 0);
		venc_write(sd, VENC_DACSEL, 0);

	} else {
		venc_write(sd, VENC_VMOD, 0);
		/* disable VCLK output pin enable */
		venc_write(sd, VENC_VIDCTL, 0x141);

		/* Disable output sync pins */
		venc_write(sd, VENC_SYNCCTL, 0);

		/* Disable DCLOCK */
		venc_write(sd, VENC_DCLKCTL, 0);
		venc_write(sd, VENC_DRGBX1, 0x0000057C);

		/* Disable LCD output control (accepting default polarity) */
		venc_write(sd, VENC_LCDOUT, 0);
		venc_write(sd, VENC_CMPNT, 0x100);
		venc_write(sd, VENC_HSPLS, 0);
		venc_write(sd, VENC_HINT, 0);
		venc_write(sd, VENC_HSTART, 0);
		venc_write(sd, VENC_HVALID, 0);

		venc_write(sd, VENC_VSPLS, 0);
		venc_write(sd, VENC_VINT, 0);
		venc_write(sd, VENC_VSTART, 0);
		venc_write(sd, VENC_VVALID, 0);

		venc_write(sd, VENC_HSDLY, 0);
		venc_write(sd, VENC_VSDLY, 0);

		venc_write(sd, VENC_YCCCTL, 0);
		venc_write(sd, VENC_VSTARTA, 0);

		/* Set OSD clock and OSD Sync Advance registers */
		venc_write(sd, VENC_OSDCLK0, 1);
		venc_write(sd, VENC_OSDCLK1, 2);
	}
}
180
/*
 * venc_set_ntsc - configure the encoder for NTSC (525/60 interlaced)
 * composite/S-Video output.
 *
 * Returns 0 on success, -EINVAL if the platform clock setup fails.
 */
static int venc_set_ntsc(struct v4l2_subdev *sd)
{
	struct venc_state *venc = to_state(sd);
	struct venc_platform_data *pdata = venc->pdata;

	v4l2_dbg(debug, 2, sd, "venc_set_ntsc\n");

	/* Setup clock at VPSS & VENC for SD */
	vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
	if (pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_525_60) < 0)
		return -EINVAL;

	venc_enabledigitaloutput(sd, 0);

	/* to set VENC CLK DIV to 1 - final clock is 54 MHz */
	venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
	/* Set REC656 Mode */
	venc_write(sd, VENC_YCCCTL, 0x1);
	/* clear DAC frequency/upsampling selects for SD */
	venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAFRQ);
	venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAUPS);

	/* start from a clean VMOD, then enable VIE, master mode, NTSC */
	venc_write(sd, VENC_VMOD, 0);
	venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
			VENC_VMOD_VIE);
	venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_VMD), VENC_VMOD_VMD);
	venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_TVTYP_SHIFT),
			VENC_VMOD_TVTYP);
	venc_write(sd, VENC_DACTST, 0x0);
	/* finally turn the encoder on */
	venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);

	return 0;
}
216
/*
 * venc_set_pal - configure the encoder for PAL (625/50 interlaced)
 * composite/S-Video output.
 *
 * Returns 0 on success, -EINVAL if the platform clock setup fails.
 */
static int venc_set_pal(struct v4l2_subdev *sd)
{
	struct venc_state *venc = to_state(sd);

	v4l2_dbg(debug, 2, sd, "venc_set_pal\n");

	/* Setup clock at VPSS & VENC for SD */
	vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
	if (venc->pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_625_50) < 0)
		return -EINVAL;

	venc_enabledigitaloutput(sd, 0);

	/* to set VENC CLK DIV to 1 - final clock is 54 MHz */
	venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
	/* Set REC656 Mode */
	venc_write(sd, VENC_YCCCTL, 0x1);

	/* PAL (unlike NTSC above) enables vertical overlap delay */
	venc_modify(sd, VENC_SYNCCTL, 1 << VENC_SYNCCTL_OVD_SHIFT,
			VENC_SYNCCTL_OVD);
	/* start from a clean VMOD, then enable VIE, master mode, PAL */
	venc_write(sd, VENC_VMOD, 0);
	venc_modify(sd, VENC_VMOD,
			(1 << VENC_VMOD_VIE_SHIFT),
			VENC_VMOD_VIE);
	venc_modify(sd, VENC_VMOD,
			(0 << VENC_VMOD_VMD), VENC_VMOD_VMD);
	venc_modify(sd, VENC_VMOD,
			(1 << VENC_VMOD_TVTYP_SHIFT),
			VENC_VMOD_TVTYP);
	venc_write(sd, VENC_DACTST, 0x0);
	/* finally turn the encoder on */
	venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);

	return 0;
}
254
/*
 * venc_set_480p59_94 - configure the video encoder for EDTV (525p,
 * 480p59.94) component output.
 *
 * Returns 0 on success, -EINVAL if the platform clock setup fails.
 */
static int venc_set_480p59_94(struct v4l2_subdev *sd)
{
	struct venc_state *venc = to_state(sd);
	struct venc_platform_data *pdata = venc->pdata;

	v4l2_dbg(debug, 2, sd, "venc_set_480p59_94\n");

	/* Setup clock at VPSS & VENC for SD */
	if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_480P59_94) < 0)
		return -EINVAL;

	venc_enabledigitaloutput(sd, 0);

	/* progressive output: different OSD clock dividers than SD */
	venc_write(sd, VENC_OSDCLK0, 0);
	venc_write(sd, VENC_OSDCLK1, 1);
	/* enable DAC frequency/upsampling for progressive modes */
	venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
		    VENC_VDPRO_DAFRQ);
	venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
		    VENC_VDPRO_DAUPS);
	/* start from a clean VMOD: VIE, HD mode, 525P, 8-bit YCbCr */
	venc_write(sd, VENC_VMOD, 0);
	venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
		    VENC_VMOD_VIE);
	venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
	venc_modify(sd, VENC_VMOD, (HDTV_525P << VENC_VMOD_TVTYP_SHIFT),
		    VENC_VMOD_TVTYP);
	venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 <<
		    VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD);

	/* finally turn the encoder on */
	venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);

	return 0;
}
292
/*
 * venc_set_576p50 - configure the video encoder for EDTV (625p,
 * 576p50) component output.  (Original comment said "HDTV"; the
 * programmed TVTYP is the 625-line progressive EDTV mode.)
 *
 * Returns 0 on success, -EINVAL if the platform clock setup fails.
 */
static int venc_set_576p50(struct v4l2_subdev *sd)
{
	struct venc_state *venc = to_state(sd);
	struct venc_platform_data *pdata = venc->pdata;

	v4l2_dbg(debug, 2, sd, "venc_set_576p50\n");

	/* Setup clock at VPSS & VENC for SD */
	if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_576P50) < 0)
		return -EINVAL;

	venc_enabledigitaloutput(sd, 0);

	/* progressive output: different OSD clock dividers than SD */
	venc_write(sd, VENC_OSDCLK0, 0);
	venc_write(sd, VENC_OSDCLK1, 1);

	/* enable DAC frequency/upsampling for progressive modes */
	venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
		    VENC_VDPRO_DAFRQ);
	venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
		    VENC_VDPRO_DAUPS);

	/* start from a clean VMOD: VIE, HD mode, 625P, 8-bit YCbCr */
	venc_write(sd, VENC_VMOD, 0);
	venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
		    VENC_VMOD_VIE);
	venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
	venc_modify(sd, VENC_VMOD, (HDTV_625P << VENC_VMOD_TVTYP_SHIFT),
		    VENC_VMOD_TVTYP);

	venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 <<
		    VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD);
	/* finally turn the encoder on */
	venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);

	return 0;
}
332
333static int venc_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm)
334{
335 v4l2_dbg(debug, 1, sd, "venc_s_std_output\n");
336
337 if (norm & V4L2_STD_525_60)
338 return venc_set_ntsc(sd);
339 else if (norm & V4L2_STD_625_50)
340 return venc_set_pal(sd);
341
342 return -EINVAL;
343}
344
345static int venc_s_dv_preset(struct v4l2_subdev *sd,
346 struct v4l2_dv_preset *dv_preset)
347{
348 v4l2_dbg(debug, 1, sd, "venc_s_dv_preset\n");
349
350 if (dv_preset->preset == V4L2_DV_576P50)
351 return venc_set_576p50(sd);
352 else if (dv_preset->preset == V4L2_DV_480P59_94)
353 return venc_set_480p59_94(sd);
354
355 return -EINVAL;
356}
357
358static int venc_s_routing(struct v4l2_subdev *sd, u32 input, u32 output,
359 u32 config)
360{
361 struct venc_state *venc = to_state(sd);
362 int ret;
363
364 v4l2_dbg(debug, 1, sd, "venc_s_routing\n");
365
366 ret = venc_set_dac(sd, output);
367 if (!ret)
368 venc->output = output;
369
370 return ret;
371}
372
373static long venc_ioctl(struct v4l2_subdev *sd,
374 unsigned int cmd,
375 void *arg)
376{
377 u32 val;
378
379 switch (cmd) {
380 case VENC_GET_FLD:
381 val = venc_read(sd, VENC_VSTAT);
382 *((int *)arg) = ((val & VENC_VSTAT_FIDST) ==
383 VENC_VSTAT_FIDST);
384 break;
385 default:
386 v4l2_err(sd, "Wrong IOCTL cmd\n");
387 break;
388 }
389
390 return 0;
391}
392
/* Core ops: only the private ioctl (VENC_GET_FLD) is implemented. */
static const struct v4l2_subdev_core_ops venc_core_ops = {
	.ioctl      = venc_ioctl,
};

/* Video ops: output routing, SD standard and DV preset selection. */
static const struct v4l2_subdev_video_ops venc_video_ops = {
	.s_routing = venc_s_routing,
	.s_std_output = venc_s_std_output,
	.s_dv_preset = venc_s_dv_preset,
};

/* Combined sub-device ops table registered with the v4l2 core. */
static const struct v4l2_subdev_ops venc_ops = {
	.core = &venc_core_ops,
	.video = &venc_video_ops,
};
407
408static int venc_initialize(struct v4l2_subdev *sd)
409{
410 struct venc_state *venc = to_state(sd);
411 int ret;
412
413 /* Set default to output to composite and std to NTSC */
414 venc->output = 0;
415 venc->std = V4L2_STD_525_60;
416
417 ret = venc_s_routing(sd, 0, venc->output, 0);
418 if (ret < 0) {
419 v4l2_err(sd, "Error setting output during init\n");
420 return -EINVAL;
421 }
422
423 ret = venc_s_std_output(sd, venc->std);
424 if (ret < 0) {
425 v4l2_err(sd, "Error setting std during init\n");
426 return -EINVAL;
427 }
428
429 return ret;
430}
431
432static int venc_device_get(struct device *dev, void *data)
433{
434 struct platform_device *pdev = to_platform_device(dev);
435 struct venc_state **venc = data;
436
437 if (strcmp(MODULE_NAME, pdev->name) == 0)
438 *venc = platform_get_drvdata(pdev);
439
440 return 0;
441}
442
443struct v4l2_subdev *venc_sub_dev_init(struct v4l2_device *v4l2_dev,
444 const char *venc_name)
445{
446 struct venc_state *venc;
447 int err;
448
449 err = bus_for_each_dev(&platform_bus_type, NULL, &venc,
450 venc_device_get);
451 if (venc == NULL)
452 return NULL;
453
454 v4l2_subdev_init(&venc->sd, &venc_ops);
455
456 strcpy(venc->sd.name, venc_name);
457 if (v4l2_device_register_subdev(v4l2_dev, &venc->sd) < 0) {
458 v4l2_err(v4l2_dev,
459 "vpbe unable to register venc sub device\n");
460 return NULL;
461 }
462 if (venc_initialize(&venc->sd)) {
463 v4l2_err(v4l2_dev,
464 "vpbe venc initialization failed\n");
465 return NULL;
466 }
467
468 return &venc->sd;
469}
470EXPORT_SYMBOL(venc_sub_dev_init);
471
472static int venc_probe(struct platform_device *pdev)
473{
474 struct venc_state *venc;
475 struct resource *res;
476 int ret;
477
478 venc = kzalloc(sizeof(struct venc_state), GFP_KERNEL);
479 if (venc == NULL)
480 return -ENOMEM;
481
482 venc->pdev = &pdev->dev;
483 venc->pdata = pdev->dev.platform_data;
484 if (NULL == venc->pdata) {
485 dev_err(venc->pdev, "Unable to get platform data for"
486 " VENC sub device");
487 ret = -ENOENT;
488 goto free_mem;
489 }
490 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
491 if (!res) {
492 dev_err(venc->pdev,
493 "Unable to get VENC register address map\n");
494 ret = -ENODEV;
495 goto free_mem;
496 }
497
498 if (!request_mem_region(res->start, resource_size(res), "venc")) {
499 dev_err(venc->pdev, "Unable to reserve VENC MMIO region\n");
500 ret = -ENODEV;
501 goto free_mem;
502 }
503
504 venc->venc_base = ioremap_nocache(res->start, resource_size(res));
505 if (!venc->venc_base) {
506 dev_err(venc->pdev, "Unable to map VENC IO space\n");
507 ret = -ENODEV;
508 goto release_venc_mem_region;
509 }
510
511 spin_lock_init(&venc->lock);
512 platform_set_drvdata(pdev, venc);
513 dev_notice(venc->pdev, "VENC sub device probe success\n");
514 return 0;
515
516release_venc_mem_region:
517 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
518 release_mem_region(res->start, resource_size(res));
519free_mem:
520 kfree(venc);
521 return ret;
522}
523
524static int venc_remove(struct platform_device *pdev)
525{
526 struct venc_state *venc = platform_get_drvdata(pdev);
527 struct resource *res;
528
529 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
530 iounmap((void *)venc->venc_base);
531 release_mem_region(res->start, resource_size(res));
532 kfree(venc);
533
534 return 0;
535}
536
/* Platform driver glue; matched against the VPBE_VENC_SUBDEV_NAME device. */
static struct platform_driver venc_driver = {
	.probe		= venc_probe,
	.remove		= venc_remove,
	.driver		= {
		.name	= MODULE_NAME,
		.owner	= THIS_MODULE,
	},
};
545
546static int venc_init(void)
547{
548 if (platform_driver_register(&venc_driver)) {
549 printk(KERN_ERR "Unable to register venc driver\n");
550 return -ENODEV;
551 }
552 return 0;
553}
554
555static void venc_exit(void)
556{
557 platform_driver_unregister(&venc_driver);
558 return;
559}
560
561module_init(venc_init);
562module_exit(venc_exit);
563
564MODULE_LICENSE("GPL");
565MODULE_DESCRIPTION("VPBE VENC Driver");
566MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/video/davinci/vpbe_venc_regs.h b/drivers/media/video/davinci/vpbe_venc_regs.h
new file mode 100644
index 000000000000..947cb1510776
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe_venc_regs.h
@@ -0,0 +1,177 @@
1/*
2 * Copyright (C) 2006-2010 Texas Instruments Inc
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation version 2.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17#ifndef _VPBE_VENC_REGS_H
18#define _VPBE_VENC_REGS_H
19
20/* VPBE Video Encoder / Digital LCD Subsystem Registers (VENC) */
21#define VENC_VMOD 0x00
22#define VENC_VIDCTL 0x04
23#define VENC_VDPRO 0x08
24#define VENC_SYNCCTL 0x0C
25#define VENC_HSPLS 0x10
26#define VENC_VSPLS 0x14
27#define VENC_HINT 0x18
28#define VENC_HSTART 0x1C
29#define VENC_HVALID 0x20
30#define VENC_VINT 0x24
31#define VENC_VSTART 0x28
32#define VENC_VVALID 0x2C
33#define VENC_HSDLY 0x30
34#define VENC_VSDLY 0x34
35#define VENC_YCCCTL 0x38
36#define VENC_RGBCTL 0x3C
37#define VENC_RGBCLP 0x40
38#define VENC_LINECTL 0x44
39#define VENC_CULLLINE 0x48
40#define VENC_LCDOUT 0x4C
41#define VENC_BRTS 0x50
42#define VENC_BRTW 0x54
43#define VENC_ACCTL 0x58
44#define VENC_PWMP 0x5C
45#define VENC_PWMW 0x60
46#define VENC_DCLKCTL 0x64
47#define VENC_DCLKPTN0 0x68
48#define VENC_DCLKPTN1 0x6C
49#define VENC_DCLKPTN2 0x70
50#define VENC_DCLKPTN3 0x74
51#define VENC_DCLKPTN0A 0x78
52#define VENC_DCLKPTN1A 0x7C
53#define VENC_DCLKPTN2A 0x80
54#define VENC_DCLKPTN3A 0x84
55#define VENC_DCLKHS 0x88
56#define VENC_DCLKHSA 0x8C
57#define VENC_DCLKHR 0x90
58#define VENC_DCLKVS 0x94
59#define VENC_DCLKVR 0x98
60#define VENC_CAPCTL 0x9C
61#define VENC_CAPDO 0xA0
62#define VENC_CAPDE 0xA4
63#define VENC_ATR0 0xA8
64#define VENC_ATR1 0xAC
65#define VENC_ATR2 0xB0
66#define VENC_VSTAT 0xB8
67#define VENC_RAMADR 0xBC
68#define VENC_RAMPORT 0xC0
69#define VENC_DACTST 0xC4
70#define VENC_YCOLVL 0xC8
71#define VENC_SCPROG 0xCC
72#define VENC_CVBS 0xDC
73#define VENC_CMPNT 0xE0
74#define VENC_ETMG0 0xE4
75#define VENC_ETMG1 0xE8
76#define VENC_ETMG2 0xEC
77#define VENC_ETMG3 0xF0
78#define VENC_DACSEL 0xF4
79#define VENC_ARGBX0 0x100
80#define VENC_ARGBX1 0x104
81#define VENC_ARGBX2 0x108
82#define VENC_ARGBX3 0x10C
83#define VENC_ARGBX4 0x110
84#define VENC_DRGBX0 0x114
85#define VENC_DRGBX1 0x118
86#define VENC_DRGBX2 0x11C
87#define VENC_DRGBX3 0x120
88#define VENC_DRGBX4 0x124
89#define VENC_VSTARTA 0x128
90#define VENC_OSDCLK0 0x12C
91#define VENC_OSDCLK1 0x130
92#define VENC_HVLDCL0 0x134
93#define VENC_HVLDCL1 0x138
94#define VENC_OSDHADV 0x13C
95#define VENC_CLKCTL 0x140
96#define VENC_GAMCTL 0x144
97#define VENC_XHINTVL 0x174
98
99/* bit definitions */
100#define VPBE_PCR_VENC_DIV (1 << 1)
101#define VPBE_PCR_CLK_OFF (1 << 0)
102
103#define VENC_VMOD_VDMD_SHIFT 12
104#define VENC_VMOD_VDMD_YCBCR16 0
105#define VENC_VMOD_VDMD_YCBCR8 1
106#define VENC_VMOD_VDMD_RGB666 2
107#define VENC_VMOD_VDMD_RGB8 3
108#define VENC_VMOD_VDMD_EPSON 4
109#define VENC_VMOD_VDMD_CASIO 5
110#define VENC_VMOD_VDMD_UDISPQVGA 6
111#define VENC_VMOD_VDMD_STNLCD 7
112#define VENC_VMOD_VIE_SHIFT 1
113#define VENC_VMOD_VDMD (7 << 12)
114#define VENC_VMOD_ITLCL (1 << 11)
115#define VENC_VMOD_ITLC (1 << 10)
116#define VENC_VMOD_NSIT (1 << 9)
117#define VENC_VMOD_HDMD (1 << 8)
118#define VENC_VMOD_TVTYP_SHIFT 6
119#define VENC_VMOD_TVTYP (3 << 6)
120#define VENC_VMOD_SLAVE (1 << 5)
121#define VENC_VMOD_VMD (1 << 4)
122#define VENC_VMOD_BLNK (1 << 3)
123#define VENC_VMOD_VIE (1 << 1)
124#define VENC_VMOD_VENC (1 << 0)
125
126/* VMOD TVTYP options for HDMD=0 */
127#define SDTV_NTSC 0
128#define SDTV_PAL 1
129/* VMOD TVTYP options for HDMD=1 */
130#define HDTV_525P 0
131#define HDTV_625P 1
132#define HDTV_1080I 2
133#define HDTV_720P 3
134
135#define VENC_VIDCTL_VCLKP (1 << 14)
136#define VENC_VIDCTL_VCLKE_SHIFT 13
137#define VENC_VIDCTL_VCLKE (1 << 13)
138#define VENC_VIDCTL_VCLKZ_SHIFT 12
139#define VENC_VIDCTL_VCLKZ (1 << 12)
140#define VENC_VIDCTL_SYDIR_SHIFT 8
141#define VENC_VIDCTL_SYDIR (1 << 8)
142#define VENC_VIDCTL_DOMD_SHIFT 4
143#define VENC_VIDCTL_DOMD (3 << 4)
144#define VENC_VIDCTL_YCDIR_SHIFT 0
145#define VENC_VIDCTL_YCDIR (1 << 0)
146
147#define VENC_VDPRO_ATYCC_SHIFT 5
148#define VENC_VDPRO_ATYCC (1 << 5)
149#define VENC_VDPRO_ATCOM_SHIFT 4
150#define VENC_VDPRO_ATCOM (1 << 4)
151#define VENC_VDPRO_DAFRQ (1 << 3)
152#define VENC_VDPRO_DAUPS (1 << 2)
153#define VENC_VDPRO_CUPS (1 << 1)
154#define VENC_VDPRO_YUPS (1 << 0)
155
156#define VENC_SYNCCTL_VPL_SHIFT 3
157#define VENC_SYNCCTL_VPL (1 << 3)
158#define VENC_SYNCCTL_HPL_SHIFT 2
159#define VENC_SYNCCTL_HPL (1 << 2)
160#define VENC_SYNCCTL_SYEV_SHIFT 1
161#define VENC_SYNCCTL_SYEV (1 << 1)
162#define VENC_SYNCCTL_SYEH_SHIFT 0
163#define VENC_SYNCCTL_SYEH (1 << 0)
164#define VENC_SYNCCTL_OVD_SHIFT 14
165#define VENC_SYNCCTL_OVD (1 << 14)
166
167#define VENC_DCLKCTL_DCKEC_SHIFT 11
168#define VENC_DCLKCTL_DCKEC (1 << 11)
169#define VENC_DCLKCTL_DCKPW_SHIFT 0
170#define VENC_DCLKCTL_DCKPW (0x3f << 0)
171
172#define VENC_VSTAT_FIDST (1 << 4)
173
174#define VENC_CMPNT_MRGB_SHIFT 14
175#define VENC_CMPNT_MRGB (1 << 14)
176
177#endif /* _VPBE_VENC_REGS_H */
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index d93ad74a34c5..49e4deb50043 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -33,7 +33,6 @@
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <linux/io.h> 35#include <linux/io.h>
36#include <linux/version.h>
37#include <linux/slab.h> 36#include <linux/slab.h>
38#include <media/v4l2-device.h> 37#include <media/v4l2-device.h>
39#include <media/v4l2-ioctl.h> 38#include <media/v4l2-ioctl.h>
@@ -44,6 +43,7 @@
44 43
45MODULE_DESCRIPTION("TI DaVinci VPIF Capture driver"); 44MODULE_DESCRIPTION("TI DaVinci VPIF Capture driver");
46MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
46MODULE_VERSION(VPIF_CAPTURE_VERSION);
47 47
48#define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg) 48#define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg)
49#define vpif_dbg(level, debug, fmt, arg...) \ 49#define vpif_dbg(level, debug, fmt, arg...) \
@@ -1677,7 +1677,6 @@ static int vpif_querycap(struct file *file, void *priv,
1677{ 1677{
1678 struct vpif_capture_config *config = vpif_dev->platform_data; 1678 struct vpif_capture_config *config = vpif_dev->platform_data;
1679 1679
1680 cap->version = VPIF_CAPTURE_VERSION_CODE;
1681 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; 1680 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
1682 strlcpy(cap->driver, "vpif capture", sizeof(cap->driver)); 1681 strlcpy(cap->driver, "vpif capture", sizeof(cap->driver));
1683 strlcpy(cap->bus_info, "DM646x Platform", sizeof(cap->bus_info)); 1682 strlcpy(cap->bus_info, "DM646x Platform", sizeof(cap->bus_info));
@@ -2211,10 +2210,8 @@ static __init int vpif_probe(struct platform_device *pdev)
2211 vfd->v4l2_dev = &vpif_obj.v4l2_dev; 2210 vfd->v4l2_dev = &vpif_obj.v4l2_dev;
2212 vfd->release = video_device_release; 2211 vfd->release = video_device_release;
2213 snprintf(vfd->name, sizeof(vfd->name), 2212 snprintf(vfd->name, sizeof(vfd->name),
2214 "DM646x_VPIFCapture_DRIVER_V%d.%d.%d", 2213 "DM646x_VPIFCapture_DRIVER_V%s",
2215 (VPIF_CAPTURE_VERSION_CODE >> 16) & 0xff, 2214 VPIF_CAPTURE_VERSION);
2216 (VPIF_CAPTURE_VERSION_CODE >> 8) & 0xff,
2217 (VPIF_CAPTURE_VERSION_CODE) & 0xff);
2218 /* Set video_dev to the video device */ 2215 /* Set video_dev to the video device */
2219 ch->video_dev = vfd; 2216 ch->video_dev = vfd;
2220 } 2217 }
diff --git a/drivers/media/video/davinci/vpif_capture.h b/drivers/media/video/davinci/vpif_capture.h
index 7a4196dfdce1..064550f5ce4a 100644
--- a/drivers/media/video/davinci/vpif_capture.h
+++ b/drivers/media/video/davinci/vpif_capture.h
@@ -23,7 +23,6 @@
23 23
24/* Header files */ 24/* Header files */
25#include <linux/videodev2.h> 25#include <linux/videodev2.h>
26#include <linux/version.h>
27#include <media/v4l2-common.h> 26#include <media/v4l2-common.h>
28#include <media/v4l2-device.h> 27#include <media/v4l2-device.h>
29#include <media/videobuf-core.h> 28#include <media/videobuf-core.h>
@@ -33,11 +32,7 @@
33#include "vpif.h" 32#include "vpif.h"
34 33
35/* Macros */ 34/* Macros */
36#define VPIF_MAJOR_RELEASE 0 35#define VPIF_CAPTURE_VERSION "0.0.2"
37#define VPIF_MINOR_RELEASE 0
38#define VPIF_BUILD 1
39#define VPIF_CAPTURE_VERSION_CODE ((VPIF_MAJOR_RELEASE << 16) | \
40 (VPIF_MINOR_RELEASE << 8) | VPIF_BUILD)
41 36
42#define VPIF_VALID_FIELD(field) (((V4L2_FIELD_ANY == field) || \ 37#define VPIF_VALID_FIELD(field) (((V4L2_FIELD_ANY == field) || \
43 (V4L2_FIELD_NONE == field)) || \ 38 (V4L2_FIELD_NONE == field)) || \
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index cdf659abdc2a..286f02910044 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -29,7 +29,6 @@
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/version.h>
33#include <linux/slab.h> 32#include <linux/slab.h>
34 33
35#include <asm/irq.h> 34#include <asm/irq.h>
@@ -47,6 +46,7 @@
47 46
48MODULE_DESCRIPTION("TI DaVinci VPIF Display driver"); 47MODULE_DESCRIPTION("TI DaVinci VPIF Display driver");
49MODULE_LICENSE("GPL"); 48MODULE_LICENSE("GPL");
49MODULE_VERSION(VPIF_DISPLAY_VERSION);
50 50
51#define DM646X_V4L2_STD (V4L2_STD_525_60 | V4L2_STD_625_50) 51#define DM646X_V4L2_STD (V4L2_STD_525_60 | V4L2_STD_625_50)
52 52
@@ -701,7 +701,6 @@ static int vpif_querycap(struct file *file, void *priv,
701{ 701{
702 struct vpif_display_config *config = vpif_dev->platform_data; 702 struct vpif_display_config *config = vpif_dev->platform_data;
703 703
704 cap->version = VPIF_DISPLAY_VERSION_CODE;
705 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; 704 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
706 strlcpy(cap->driver, "vpif display", sizeof(cap->driver)); 705 strlcpy(cap->driver, "vpif display", sizeof(cap->driver));
707 strlcpy(cap->bus_info, "Platform", sizeof(cap->bus_info)); 706 strlcpy(cap->bus_info, "Platform", sizeof(cap->bus_info));
@@ -1740,10 +1739,8 @@ static __init int vpif_probe(struct platform_device *pdev)
1740 vfd->v4l2_dev = &vpif_obj.v4l2_dev; 1739 vfd->v4l2_dev = &vpif_obj.v4l2_dev;
1741 vfd->release = video_device_release; 1740 vfd->release = video_device_release;
1742 snprintf(vfd->name, sizeof(vfd->name), 1741 snprintf(vfd->name, sizeof(vfd->name),
1743 "DM646x_VPIFDisplay_DRIVER_V%d.%d.%d", 1742 "DM646x_VPIFDisplay_DRIVER_V%s",
1744 (VPIF_DISPLAY_VERSION_CODE >> 16) & 0xff, 1743 VPIF_DISPLAY_VERSION);
1745 (VPIF_DISPLAY_VERSION_CODE >> 8) & 0xff,
1746 (VPIF_DISPLAY_VERSION_CODE) & 0xff);
1747 1744
1748 /* Set video_dev to the video device */ 1745 /* Set video_dev to the video device */
1749 ch->video_dev = vfd; 1746 ch->video_dev = vfd;
diff --git a/drivers/media/video/davinci/vpif_display.h b/drivers/media/video/davinci/vpif_display.h
index b53aaa883075..5d1936dafed2 100644
--- a/drivers/media/video/davinci/vpif_display.h
+++ b/drivers/media/video/davinci/vpif_display.h
@@ -18,7 +18,6 @@
18 18
19/* Header files */ 19/* Header files */
20#include <linux/videodev2.h> 20#include <linux/videodev2.h>
21#include <linux/version.h>
22#include <media/v4l2-common.h> 21#include <media/v4l2-common.h>
23#include <media/v4l2-device.h> 22#include <media/v4l2-device.h>
24#include <media/videobuf-core.h> 23#include <media/videobuf-core.h>
@@ -27,12 +26,7 @@
27#include "vpif.h" 26#include "vpif.h"
28 27
29/* Macros */ 28/* Macros */
30#define VPIF_MAJOR_RELEASE (0) 29#define VPIF_DISPLAY_VERSION "0.0.2"
31#define VPIF_MINOR_RELEASE (0)
32#define VPIF_BUILD (1)
33
34#define VPIF_DISPLAY_VERSION_CODE \
35 ((VPIF_MAJOR_RELEASE << 16) | (VPIF_MINOR_RELEASE << 8) | VPIF_BUILD)
36 30
37#define VPIF_VALID_FIELD(field) \ 31#define VPIF_VALID_FIELD(field) \
38 (((V4L2_FIELD_ANY == field) || (V4L2_FIELD_NONE == field)) || \ 32 (((V4L2_FIELD_ANY == field) || (V4L2_FIELD_NONE == field)) || \
diff --git a/drivers/media/video/em28xx/Kconfig b/drivers/media/video/em28xx/Kconfig
index 3cb78f26df90..281ee427c2ab 100644
--- a/drivers/media/video/em28xx/Kconfig
+++ b/drivers/media/video/em28xx/Kconfig
@@ -3,7 +3,6 @@ config VIDEO_EM28XX
3 depends on VIDEO_DEV && I2C 3 depends on VIDEO_DEV && I2C
4 select VIDEO_TUNER 4 select VIDEO_TUNER
5 select VIDEO_TVEEPROM 5 select VIDEO_TVEEPROM
6 depends on RC_CORE
7 select VIDEOBUF_VMALLOC 6 select VIDEOBUF_VMALLOC
8 select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO 7 select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO
9 select VIDEO_TVP5150 if VIDEO_HELPER_CHIPS_AUTO 8 select VIDEO_TVP5150 if VIDEO_HELPER_CHIPS_AUTO
@@ -40,7 +39,18 @@ config VIDEO_EM28XX_DVB
40 select DVB_S921 if !DVB_FE_CUSTOMISE 39 select DVB_S921 if !DVB_FE_CUSTOMISE
41 select DVB_DRXD if !DVB_FE_CUSTOMISE 40 select DVB_DRXD if !DVB_FE_CUSTOMISE
42 select DVB_CXD2820R if !DVB_FE_CUSTOMISE 41 select DVB_CXD2820R if !DVB_FE_CUSTOMISE
42 select DVB_DRXK if !DVB_FE_CUSTOMISE
43 select DVB_TDA18271C2DD if !DVB_FE_CUSTOMISE
43 select VIDEOBUF_DVB 44 select VIDEOBUF_DVB
44 ---help--- 45 ---help---
45 This adds support for DVB cards based on the 46 This adds support for DVB cards based on the
46 Empiatech em28xx chips. 47 Empiatech em28xx chips.
48
49config VIDEO_EM28XX_RC
50 bool "EM28XX Remote Controller support"
51 depends on RC_CORE
52 depends on VIDEO_EM28XX
53 depends on !(RC_CORE=m && VIDEO_EM28XX=y)
54 default y
55 ---help---
56 Enables Remote Controller support on em28xx driver.
diff --git a/drivers/media/video/em28xx/Makefile b/drivers/media/video/em28xx/Makefile
index d0f093d1d0df..38aaa004f57d 100644
--- a/drivers/media/video/em28xx/Makefile
+++ b/drivers/media/video/em28xx/Makefile
@@ -1,5 +1,7 @@
1em28xx-objs := em28xx-video.o em28xx-i2c.o em28xx-cards.o em28xx-core.o \ 1em28xx-y := em28xx-video.o em28xx-i2c.o em28xx-cards.o
2 em28xx-input.o em28xx-vbi.o 2em28xx-y += em28xx-core.o em28xx-vbi.o
3
4em28xx-$(CONFIG_VIDEO_EM28XX_RC) += em28xx-input.o
3 5
4em28xx-alsa-objs := em28xx-audio.o 6em28xx-alsa-objs := em28xx-audio.o
5 7
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index 3c48a72eb7de..cff0768afbf5 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -3,9 +3,9 @@
3 * 3 *
4 * Copyright (C) 2006 Markus Rechberger <mrechberger@gmail.com> 4 * Copyright (C) 2006 Markus Rechberger <mrechberger@gmail.com>
5 * 5 *
6 * Copyright (C) 2007 Mauro Carvalho Chehab <mchehab@infradead.org> 6 * Copyright (C) 2007-2011 Mauro Carvalho Chehab <mchehab@redhat.com>
7 * - Port to work with the in-kernel driver 7 * - Port to work with the in-kernel driver
8 * - Several cleanups 8 * - Cleanups, fixes, alsa-controls, etc.
9 * 9 *
10 * This driver is based on my previous au600 usb pstn audio driver 10 * This driver is based on my previous au600 usb pstn audio driver
11 * and inherits all the copyrights 11 * and inherits all the copyrights
@@ -41,6 +41,7 @@
41#include <sound/info.h> 41#include <sound/info.h>
42#include <sound/initval.h> 42#include <sound/initval.h>
43#include <sound/control.h> 43#include <sound/control.h>
44#include <sound/tlv.h>
44#include <media/v4l2-common.h> 45#include <media/v4l2-common.h>
45#include "em28xx.h" 46#include "em28xx.h"
46 47
@@ -212,9 +213,12 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
212 for (i = 0; i < EM28XX_AUDIO_BUFS; i++) { 213 for (i = 0; i < EM28XX_AUDIO_BUFS; i++) {
213 errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC); 214 errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC);
214 if (errCode) { 215 if (errCode) {
216 em28xx_errdev("submit of audio urb failed\n");
215 em28xx_deinit_isoc_audio(dev); 217 em28xx_deinit_isoc_audio(dev);
218 atomic_set(&dev->stream_started, 0);
216 return errCode; 219 return errCode;
217 } 220 }
221
218 } 222 }
219 223
220 return 0; 224 return 0;
@@ -245,6 +249,7 @@ static struct snd_pcm_hardware snd_em28xx_hw_capture = {
245 .info = SNDRV_PCM_INFO_BLOCK_TRANSFER | 249 .info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
246 SNDRV_PCM_INFO_MMAP | 250 SNDRV_PCM_INFO_MMAP |
247 SNDRV_PCM_INFO_INTERLEAVED | 251 SNDRV_PCM_INFO_INTERLEAVED |
252 SNDRV_PCM_INFO_BATCH |
248 SNDRV_PCM_INFO_MMAP_VALID, 253 SNDRV_PCM_INFO_MMAP_VALID,
249 254
250 .formats = SNDRV_PCM_FMTBIT_S16_LE, 255 .formats = SNDRV_PCM_FMTBIT_S16_LE,
@@ -276,24 +281,27 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
276 return -ENODEV; 281 return -ENODEV;
277 } 282 }
278 283
279 /* Sets volume, mute, etc */ 284 runtime->hw = snd_em28xx_hw_capture;
285 if ((dev->alt == 0 || dev->audio_ifnum) && dev->adev.users == 0) {
286 if (dev->audio_ifnum)
287 dev->alt = 1;
288 else
289 dev->alt = 7;
280 290
281 dev->mute = 0; 291 dprintk("changing alternate number on interface %d to %d\n",
282 mutex_lock(&dev->lock); 292 dev->audio_ifnum, dev->alt);
283 ret = em28xx_audio_analog_set(dev); 293 usb_set_interface(dev->udev, dev->audio_ifnum, dev->alt);
284 if (ret < 0)
285 goto err;
286 294
287 runtime->hw = snd_em28xx_hw_capture; 295 /* Sets volume, mute, etc */
288 if (dev->alt == 0 && dev->adev.users == 0) { 296 dev->mute = 0;
289 int errCode; 297 mutex_lock(&dev->lock);
290 dev->alt = 7; 298 ret = em28xx_audio_analog_set(dev);
291 dprintk("changing alternate number to 7\n"); 299 if (ret < 0)
292 errCode = usb_set_interface(dev->udev, 0, 7); 300 goto err;
293 }
294 301
295 dev->adev.users++; 302 dev->adev.users++;
296 mutex_unlock(&dev->lock); 303 mutex_unlock(&dev->lock);
304 }
297 305
298 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); 306 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
299 dev->adev.capture_pcm_substream = substream; 307 dev->adev.capture_pcm_substream = substream;
@@ -342,6 +350,8 @@ static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream,
342 350
343 ret = snd_pcm_alloc_vmalloc_buffer(substream, 351 ret = snd_pcm_alloc_vmalloc_buffer(substream,
344 params_buffer_bytes(hw_params)); 352 params_buffer_bytes(hw_params));
353 if (ret < 0)
354 return ret;
345 format = params_format(hw_params); 355 format = params_format(hw_params);
346 rate = params_rate(hw_params); 356 rate = params_rate(hw_params);
347 channels = params_channels(hw_params); 357 channels = params_channels(hw_params);
@@ -393,20 +403,24 @@ static int snd_em28xx_capture_trigger(struct snd_pcm_substream *substream,
393 int cmd) 403 int cmd)
394{ 404{
395 struct em28xx *dev = snd_pcm_substream_chip(substream); 405 struct em28xx *dev = snd_pcm_substream_chip(substream);
396 int retval; 406 int retval = 0;
397 407
398 switch (cmd) { 408 switch (cmd) {
409 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
410 case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
399 case SNDRV_PCM_TRIGGER_START: 411 case SNDRV_PCM_TRIGGER_START:
400 atomic_set(&dev->stream_started, 1); 412 atomic_set(&dev->stream_started, 1);
401 break; 413 break;
414 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */
415 case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */
402 case SNDRV_PCM_TRIGGER_STOP: 416 case SNDRV_PCM_TRIGGER_STOP:
403 atomic_set(&dev->stream_started, 1); 417 atomic_set(&dev->stream_started, 0);
404 break; 418 break;
405 default: 419 default:
406 retval = -EINVAL; 420 retval = -EINVAL;
407 } 421 }
408 schedule_work(&dev->wq_trigger); 422 schedule_work(&dev->wq_trigger);
409 return 0; 423 return retval;
410} 424}
411 425
412static snd_pcm_uframes_t snd_em28xx_capture_pointer(struct snd_pcm_substream 426static snd_pcm_uframes_t snd_em28xx_capture_pointer(struct snd_pcm_substream
@@ -432,6 +446,179 @@ static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
432 return vmalloc_to_page(pageptr); 446 return vmalloc_to_page(pageptr);
433} 447}
434 448
449/*
450 * AC97 volume control support
451 */
452static int em28xx_vol_info(struct snd_kcontrol *kcontrol,
453 struct snd_ctl_elem_info *info)
454{
455 info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
456 info->count = 2;
457 info->value.integer.min = 0;
458 info->value.integer.max = 0x1f;
459
460 return 0;
461}
462
463static int em28xx_vol_put(struct snd_kcontrol *kcontrol,
464 struct snd_ctl_elem_value *value)
465{
466 struct em28xx *dev = snd_kcontrol_chip(kcontrol);
467 u16 val = (0x1f - (value->value.integer.value[0] & 0x1f)) |
468 (0x1f - (value->value.integer.value[1] & 0x1f)) << 8;
469 int rc;
470
471 mutex_lock(&dev->lock);
472 rc = em28xx_read_ac97(dev, kcontrol->private_value);
473 if (rc < 0)
474 goto err;
475
476 val |= rc & 0x8000; /* Preserve the mute flag */
477
478 rc = em28xx_write_ac97(dev, kcontrol->private_value, val);
479 if (rc < 0)
480 goto err;
481
482 dprintk("%sleft vol %d, right vol %d (0x%04x) to ac97 volume control 0x%04x\n",
483 (val & 0x8000) ? "muted " : "",
484 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f),
485 val, (int)kcontrol->private_value);
486
487err:
488 mutex_unlock(&dev->lock);
489 return rc;
490}
491
492static int em28xx_vol_get(struct snd_kcontrol *kcontrol,
493 struct snd_ctl_elem_value *value)
494{
495 struct em28xx *dev = snd_kcontrol_chip(kcontrol);
496 int val;
497
498 mutex_lock(&dev->lock);
499 val = em28xx_read_ac97(dev, kcontrol->private_value);
500 mutex_unlock(&dev->lock);
501 if (val < 0)
502 return val;
503
504 dprintk("%sleft vol %d, right vol %d (0x%04x) from ac97 volume control 0x%04x\n",
505 (val & 0x8000) ? "muted " : "",
506 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f),
507 val, (int)kcontrol->private_value);
508
509 value->value.integer.value[0] = 0x1f - (val & 0x1f);
510 value->value.integer.value[1] = 0x1f - ((val << 8) & 0x1f);
511
512 return 0;
513}
514
515static int em28xx_vol_put_mute(struct snd_kcontrol *kcontrol,
516 struct snd_ctl_elem_value *value)
517{
518 struct em28xx *dev = snd_kcontrol_chip(kcontrol);
519 u16 val = value->value.integer.value[0];
520 int rc;
521
522 mutex_lock(&dev->lock);
523 rc = em28xx_read_ac97(dev, kcontrol->private_value);
524 if (rc < 0)
525 goto err;
526
527 if (val)
528 rc &= 0x1f1f;
529 else
530 rc |= 0x8000;
531
532 rc = em28xx_write_ac97(dev, kcontrol->private_value, rc);
533 if (rc < 0)
534 goto err;
535
536 dprintk("%sleft vol %d, right vol %d (0x%04x) to ac97 volume control 0x%04x\n",
537 (val & 0x8000) ? "muted " : "",
538 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f),
539 val, (int)kcontrol->private_value);
540
541err:
542 mutex_unlock(&dev->lock);
543 return rc;
544}
545
546static int em28xx_vol_get_mute(struct snd_kcontrol *kcontrol,
547 struct snd_ctl_elem_value *value)
548{
549 struct em28xx *dev = snd_kcontrol_chip(kcontrol);
550 int val;
551
552 mutex_lock(&dev->lock);
553 val = em28xx_read_ac97(dev, kcontrol->private_value);
554 mutex_unlock(&dev->lock);
555 if (val < 0)
556 return val;
557
558 if (val & 0x8000)
559 value->value.integer.value[0] = 0;
560 else
561 value->value.integer.value[0] = 1;
562
563 dprintk("%sleft vol %d, right vol %d (0x%04x) from ac97 volume control 0x%04x\n",
564 (val & 0x8000) ? "muted " : "",
565 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f),
566 val, (int)kcontrol->private_value);
567
568 return 0;
569}
570
571static const DECLARE_TLV_DB_SCALE(em28xx_db_scale, -3450, 150, 0);
572
573static int em28xx_cvol_new(struct snd_card *card, struct em28xx *dev,
574 char *name, int id)
575{
576 int err;
577 char ctl_name[44];
578 struct snd_kcontrol *kctl;
579 struct snd_kcontrol_new tmp;
580
581 memset (&tmp, 0, sizeof(tmp));
582 tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
583 tmp.private_value = id,
584 tmp.name = ctl_name,
585
586 /* Add Mute Control */
587 sprintf(ctl_name, "%s Switch", name);
588 tmp.get = em28xx_vol_get_mute;
589 tmp.put = em28xx_vol_put_mute;
590 tmp.info = snd_ctl_boolean_mono_info;
591 kctl = snd_ctl_new1(&tmp, dev);
592 err = snd_ctl_add(card, kctl);
593 if (err < 0)
594 return err;
595 dprintk("Added control %s for ac97 volume control 0x%04x\n",
596 ctl_name, id);
597
598 memset (&tmp, 0, sizeof(tmp));
599 tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
600 tmp.private_value = id,
601 tmp.name = ctl_name,
602
603 /* Add Volume Control */
604 sprintf(ctl_name, "%s Volume", name);
605 tmp.get = em28xx_vol_get;
606 tmp.put = em28xx_vol_put;
607 tmp.info = em28xx_vol_info;
608 tmp.tlv.p = em28xx_db_scale,
609 kctl = snd_ctl_new1(&tmp, dev);
610 err = snd_ctl_add(card, kctl);
611 if (err < 0)
612 return err;
613 dprintk("Added control %s for ac97 volume control 0x%04x\n",
614 ctl_name, id);
615
616 return 0;
617}
618
619/*
620 * register/unregister code and data
621 */
435static struct snd_pcm_ops snd_em28xx_pcm_capture = { 622static struct snd_pcm_ops snd_em28xx_pcm_capture = {
436 .open = snd_em28xx_capture_open, 623 .open = snd_em28xx_capture_open,
437 .close = snd_em28xx_pcm_close, 624 .close = snd_em28xx_pcm_close,
@@ -452,17 +639,17 @@ static int em28xx_audio_init(struct em28xx *dev)
452 static int devnr; 639 static int devnr;
453 int err; 640 int err;
454 641
455 if (dev->has_alsa_audio != 1) { 642 if (!dev->has_alsa_audio || dev->audio_ifnum < 0) {
456 /* This device does not support the extension (in this case 643 /* This device does not support the extension (in this case
457 the device is expecting the snd-usb-audio module or 644 the device is expecting the snd-usb-audio module or
458 doesn't have analog audio support at all) */ 645 doesn't have analog audio support at all) */
459 return 0; 646 return 0;
460 } 647 }
461 648
462 printk(KERN_INFO "em28xx-audio.c: probing for em28x1 " 649 printk(KERN_INFO "em28xx-audio.c: probing for em28xx Audio Vendor Class\n");
463 "non standard usbaudio\n");
464 printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2006 Markus " 650 printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2006 Markus "
465 "Rechberger\n"); 651 "Rechberger\n");
652 printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2007-2011 Mauro Carvalho Chehab\n");
466 653
467 err = snd_card_create(index[devnr], "Em28xx Audio", THIS_MODULE, 0, 654 err = snd_card_create(index[devnr], "Em28xx Audio", THIS_MODULE, 0,
468 &card); 655 &card);
@@ -488,6 +675,22 @@ static int em28xx_audio_init(struct em28xx *dev)
488 675
489 INIT_WORK(&dev->wq_trigger, audio_trigger); 676 INIT_WORK(&dev->wq_trigger, audio_trigger);
490 677
678 if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
679 em28xx_cvol_new(card, dev, "Video", AC97_VIDEO_VOL);
680 em28xx_cvol_new(card, dev, "Line In", AC97_LINEIN_VOL);
681 em28xx_cvol_new(card, dev, "Phone", AC97_PHONE_VOL);
682 em28xx_cvol_new(card, dev, "Microphone", AC97_PHONE_VOL);
683 em28xx_cvol_new(card, dev, "CD", AC97_CD_VOL);
684 em28xx_cvol_new(card, dev, "AUX", AC97_AUX_VOL);
685 em28xx_cvol_new(card, dev, "PCM", AC97_PCM_OUT_VOL);
686
687 em28xx_cvol_new(card, dev, "Master", AC97_MASTER_VOL);
688 em28xx_cvol_new(card, dev, "Line", AC97_LINE_LEVEL_VOL);
689 em28xx_cvol_new(card, dev, "Mono", AC97_MASTER_MONO_VOL);
690 em28xx_cvol_new(card, dev, "LFE", AC97_LFE_MASTER_VOL);
691 em28xx_cvol_new(card, dev, "Surround", AC97_SURR_MASTER_VOL);
692 }
693
491 err = snd_card_register(card); 694 err = snd_card_register(card);
492 if (err < 0) { 695 if (err < 0) {
493 snd_card_free(card); 696 snd_card_free(card);
@@ -538,7 +741,7 @@ static void __exit em28xx_alsa_unregister(void)
538 741
539MODULE_LICENSE("GPL"); 742MODULE_LICENSE("GPL");
540MODULE_AUTHOR("Markus Rechberger <mrechberger@gmail.com>"); 743MODULE_AUTHOR("Markus Rechberger <mrechberger@gmail.com>");
541MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); 744MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
542MODULE_DESCRIPTION("Em28xx Audio driver"); 745MODULE_DESCRIPTION("Em28xx Audio driver");
543 746
544module_init(em28xx_alsa_register); 747module_init(em28xx_alsa_register);
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 4e37375decf5..3e3959fee419 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -289,7 +289,7 @@ static struct em28xx_reg_seq leadership_reset[] = {
289 { -1, -1, -1, -1}, 289 { -1, -1, -1, -1},
290}; 290};
291 291
292/* 2013:024f PCTV Systems nanoStick T2 290e 292/* 2013:024f PCTV nanoStick T2 290e
293 * GPIO_6 - demod reset 293 * GPIO_6 - demod reset
294 * GPIO_7 - LED 294 * GPIO_7 - LED
295 */ 295 */
@@ -300,6 +300,23 @@ static struct em28xx_reg_seq pctv_290e[] = {
300 {-1, -1, -1, -1}, 300 {-1, -1, -1, -1},
301}; 301};
302 302
303#if 0
304static struct em28xx_reg_seq terratec_h5_gpio[] = {
305 {EM28XX_R08_GPIO, 0xff, 0xff, 10},
306 {EM2874_R80_GPIO, 0xf6, 0xff, 100},
307 {EM2874_R80_GPIO, 0xf2, 0xff, 50},
308 {EM2874_R80_GPIO, 0xf6, 0xff, 50},
309 { -1, -1, -1, -1},
310};
311
312static struct em28xx_reg_seq terratec_h5_digital[] = {
313 {EM2874_R80_GPIO, 0xf6, 0xff, 10},
314 {EM2874_R80_GPIO, 0xe6, 0xff, 100},
315 {EM2874_R80_GPIO, 0xa6, 0xff, 10},
316 { -1, -1, -1, -1},
317};
318#endif
319
303/* 320/*
304 * Board definitions 321 * Board definitions
305 */ 322 */
@@ -843,6 +860,19 @@ struct em28xx_board em28xx_boards[] = {
843 .gpio = terratec_cinergy_USB_XS_FR_analog, 860 .gpio = terratec_cinergy_USB_XS_FR_analog,
844 } }, 861 } },
845 }, 862 },
863 [EM2884_BOARD_TERRATEC_H5] = {
864 .name = "Terratec Cinergy H5",
865 .has_dvb = 1,
866#if 0
867 .tuner_type = TUNER_PHILIPS_TDA8290,
868 .tuner_addr = 0x41,
869 .dvb_gpio = terratec_h5_digital, /* FIXME: probably wrong */
870 .tuner_gpio = terratec_h5_gpio,
871#endif
872 .i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
873 EM28XX_I2C_CLK_WAIT_ENABLE |
874 EM28XX_I2C_FREQ_400_KHZ,
875 },
846 [EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900] = { 876 [EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900] = {
847 .name = "Hauppauge WinTV HVR 900", 877 .name = "Hauppauge WinTV HVR 900",
848 .tda9887_conf = TDA9887_PRESENT, 878 .tda9887_conf = TDA9887_PRESENT,
@@ -1259,7 +1289,7 @@ struct em28xx_board em28xx_boards[] = {
1259 } }, 1289 } },
1260 }, 1290 },
1261 1291
1262 [EM2874_LEADERSHIP_ISDBT] = { 1292 [EM2874_BOARD_LEADERSHIP_ISDBT] = {
1263 .i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT | 1293 .i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
1264 EM28XX_I2C_CLK_WAIT_ENABLE | 1294 EM28XX_I2C_CLK_WAIT_ENABLE |
1265 EM28XX_I2C_FREQ_100_KHZ, 1295 EM28XX_I2C_FREQ_100_KHZ,
@@ -1319,7 +1349,6 @@ struct em28xx_board em28xx_boards[] = {
1319 }, 1349 },
1320 [EM2880_BOARD_KWORLD_DVB_305U] = { 1350 [EM2880_BOARD_KWORLD_DVB_305U] = {
1321 .name = "KWorld DVB-T 305U", 1351 .name = "KWorld DVB-T 305U",
1322 .valid = EM28XX_BOARD_NOT_VALIDATED,
1323 .tuner_type = TUNER_XC2028, 1352 .tuner_type = TUNER_XC2028,
1324 .tuner_gpio = default_tuner_gpio, 1353 .tuner_gpio = default_tuner_gpio,
1325 .decoder = EM28XX_TVP5150, 1354 .decoder = EM28XX_TVP5150,
@@ -1770,16 +1799,16 @@ struct em28xx_board em28xx_boards[] = {
1770 .dvb_gpio = kworld_a340_digital, 1799 .dvb_gpio = kworld_a340_digital,
1771 .tuner_gpio = default_tuner_gpio, 1800 .tuner_gpio = default_tuner_gpio,
1772 }, 1801 },
1773 /* 2013:024f PCTV Systems nanoStick T2 290e. 1802 /* 2013:024f PCTV nanoStick T2 290e.
1774 * Empia EM28174, Sony CXD2820R and NXP TDA18271HD/C2 */ 1803 * Empia EM28174, Sony CXD2820R and NXP TDA18271HD/C2 */
1775 [EM28174_BOARD_PCTV_290E] = { 1804 [EM28174_BOARD_PCTV_290E] = {
1805 .name = "PCTV nanoStick T2 290e",
1776 .i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT | 1806 .i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
1777 EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ, 1807 EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ,
1778 .xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
1779 .name = "PCTV Systems nanoStick T2 290e",
1780 .tuner_type = TUNER_ABSENT, 1808 .tuner_type = TUNER_ABSENT,
1781 .tuner_gpio = pctv_290e, 1809 .tuner_gpio = pctv_290e,
1782 .has_dvb = 1, 1810 .has_dvb = 1,
1811 .ir_codes = RC_MAP_PINNACLE_PCTV_HD,
1783 }, 1812 },
1784}; 1813};
1785const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards); 1814const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
@@ -1855,8 +1884,10 @@ struct usb_device_id em28xx_id_table[] = {
1855 { USB_DEVICE(0x0ccd, 0x0042), 1884 { USB_DEVICE(0x0ccd, 0x0042),
1856 .driver_info = EM2882_BOARD_TERRATEC_HYBRID_XS }, 1885 .driver_info = EM2882_BOARD_TERRATEC_HYBRID_XS },
1857 { USB_DEVICE(0x0ccd, 0x0043), 1886 { USB_DEVICE(0x0ccd, 0x0043),
1858 .driver_info = EM2870_BOARD_TERRATEC_XS }, 1887 .driver_info = EM2884_BOARD_TERRATEC_H5 },
1859 { USB_DEVICE(0x0ccd, 0x0047), 1888 { USB_DEVICE(0x0ccd, 0x10a2), /* Rev. 1 */
1889 .driver_info = EM2884_BOARD_TERRATEC_H5 },
1890 { USB_DEVICE(0x0ccd, 0x10ad), /* Rev. 2 */
1860 .driver_info = EM2880_BOARD_TERRATEC_PRODIGY_XS }, 1891 .driver_info = EM2880_BOARD_TERRATEC_PRODIGY_XS },
1861 { USB_DEVICE(0x0ccd, 0x0084), 1892 { USB_DEVICE(0x0ccd, 0x0084),
1862 .driver_info = EM2860_BOARD_TERRATEC_AV350 }, 1893 .driver_info = EM2860_BOARD_TERRATEC_AV350 },
@@ -1937,7 +1968,7 @@ static struct em28xx_hash_table em28xx_i2c_hash[] = {
1937 {0x77800080, EM2860_BOARD_TVP5150_REFERENCE_DESIGN, TUNER_ABSENT}, 1968 {0x77800080, EM2860_BOARD_TVP5150_REFERENCE_DESIGN, TUNER_ABSENT},
1938 {0xc51200e3, EM2820_BOARD_GADMEI_TVR200, TUNER_LG_PAL_NEW_TAPC}, 1969 {0xc51200e3, EM2820_BOARD_GADMEI_TVR200, TUNER_LG_PAL_NEW_TAPC},
1939 {0x4ba50080, EM2861_BOARD_GADMEI_UTV330PLUS, TUNER_TNF_5335MF}, 1970 {0x4ba50080, EM2861_BOARD_GADMEI_UTV330PLUS, TUNER_TNF_5335MF},
1940 {0x6b800080, EM2874_LEADERSHIP_ISDBT, TUNER_ABSENT}, 1971 {0x6b800080, EM2874_BOARD_LEADERSHIP_ISDBT, TUNER_ABSENT},
1941}; 1972};
1942 1973
1943/* I2C possible address to saa7115, tvp5150, msp3400, tvaudio */ 1974/* I2C possible address to saa7115, tvp5150, msp3400, tvaudio */
@@ -2660,10 +2691,9 @@ void em28xx_card_setup(struct em28xx *dev)
2660 .addr = 0xba >> 1, 2691 .addr = 0xba >> 1,
2661 .platform_data = &pdata, 2692 .platform_data = &pdata,
2662 }; 2693 };
2663 struct v4l2_subdev *sd;
2664 2694
2665 pdata.xtal = dev->sensor_xtal; 2695 pdata.xtal = dev->sensor_xtal;
2666 sd = v4l2_i2c_new_subdev_board(&dev->v4l2_dev, &dev->i2c_adap, 2696 v4l2_i2c_new_subdev_board(&dev->v4l2_dev, &dev->i2c_adap,
2667 &mt9v011_info, NULL); 2697 &mt9v011_info, NULL);
2668 } 2698 }
2669 2699
@@ -2842,11 +2872,26 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
2842 em28xx_info("chip ID is em2882/em2883\n"); 2872 em28xx_info("chip ID is em2882/em2883\n");
2843 dev->wait_after_write = 0; 2873 dev->wait_after_write = 0;
2844 break; 2874 break;
2875 case CHIP_ID_EM2884:
2876 em28xx_info("chip ID is em2884\n");
2877 dev->reg_gpio_num = EM2874_R80_GPIO;
2878 dev->wait_after_write = 0;
2879 break;
2845 default: 2880 default:
2846 em28xx_info("em28xx chip ID = %d\n", dev->chip_id); 2881 em28xx_info("em28xx chip ID = %d\n", dev->chip_id);
2847 } 2882 }
2848 } 2883 }
2849 2884
2885 if (dev->is_audio_only) {
2886 errCode = em28xx_audio_setup(dev);
2887 if (errCode)
2888 return -ENODEV;
2889 em28xx_add_into_devlist(dev);
2890 em28xx_init_extension(dev);
2891
2892 return 0;
2893 }
2894
2850 /* Prepopulate cached GPO register content */ 2895 /* Prepopulate cached GPO register content */
2851 retval = em28xx_read_reg(dev, dev->reg_gpo_num); 2896 retval = em28xx_read_reg(dev, dev->reg_gpo_num);
2852 if (retval >= 0) 2897 if (retval >= 0)
@@ -2947,6 +2992,9 @@ fail_reg_devices:
2947 return retval; 2992 return retval;
2948} 2993}
2949 2994
2995/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
2996#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
2997
2950/* 2998/*
2951 * em28xx_usb_probe() 2999 * em28xx_usb_probe()
2952 * checks for supported devices 3000 * checks for supported devices
@@ -2956,15 +3004,15 @@ static int em28xx_usb_probe(struct usb_interface *interface,
2956{ 3004{
2957 const struct usb_endpoint_descriptor *endpoint; 3005 const struct usb_endpoint_descriptor *endpoint;
2958 struct usb_device *udev; 3006 struct usb_device *udev;
2959 struct usb_interface *uif;
2960 struct em28xx *dev = NULL; 3007 struct em28xx *dev = NULL;
2961 int retval; 3008 int retval;
2962 int i, nr, ifnum, isoc_pipe; 3009 bool is_audio_only = false, has_audio = false;
3010 int i, nr, isoc_pipe;
3011 const int ifnum = interface->altsetting[0].desc.bInterfaceNumber;
2963 char *speed; 3012 char *speed;
2964 char descr[255] = ""; 3013 char descr[255] = "";
2965 3014
2966 udev = usb_get_dev(interface_to_usbdev(interface)); 3015 udev = usb_get_dev(interface_to_usbdev(interface));
2967 ifnum = interface->altsetting[0].desc.bInterfaceNumber;
2968 3016
2969 /* Check to see next free device and mark as used */ 3017 /* Check to see next free device and mark as used */
2970 nr = find_first_zero_bit(&em28xx_devused, EM28XX_MAXBOARDS); 3018 nr = find_first_zero_bit(&em28xx_devused, EM28XX_MAXBOARDS);
@@ -2984,6 +3032,19 @@ static int em28xx_usb_probe(struct usb_interface *interface,
2984 goto err; 3032 goto err;
2985 } 3033 }
2986 3034
3035 /* Get endpoints */
3036 for (i = 0; i < interface->num_altsetting; i++) {
3037 int ep;
3038
3039 for (ep = 0; ep < interface->altsetting[i].desc.bNumEndpoints; ep++) {
3040 struct usb_host_endpoint *e;
3041 e = &interface->altsetting[i].endpoint[ep];
3042
3043 if (e->desc.bEndpointAddress == 0x83)
3044 has_audio = true;
3045 }
3046 }
3047
2987 endpoint = &interface->cur_altsetting->endpoint[0].desc; 3048 endpoint = &interface->cur_altsetting->endpoint[0].desc;
2988 3049
2989 /* check if the device has the iso in endpoint at the correct place */ 3050 /* check if the device has the iso in endpoint at the correct place */
@@ -3003,19 +3064,22 @@ static int em28xx_usb_probe(struct usb_interface *interface,
3003 check_interface = 0; 3064 check_interface = 0;
3004 3065
3005 if (!check_interface) { 3066 if (!check_interface) {
3006 em28xx_err(DRIVER_NAME " video device (%04x:%04x): " 3067 if (has_audio) {
3007 "interface %i, class %i found.\n", 3068 is_audio_only = true;
3008 le16_to_cpu(udev->descriptor.idVendor), 3069 } else {
3009 le16_to_cpu(udev->descriptor.idProduct), 3070 em28xx_err(DRIVER_NAME " video device (%04x:%04x): "
3010 ifnum, 3071 "interface %i, class %i found.\n",
3011 interface->altsetting[0].desc.bInterfaceClass); 3072 le16_to_cpu(udev->descriptor.idVendor),
3012 3073 le16_to_cpu(udev->descriptor.idProduct),
3013 em28xx_err(DRIVER_NAME " This is an anciliary " 3074 ifnum,
3014 "interface not used by the driver\n"); 3075 interface->altsetting[0].desc.bInterfaceClass);
3015 3076 em28xx_err(DRIVER_NAME " This is an anciliary "
3016 em28xx_devused &= ~(1<<nr); 3077 "interface not used by the driver\n");
3017 retval = -ENODEV; 3078
3018 goto err; 3079 em28xx_devused &= ~(1<<nr);
3080 retval = -ENODEV;
3081 goto err;
3082 }
3019 } 3083 }
3020 } 3084 }
3021 3085
@@ -3045,8 +3109,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
3045 if (*descr) 3109 if (*descr)
3046 strlcat(descr, " ", sizeof(descr)); 3110 strlcat(descr, " ", sizeof(descr));
3047 3111
3048 printk(DRIVER_NAME ": New device %s@ %s Mbps " 3112 printk(KERN_INFO DRIVER_NAME
3049 "(%04x:%04x, interface %d, class %d)\n", 3113 ": New device %s@ %s Mbps (%04x:%04x, interface %d, class %d)\n",
3050 descr, 3114 descr,
3051 speed, 3115 speed,
3052 le16_to_cpu(udev->descriptor.idVendor), 3116 le16_to_cpu(udev->descriptor.idVendor),
@@ -3054,6 +3118,11 @@ static int em28xx_usb_probe(struct usb_interface *interface,
3054 ifnum, 3118 ifnum,
3055 interface->altsetting->desc.bInterfaceNumber); 3119 interface->altsetting->desc.bInterfaceNumber);
3056 3120
3121 if (has_audio)
3122 printk(KERN_INFO DRIVER_NAME
3123 ": Audio Vendor Class interface %i found\n",
3124 ifnum);
3125
3057 /* 3126 /*
3058 * Make sure we have 480 Mbps of bandwidth, otherwise things like 3127 * Make sure we have 480 Mbps of bandwidth, otherwise things like
3059 * video stream wouldn't likely work, since 12 Mbps is generally 3128 * video stream wouldn't likely work, since 12 Mbps is generally
@@ -3089,10 +3158,13 @@ static int em28xx_usb_probe(struct usb_interface *interface,
3089 dev->devno = nr; 3158 dev->devno = nr;
3090 dev->model = id->driver_info; 3159 dev->model = id->driver_info;
3091 dev->alt = -1; 3160 dev->alt = -1;
3161 dev->is_audio_only = is_audio_only;
3162 dev->has_alsa_audio = has_audio;
3163 dev->audio_ifnum = ifnum;
3092 3164
3093 /* Checks if audio is provided by some interface */ 3165 /* Checks if audio is provided by some interface */
3094 for (i = 0; i < udev->config->desc.bNumInterfaces; i++) { 3166 for (i = 0; i < udev->config->desc.bNumInterfaces; i++) {
3095 uif = udev->config->interface[i]; 3167 struct usb_interface *uif = udev->config->interface[i];
3096 if (uif->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) { 3168 if (uif->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
3097 dev->has_audio_class = 1; 3169 dev->has_audio_class = 1;
3098 break; 3170 break;
@@ -3100,9 +3172,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
3100 } 3172 }
3101 3173
3102 /* compute alternate max packet sizes */ 3174 /* compute alternate max packet sizes */
3103 uif = udev->actconfig->interface[0]; 3175 dev->num_alt = interface->num_altsetting;
3104
3105 dev->num_alt = uif->num_altsetting;
3106 dev->alt_max_pkt_size = kmalloc(32 * dev->num_alt, GFP_KERNEL); 3176 dev->alt_max_pkt_size = kmalloc(32 * dev->num_alt, GFP_KERNEL);
3107 3177
3108 if (dev->alt_max_pkt_size == NULL) { 3178 if (dev->alt_max_pkt_size == NULL) {
@@ -3114,14 +3184,21 @@ static int em28xx_usb_probe(struct usb_interface *interface,
3114 } 3184 }
3115 3185
3116 for (i = 0; i < dev->num_alt ; i++) { 3186 for (i = 0; i < dev->num_alt ; i++) {
3117 u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize); 3187 u16 tmp = le16_to_cpu(interface->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
3118 dev->alt_max_pkt_size[i] = 3188 unsigned int size = tmp & 0x7ff;
3119 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); 3189
3190 if (udev->speed == USB_SPEED_HIGH)
3191 size = size * hb_mult(tmp);
3192
3193 dev->alt_max_pkt_size[i] = size;
3120 } 3194 }
3121 3195
3122 if ((card[nr] >= 0) && (card[nr] < em28xx_bcount)) 3196 if ((card[nr] >= 0) && (card[nr] < em28xx_bcount))
3123 dev->model = card[nr]; 3197 dev->model = card[nr];
3124 3198
3199 /* save our data pointer in this interface device */
3200 usb_set_intfdata(interface, dev);
3201
3125 /* allocate device struct */ 3202 /* allocate device struct */
3126 mutex_init(&dev->lock); 3203 mutex_init(&dev->lock);
3127 mutex_lock(&dev->lock); 3204 mutex_lock(&dev->lock);
@@ -3133,9 +3210,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
3133 goto err; 3210 goto err;
3134 } 3211 }
3135 3212
3136 /* save our data pointer in this interface device */
3137 usb_set_intfdata(interface, dev);
3138
3139 request_modules(dev); 3213 request_modules(dev);
3140 3214
3141 /* Should be the last thing to do, to avoid newer udev's to 3215 /* Should be the last thing to do, to avoid newer udev's to
@@ -3164,6 +3238,13 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
3164 if (!dev) 3238 if (!dev)
3165 return; 3239 return;
3166 3240
3241 if (dev->is_audio_only) {
3242 mutex_lock(&dev->lock);
3243 em28xx_close_extension(dev);
3244 mutex_unlock(&dev->lock);
3245 return;
3246 }
3247
3167 em28xx_info("disconnecting %s\n", dev->vdev->name); 3248 em28xx_info("disconnecting %s\n", dev->vdev->name);
3168 3249
3169 flush_request_modules(dev); 3250 flush_request_modules(dev);
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index e33f145d867a..57b1b5c6d885 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -211,6 +211,7 @@ int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val)
211{ 211{
212 return em28xx_write_regs(dev, reg, &val, 1); 212 return em28xx_write_regs(dev, reg, &val, 1);
213} 213}
214EXPORT_SYMBOL_GPL(em28xx_write_reg);
214 215
215/* 216/*
216 * em28xx_write_reg_bits() 217 * em28xx_write_reg_bits()
@@ -286,6 +287,7 @@ int em28xx_read_ac97(struct em28xx *dev, u8 reg)
286 return ret; 287 return ret;
287 return le16_to_cpu(val); 288 return le16_to_cpu(val);
288} 289}
290EXPORT_SYMBOL_GPL(em28xx_read_ac97);
289 291
290/* 292/*
291 * em28xx_write_ac97() 293 * em28xx_write_ac97()
@@ -313,13 +315,14 @@ int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val)
313 315
314 return 0; 316 return 0;
315} 317}
318EXPORT_SYMBOL_GPL(em28xx_write_ac97);
316 319
317struct em28xx_vol_table { 320struct em28xx_vol_itable {
318 enum em28xx_amux mux; 321 enum em28xx_amux mux;
319 u8 reg; 322 u8 reg;
320}; 323};
321 324
322static struct em28xx_vol_table inputs[] = { 325static struct em28xx_vol_itable inputs[] = {
323 { EM28XX_AMUX_VIDEO, AC97_VIDEO_VOL }, 326 { EM28XX_AMUX_VIDEO, AC97_VIDEO_VOL },
324 { EM28XX_AMUX_LINE_IN, AC97_LINEIN_VOL }, 327 { EM28XX_AMUX_LINE_IN, AC97_LINEIN_VOL },
325 { EM28XX_AMUX_PHONE, AC97_PHONE_VOL }, 328 { EM28XX_AMUX_PHONE, AC97_PHONE_VOL },
@@ -403,7 +406,12 @@ static int em28xx_set_audio_source(struct em28xx *dev)
403 return ret; 406 return ret;
404} 407}
405 408
406static const struct em28xx_vol_table outputs[] = { 409struct em28xx_vol_otable {
410 enum em28xx_aout mux;
411 u8 reg;
412};
413
414static const struct em28xx_vol_otable outputs[] = {
407 { EM28XX_AOUT_MASTER, AC97_MASTER_VOL }, 415 { EM28XX_AOUT_MASTER, AC97_MASTER_VOL },
408 { EM28XX_AOUT_LINE, AC97_LINE_LEVEL_VOL }, 416 { EM28XX_AOUT_LINE, AC97_LINE_LEVEL_VOL },
409 { EM28XX_AOUT_MONO, AC97_MASTER_MONO_VOL }, 417 { EM28XX_AOUT_MONO, AC97_MASTER_MONO_VOL },
@@ -492,17 +500,13 @@ int em28xx_audio_setup(struct em28xx *dev)
492 if (dev->chip_id == CHIP_ID_EM2870 || dev->chip_id == CHIP_ID_EM2874 500 if (dev->chip_id == CHIP_ID_EM2870 || dev->chip_id == CHIP_ID_EM2874
493 || dev->chip_id == CHIP_ID_EM28174) { 501 || dev->chip_id == CHIP_ID_EM28174) {
494 /* Digital only device - don't load any alsa module */ 502 /* Digital only device - don't load any alsa module */
495 dev->audio_mode.has_audio = 0; 503 dev->audio_mode.has_audio = false;
496 dev->has_audio_class = 0; 504 dev->has_audio_class = false;
497 dev->has_alsa_audio = 0; 505 dev->has_alsa_audio = false;
498 return 0; 506 return 0;
499 } 507 }
500 508
501 /* If device doesn't support Usb Audio Class, use vendor class */ 509 dev->audio_mode.has_audio = true;
502 if (!dev->has_audio_class)
503 dev->has_alsa_audio = 1;
504
505 dev->audio_mode.has_audio = 1;
506 510
507 /* See how this device is configured */ 511 /* See how this device is configured */
508 cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG); 512 cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
@@ -512,8 +516,8 @@ int em28xx_audio_setup(struct em28xx *dev)
512 cfg = EM28XX_CHIPCFG_AC97; /* Be conservative */ 516 cfg = EM28XX_CHIPCFG_AC97; /* Be conservative */
513 } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) == 0x00) { 517 } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) == 0x00) {
514 /* The device doesn't have vendor audio at all */ 518 /* The device doesn't have vendor audio at all */
515 dev->has_alsa_audio = 0; 519 dev->has_alsa_audio = false;
516 dev->audio_mode.has_audio = 0; 520 dev->audio_mode.has_audio = false;
517 return 0; 521 return 0;
518 } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) == 522 } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
519 EM28XX_CHIPCFG_I2S_3_SAMPRATES) { 523 EM28XX_CHIPCFG_I2S_3_SAMPRATES) {
@@ -542,8 +546,8 @@ int em28xx_audio_setup(struct em28xx *dev)
542 */ 546 */
543 em28xx_warn("AC97 chip type couldn't be determined\n"); 547 em28xx_warn("AC97 chip type couldn't be determined\n");
544 dev->audio_mode.ac97 = EM28XX_NO_AC97; 548 dev->audio_mode.ac97 = EM28XX_NO_AC97;
545 dev->has_alsa_audio = 0; 549 dev->has_alsa_audio = false;
546 dev->audio_mode.has_audio = 0; 550 dev->audio_mode.has_audio = false;
547 goto init_audio; 551 goto init_audio;
548 } 552 }
549 553
@@ -615,7 +619,9 @@ int em28xx_capture_start(struct em28xx *dev, int start)
615{ 619{
616 int rc; 620 int rc;
617 621
618 if (dev->chip_id == CHIP_ID_EM2874 || dev->chip_id == CHIP_ID_EM28174) { 622 if (dev->chip_id == CHIP_ID_EM2874 ||
623 dev->chip_id == CHIP_ID_EM2884 ||
624 dev->chip_id == CHIP_ID_EM28174) {
619 /* The Transport Stream Enable Register moved in em2874 */ 625 /* The Transport Stream Enable Register moved in em2874 */
620 if (!start) { 626 if (!start) {
621 rc = em28xx_write_reg_bits(dev, EM2874_R5F_TS_ENABLE, 627 rc = em28xx_write_reg_bits(dev, EM2874_R5F_TS_ENABLE,
@@ -884,6 +890,7 @@ int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio)
884 } 890 }
885 return rc; 891 return rc;
886} 892}
893EXPORT_SYMBOL_GPL(em28xx_gpio_set);
887 894
888int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode) 895int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode)
889{ 896{
@@ -917,7 +924,7 @@ EXPORT_SYMBOL_GPL(em28xx_set_mode);
917static void em28xx_irq_callback(struct urb *urb) 924static void em28xx_irq_callback(struct urb *urb)
918{ 925{
919 struct em28xx *dev = urb->context; 926 struct em28xx *dev = urb->context;
920 int rc, i; 927 int i;
921 928
922 switch (urb->status) { 929 switch (urb->status) {
923 case 0: /* success */ 930 case 0: /* success */
@@ -934,7 +941,7 @@ static void em28xx_irq_callback(struct urb *urb)
934 941
935 /* Copy data from URB */ 942 /* Copy data from URB */
936 spin_lock(&dev->slock); 943 spin_lock(&dev->slock);
937 rc = dev->isoc_ctl.isoc_copy(dev, urb); 944 dev->isoc_ctl.isoc_copy(dev, urb);
938 spin_unlock(&dev->slock); 945 spin_unlock(&dev->slock);
939 946
940 /* Reset urb buffers */ 947 /* Reset urb buffers */
@@ -1106,17 +1113,19 @@ EXPORT_SYMBOL_GPL(em28xx_init_isoc);
1106int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev) 1113int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev)
1107{ 1114{
1108 unsigned int chip_cfg2; 1115 unsigned int chip_cfg2;
1109 unsigned int packet_size = 564; 1116 unsigned int packet_size;
1110 1117
1111 if (dev->chip_id == CHIP_ID_EM2874) { 1118 switch (dev->chip_id) {
1112 /* FIXME - for now assume 564 like it was before, but the 1119 case CHIP_ID_EM2710:
1113 em2874 code should be added to return the proper value... */ 1120 case CHIP_ID_EM2750:
1114 packet_size = 564; 1121 case CHIP_ID_EM2800:
1115 } else if (dev->chip_id == CHIP_ID_EM28174) { 1122 case CHIP_ID_EM2820:
1116 /* FIXME same as em2874. 564 was enough for 22 Mbit DVB-T 1123 case CHIP_ID_EM2840:
1117 but too much for 44 Mbit DVB-C. */ 1124 case CHIP_ID_EM2860:
1118 packet_size = 752; 1125 /* No DVB support */
1119 } else { 1126 return -EINVAL;
1127 case CHIP_ID_EM2870:
1128 case CHIP_ID_EM2883:
1120 /* TS max packet size stored in bits 1-0 of R01 */ 1129 /* TS max packet size stored in bits 1-0 of R01 */
1121 chip_cfg2 = em28xx_read_reg(dev, EM28XX_R01_CHIPCFG2); 1130 chip_cfg2 = em28xx_read_reg(dev, EM28XX_R01_CHIPCFG2);
1122 switch (chip_cfg2 & EM28XX_CHIPCFG2_TS_PACKETSIZE_MASK) { 1131 switch (chip_cfg2 & EM28XX_CHIPCFG2_TS_PACKETSIZE_MASK) {
@@ -1133,9 +1142,24 @@ int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev)
1133 packet_size = 752; 1142 packet_size = 752;
1134 break; 1143 break;
1135 } 1144 }
1145 break;
1146 case CHIP_ID_EM2874:
1147 /*
1148 * FIXME: for now assumes 564 like it was before, but the
1149 * em2874 code should be added to return the proper value
1150 */
1151 packet_size = 564;
1152 break;
1153 case CHIP_ID_EM2884:
1154 case CHIP_ID_EM28174:
1155 default:
1156 /*
1157 * FIXME: same as em2874. 564 was enough for 22 Mbit DVB-T
1158 * but not enough for 44 Mbit DVB-C.
1159 */
1160 packet_size = 752;
1136 } 1161 }
1137 1162
1138 em28xx_coredbg("dvb max packet size=%d\n", packet_size);
1139 return packet_size; 1163 return packet_size;
1140} 1164}
1141EXPORT_SYMBOL_GPL(em28xx_isoc_dvb_max_packetsize); 1165EXPORT_SYMBOL_GPL(em28xx_isoc_dvb_max_packetsize);
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index 7904ca4b6913..e5916dee4094 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -1,7 +1,7 @@
1/* 1/*
2 DVB device driver for em28xx 2 DVB device driver for em28xx
3 3
4 (c) 2008 Mauro Carvalho Chehab <mchehab@infradead.org> 4 (c) 2008-2011 Mauro Carvalho Chehab <mchehab@infradead.org>
5 5
6 (c) 2008 Devin Heitmueller <devin.heitmueller@gmail.com> 6 (c) 2008 Devin Heitmueller <devin.heitmueller@gmail.com>
7 - Fixes for the driver to properly work with HVR-950 7 - Fixes for the driver to properly work with HVR-950
@@ -40,6 +40,8 @@
40#include "s921.h" 40#include "s921.h"
41#include "drxd.h" 41#include "drxd.h"
42#include "cxd2820r.h" 42#include "cxd2820r.h"
43#include "tda18271c2dd.h"
44#include "drxk.h"
43 45
44MODULE_DESCRIPTION("driver for em28xx based DVB cards"); 46MODULE_DESCRIPTION("driver for em28xx based DVB cards");
45MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); 47MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
@@ -73,6 +75,11 @@ struct em28xx_dvb {
73 struct dmx_frontend fe_hw; 75 struct dmx_frontend fe_hw;
74 struct dmx_frontend fe_mem; 76 struct dmx_frontend fe_mem;
75 struct dvb_net net; 77 struct dvb_net net;
78
79 /* Due to DRX-K - probably need changes */
80 int (*gate_ctrl)(struct dvb_frontend *, int);
81 struct semaphore pll_mutex;
82 bool dont_attach_fe1;
76}; 83};
77 84
78 85
@@ -160,6 +167,11 @@ static int start_streaming(struct em28xx_dvb *dvb)
160 return rc; 167 return rc;
161 168
162 max_dvb_packet_size = em28xx_isoc_dvb_max_packetsize(dev); 169 max_dvb_packet_size = em28xx_isoc_dvb_max_packetsize(dev);
170 if (max_dvb_packet_size < 0)
171 return max_dvb_packet_size;
172 dprintk(1, "Using %d buffers each with %d bytes\n",
173 EM28XX_DVB_NUM_BUFS,
174 max_dvb_packet_size);
163 175
164 return em28xx_init_isoc(dev, EM28XX_DVB_MAX_PACKETS, 176 return em28xx_init_isoc(dev, EM28XX_DVB_MAX_PACKETS,
165 EM28XX_DVB_NUM_BUFS, max_dvb_packet_size, 177 EM28XX_DVB_NUM_BUFS, max_dvb_packet_size,
@@ -295,6 +307,79 @@ static struct drxd_config em28xx_drxd = {
295 .disable_i2c_gate_ctrl = 1, 307 .disable_i2c_gate_ctrl = 1,
296}; 308};
297 309
310struct drxk_config terratec_h5_drxk = {
311 .adr = 0x29,
312 .single_master = 1,
313 .no_i2c_bridge = 1,
314 .microcode_name = "dvb-usb-terratec-h5-drxk.fw",
315};
316
317static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
318{
319 struct em28xx_dvb *dvb = fe->sec_priv;
320 int status;
321
322 if (!dvb)
323 return -EINVAL;
324
325 if (enable) {
326 down(&dvb->pll_mutex);
327 status = dvb->gate_ctrl(fe, 1);
328 } else {
329 status = dvb->gate_ctrl(fe, 0);
330 up(&dvb->pll_mutex);
331 }
332 return status;
333}
334
335static void terratec_h5_init(struct em28xx *dev)
336{
337 int i;
338 struct em28xx_reg_seq terratec_h5_init[] = {
339 {EM28XX_R08_GPIO, 0xff, 0xff, 10},
340 {EM2874_R80_GPIO, 0xf6, 0xff, 100},
341 {EM2874_R80_GPIO, 0xf2, 0xff, 50},
342 {EM2874_R80_GPIO, 0xf6, 0xff, 100},
343 { -1, -1, -1, -1},
344 };
345 struct em28xx_reg_seq terratec_h5_end[] = {
346 {EM2874_R80_GPIO, 0xe6, 0xff, 100},
347 {EM2874_R80_GPIO, 0xa6, 0xff, 50},
348 {EM2874_R80_GPIO, 0xe6, 0xff, 100},
349 { -1, -1, -1, -1},
350 };
351 struct {
352 unsigned char r[4];
353 int len;
354 } regs[] = {
355 {{ 0x06, 0x02, 0x00, 0x31 }, 4},
356 {{ 0x01, 0x02 }, 2},
357 {{ 0x01, 0x02, 0x00, 0xc6 }, 4},
358 {{ 0x01, 0x00 }, 2},
359 {{ 0x01, 0x00, 0xff, 0xaf }, 4},
360 {{ 0x01, 0x00, 0x03, 0xa0 }, 4},
361 {{ 0x01, 0x00 }, 2},
362 {{ 0x01, 0x00, 0x73, 0xaf }, 4},
363 {{ 0x04, 0x00 }, 2},
364 {{ 0x00, 0x04 }, 2},
365 {{ 0x00, 0x04, 0x00, 0x0a }, 4},
366 {{ 0x04, 0x14 }, 2},
367 {{ 0x04, 0x14, 0x00, 0x00 }, 4},
368 };
369
370 em28xx_gpio_set(dev, terratec_h5_init);
371 em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
372 msleep(10);
373 em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x45);
374 msleep(10);
375
376 dev->i2c_client.addr = 0x82 >> 1;
377
378 for (i = 0; i < ARRAY_SIZE(regs); i++)
379 i2c_master_send(&dev->i2c_client, regs[i].r, regs[i].len);
380 em28xx_gpio_set(dev, terratec_h5_end);
381};
382
298static int mt352_terratec_xs_init(struct dvb_frontend *fe) 383static int mt352_terratec_xs_init(struct dvb_frontend *fe)
299{ 384{
300 /* Values extracted from a USB trace of the Terratec Windows driver */ 385 /* Values extracted from a USB trace of the Terratec Windows driver */
@@ -516,7 +601,7 @@ static void unregister_dvb(struct em28xx_dvb *dvb)
516 if (dvb->fe[1]) 601 if (dvb->fe[1])
517 dvb_unregister_frontend(dvb->fe[1]); 602 dvb_unregister_frontend(dvb->fe[1]);
518 dvb_unregister_frontend(dvb->fe[0]); 603 dvb_unregister_frontend(dvb->fe[0]);
519 if (dvb->fe[1]) 604 if (dvb->fe[1] && !dvb->dont_attach_fe1)
520 dvb_frontend_detach(dvb->fe[1]); 605 dvb_frontend_detach(dvb->fe[1]);
521 dvb_frontend_detach(dvb->fe[0]); 606 dvb_frontend_detach(dvb->fe[0]);
522 dvb_unregister_adapter(&dvb->adapter); 607 dvb_unregister_adapter(&dvb->adapter);
@@ -546,7 +631,7 @@ static int dvb_init(struct em28xx *dev)
546 em28xx_set_mode(dev, EM28XX_DIGITAL_MODE); 631 em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
547 /* init frontend */ 632 /* init frontend */
548 switch (dev->model) { 633 switch (dev->model) {
549 case EM2874_LEADERSHIP_ISDBT: 634 case EM2874_BOARD_LEADERSHIP_ISDBT:
550 dvb->fe[0] = dvb_attach(s921_attach, 635 dvb->fe[0] = dvb_attach(s921_attach,
551 &sharp_isdbt, &dev->i2c_adap); 636 &sharp_isdbt, &dev->i2c_adap);
552 637
@@ -689,6 +774,41 @@ static int dvb_init(struct em28xx *dev)
689 } 774 }
690 } 775 }
691 break; 776 break;
777 case EM2884_BOARD_TERRATEC_H5:
778 terratec_h5_init(dev);
779
780 dvb->dont_attach_fe1 = 1;
781
782 dvb->fe[0] = dvb_attach(drxk_attach, &terratec_h5_drxk, &dev->i2c_adap, &dvb->fe[1]);
783 if (!dvb->fe[0]) {
784 result = -EINVAL;
785 goto out_free;
786 }
787
788 /* FIXME: do we need a pll semaphore? */
789 dvb->fe[0]->sec_priv = dvb;
790 sema_init(&dvb->pll_mutex, 1);
791 dvb->gate_ctrl = dvb->fe[0]->ops.i2c_gate_ctrl;
792 dvb->fe[0]->ops.i2c_gate_ctrl = drxk_gate_ctrl;
793 dvb->fe[1]->id = 1;
794
795 /* Attach tda18271 to DVB-C frontend */
796 if (dvb->fe[0]->ops.i2c_gate_ctrl)
797 dvb->fe[0]->ops.i2c_gate_ctrl(dvb->fe[0], 1);
798 if (!dvb_attach(tda18271c2dd_attach, dvb->fe[0], &dev->i2c_adap, 0x60)) {
799 result = -EINVAL;
800 goto out_free;
801 }
802 if (dvb->fe[0]->ops.i2c_gate_ctrl)
803 dvb->fe[0]->ops.i2c_gate_ctrl(dvb->fe[0], 0);
804
805 /* Hack - needed by drxk/tda18271c2dd */
806 dvb->fe[1]->tuner_priv = dvb->fe[0]->tuner_priv;
807 memcpy(&dvb->fe[1]->ops.tuner_ops,
808 &dvb->fe[0]->ops.tuner_ops,
809 sizeof(dvb->fe[0]->ops.tuner_ops));
810
811 break;
692 default: 812 default:
693 em28xx_errdev("/2: The frontend of your DVB/ATSC card" 813 em28xx_errdev("/2: The frontend of your DVB/ATSC card"
694 " isn't supported yet\n"); 814 " isn't supported yet\n");
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 4739fc7e6eb3..36f5a9bc8b76 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -181,16 +181,25 @@ static int em2800_i2c_recv_bytes(struct em28xx *dev, unsigned char addr,
181 181
182/* 182/*
183 * em28xx_i2c_send_bytes() 183 * em28xx_i2c_send_bytes()
184 * untested for more than 4 bytes
185 */ 184 */
186static int em28xx_i2c_send_bytes(void *data, unsigned char addr, char *buf, 185static int em28xx_i2c_send_bytes(void *data, unsigned char addr, char *buf,
187 short len, int stop) 186 short len, int stop)
188{ 187{
189 int wrcount = 0; 188 int wrcount = 0;
190 struct em28xx *dev = (struct em28xx *)data; 189 struct em28xx *dev = (struct em28xx *)data;
190 int write_timeout, ret;
191 191
192 wrcount = dev->em28xx_write_regs_req(dev, stop ? 2 : 3, addr, buf, len); 192 wrcount = dev->em28xx_write_regs_req(dev, stop ? 2 : 3, addr, buf, len);
193 193
194 /* Seems to be required after a write */
195 for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0;
196 write_timeout -= 5) {
197 ret = dev->em28xx_read_reg(dev, 0x05);
198 if (!ret)
199 break;
200 msleep(5);
201 }
202
194 return wrcount; 203 return wrcount;
195} 204}
196 205
@@ -218,9 +227,7 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, unsigned char addr,
218 */ 227 */
219static int em28xx_i2c_check_for_device(struct em28xx *dev, unsigned char addr) 228static int em28xx_i2c_check_for_device(struct em28xx *dev, unsigned char addr)
220{ 229{
221 char msg;
222 int ret; 230 int ret;
223 msg = addr;
224 231
225 ret = dev->em28xx_read_reg_req(dev, 2, addr); 232 ret = dev->em28xx_read_reg_req(dev, 2, addr);
226 if (ret < 0) { 233 if (ret < 0) {
@@ -332,7 +339,9 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned char *eedata, int len)
332 struct em28xx_eeprom *em_eeprom = (void *)eedata; 339 struct em28xx_eeprom *em_eeprom = (void *)eedata;
333 int i, err, size = len, block; 340 int i, err, size = len, block;
334 341
335 if (dev->chip_id == CHIP_ID_EM2874 || dev->chip_id == CHIP_ID_EM28174) { 342 if (dev->chip_id == CHIP_ID_EM2874 ||
343 dev->chip_id == CHIP_ID_EM28174 ||
344 dev->chip_id == CHIP_ID_EM2884) {
336 /* Empia switched to a 16-bit addressable eeprom in newer 345 /* Empia switched to a 16-bit addressable eeprom in newer
337 devices. While we could certainly write a routine to read 346 devices. While we could certainly write a routine to read
338 the eeprom, there is nothing of use in there that cannot be 347 the eeprom, there is nothing of use in there that cannot be
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index ba1ba8648c81..5d12b14282e3 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -372,6 +372,7 @@ int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 rc_type)
372 ir->get_key = default_polling_getkey; 372 ir->get_key = default_polling_getkey;
373 break; 373 break;
374 case CHIP_ID_EM2874: 374 case CHIP_ID_EM2874:
375 case CHIP_ID_EM28174:
375 ir->get_key = em2874_polling_getkey; 376 ir->get_key = em2874_polling_getkey;
376 em28xx_write_regs(dev, EM2874_R50_IR_CONFIG, &ir_config, 1); 377 em28xx_write_regs(dev, EM2874_R50_IR_CONFIG, &ir_config, 1);
377 break; 378 break;
diff --git a/drivers/media/video/em28xx/em28xx-reg.h b/drivers/media/video/em28xx/em28xx-reg.h
index e92a28ede434..66f792361b97 100644
--- a/drivers/media/video/em28xx/em28xx-reg.h
+++ b/drivers/media/video/em28xx/em28xx-reg.h
@@ -201,6 +201,7 @@ enum em28xx_chip_id {
201 CHIP_ID_EM2870 = 35, 201 CHIP_ID_EM2870 = 35,
202 CHIP_ID_EM2883 = 36, 202 CHIP_ID_EM2883 = 36,
203 CHIP_ID_EM2874 = 65, 203 CHIP_ID_EM2874 = 65,
204 CHIP_ID_EM2884 = 68,
204 CHIP_ID_EM28174 = 113, 205 CHIP_ID_EM28174 = 113,
205}; 206};
206 207
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 7b6461d2d1ff..d176dc0394e2 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -32,7 +32,6 @@
32#include <linux/bitmap.h> 32#include <linux/bitmap.h>
33#include <linux/usb.h> 33#include <linux/usb.h>
34#include <linux/i2c.h> 34#include <linux/i2c.h>
35#include <linux/version.h>
36#include <linux/mm.h> 35#include <linux/mm.h>
37#include <linux/mutex.h> 36#include <linux/mutex.h>
38#include <linux/slab.h> 37#include <linux/slab.h>
@@ -50,7 +49,8 @@
50 "Sascha Sommer <saschasommer@freenet.de>" 49 "Sascha Sommer <saschasommer@freenet.de>"
51 50
52#define DRIVER_DESC "Empia em28xx based USB video device driver" 51#define DRIVER_DESC "Empia em28xx based USB video device driver"
53#define EM28XX_VERSION_CODE KERNEL_VERSION(0, 1, 2) 52
53#define EM28XX_VERSION "0.1.3"
54 54
55#define em28xx_videodbg(fmt, arg...) do {\ 55#define em28xx_videodbg(fmt, arg...) do {\
56 if (video_debug) \ 56 if (video_debug) \
@@ -72,6 +72,7 @@ do {\
72MODULE_AUTHOR(DRIVER_AUTHOR); 72MODULE_AUTHOR(DRIVER_AUTHOR);
73MODULE_DESCRIPTION(DRIVER_DESC); 73MODULE_DESCRIPTION(DRIVER_DESC);
74MODULE_LICENSE("GPL"); 74MODULE_LICENSE("GPL");
75MODULE_VERSION(EM28XX_VERSION);
75 76
76static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; 77static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
77static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; 78static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
@@ -1757,8 +1758,6 @@ static int vidioc_querycap(struct file *file, void *priv,
1757 strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card)); 1758 strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
1758 usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); 1759 usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
1759 1760
1760 cap->version = EM28XX_VERSION_CODE;
1761
1762 cap->capabilities = 1761 cap->capabilities =
1763 V4L2_CAP_SLICED_VBI_CAPTURE | 1762 V4L2_CAP_SLICED_VBI_CAPTURE |
1764 V4L2_CAP_VIDEO_CAPTURE | 1763 V4L2_CAP_VIDEO_CAPTURE |
@@ -1976,7 +1975,6 @@ static int radio_querycap(struct file *file, void *priv,
1976 strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card)); 1975 strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
1977 usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); 1976 usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
1978 1977
1979 cap->version = EM28XX_VERSION_CODE;
1980 cap->capabilities = V4L2_CAP_TUNER; 1978 cap->capabilities = V4L2_CAP_TUNER;
1981 return 0; 1979 return 0;
1982} 1980}
@@ -2450,10 +2448,8 @@ int em28xx_register_analog_devices(struct em28xx *dev)
2450 u8 val; 2448 u8 val;
2451 int ret; 2449 int ret;
2452 2450
2453 printk(KERN_INFO "%s: v4l2 driver version %d.%d.%d\n", 2451 printk(KERN_INFO "%s: v4l2 driver version %s\n",
2454 dev->name, 2452 dev->name, EM28XX_VERSION);
2455 (EM28XX_VERSION_CODE >> 16) & 0xff,
2456 (EM28XX_VERSION_CODE >> 8) & 0xff, EM28XX_VERSION_CODE & 0xff);
2457 2453
2458 /* set default norm */ 2454 /* set default norm */
2459 dev->norm = em28xx_video_template.current_norm; 2455 dev->norm = em28xx_video_template.current_norm;
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 3cca33122450..d80658bf3da9 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -117,9 +117,9 @@
117#define EM2800_BOARD_VC211A 74 117#define EM2800_BOARD_VC211A 74
118#define EM2882_BOARD_DIKOM_DK300 75 118#define EM2882_BOARD_DIKOM_DK300 75
119#define EM2870_BOARD_KWORLD_A340 76 119#define EM2870_BOARD_KWORLD_A340 76
120#define EM2874_LEADERSHIP_ISDBT 77 120#define EM2874_BOARD_LEADERSHIP_ISDBT 77
121#define EM28174_BOARD_PCTV_290E 78 121#define EM28174_BOARD_PCTV_290E 78
122 122#define EM2884_BOARD_TERRATEC_H5 79
123 123
124/* Limits minimum and default number of buffers */ 124/* Limits minimum and default number of buffers */
125#define EM28XX_MIN_BUF 4 125#define EM28XX_MIN_BUF 4
@@ -487,6 +487,8 @@ struct em28xx {
487 int devno; /* marks the number of this device */ 487 int devno; /* marks the number of this device */
488 enum em28xx_chip_id chip_id; 488 enum em28xx_chip_id chip_id;
489 489
490 int audio_ifnum;
491
490 struct v4l2_device v4l2_dev; 492 struct v4l2_device v4l2_dev;
491 struct em28xx_board board; 493 struct em28xx_board board;
492 494
@@ -503,6 +505,7 @@ struct em28xx {
503 505
504 unsigned int has_audio_class:1; 506 unsigned int has_audio_class:1;
505 unsigned int has_alsa_audio:1; 507 unsigned int has_alsa_audio:1;
508 unsigned int is_audio_only:1;
506 509
507 /* Controls audio streaming */ 510 /* Controls audio streaming */
508 struct work_struct wq_trigger; /* Trigger to start/stop audio for alsa module */ 511 struct work_struct wq_trigger; /* Trigger to start/stop audio for alsa module */
@@ -697,6 +700,9 @@ int em28xx_tuner_callback(void *ptr, int component, int command, int arg);
697void em28xx_release_resources(struct em28xx *dev); 700void em28xx_release_resources(struct em28xx *dev);
698 701
699/* Provided by em28xx-input.c */ 702/* Provided by em28xx-input.c */
703
704#ifdef CONFIG_VIDEO_EM28XX_RC
705
700int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw); 706int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw);
701int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw); 707int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw);
702int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key, 708int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
@@ -709,6 +715,20 @@ void em28xx_deregister_snapshot_button(struct em28xx *dev);
709int em28xx_ir_init(struct em28xx *dev); 715int em28xx_ir_init(struct em28xx *dev);
710int em28xx_ir_fini(struct em28xx *dev); 716int em28xx_ir_fini(struct em28xx *dev);
711 717
718#else
719
720#define em28xx_get_key_terratec NULL
721#define em28xx_get_key_em_haup NULL
722#define em28xx_get_key_pinnacle_usb_grey NULL
723#define em28xx_get_key_winfast_usbii_deluxe NULL
724
725static inline void em28xx_register_snapshot_button(struct em28xx *dev) {}
726static inline void em28xx_deregister_snapshot_button(struct em28xx *dev) {}
727static inline int em28xx_ir_init(struct em28xx *dev) { return 0; }
728static inline int em28xx_ir_fini(struct em28xx *dev) { return 0; }
729
730#endif
731
712/* Provided by em28xx-vbi.c */ 732/* Provided by em28xx-vbi.c */
713extern struct videobuf_queue_ops em28xx_vbi_qops; 733extern struct videobuf_queue_ops em28xx_vbi_qops;
714 734
diff --git a/drivers/media/video/et61x251/et61x251.h b/drivers/media/video/et61x251/et61x251.h
index bf66189cb26d..14bb907d650e 100644
--- a/drivers/media/video/et61x251/et61x251.h
+++ b/drivers/media/video/et61x251/et61x251.h
@@ -21,7 +21,6 @@
21#ifndef _ET61X251_H_ 21#ifndef _ET61X251_H_
22#define _ET61X251_H_ 22#define _ET61X251_H_
23 23
24#include <linux/version.h>
25#include <linux/usb.h> 24#include <linux/usb.h>
26#include <linux/videodev2.h> 25#include <linux/videodev2.h>
27#include <media/v4l2-common.h> 26#include <media/v4l2-common.h>
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index a982750dcef1..9a1e80a1e145 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -18,6 +18,7 @@
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
19 ***************************************************************************/ 19 ***************************************************************************/
20 20
21#include <linux/version.h>
21#include <linux/module.h> 22#include <linux/module.h>
22#include <linux/init.h> 23#include <linux/init.h>
23#include <linux/kernel.h> 24#include <linux/kernel.h>
@@ -48,8 +49,7 @@
48#define ET61X251_MODULE_AUTHOR "(C) 2006-2007 Luca Risolia" 49#define ET61X251_MODULE_AUTHOR "(C) 2006-2007 Luca Risolia"
49#define ET61X251_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>" 50#define ET61X251_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>"
50#define ET61X251_MODULE_LICENSE "GPL" 51#define ET61X251_MODULE_LICENSE "GPL"
51#define ET61X251_MODULE_VERSION "1:1.09" 52#define ET61X251_MODULE_VERSION "1.1.10"
52#define ET61X251_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 9)
53 53
54/*****************************************************************************/ 54/*****************************************************************************/
55 55
@@ -1579,7 +1579,7 @@ et61x251_vidioc_querycap(struct et61x251_device* cam, void __user * arg)
1579{ 1579{
1580 struct v4l2_capability cap = { 1580 struct v4l2_capability cap = {
1581 .driver = "et61x251", 1581 .driver = "et61x251",
1582 .version = ET61X251_MODULE_VERSION_CODE, 1582 .version = LINUX_VERSION_CODE,
1583 .capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | 1583 .capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
1584 V4L2_CAP_STREAMING, 1584 V4L2_CAP_STREAMING,
1585 }; 1585 };
@@ -2480,16 +2480,8 @@ static long et61x251_ioctl_v4l2(struct file *filp,
2480 case VIDIOC_S_PARM: 2480 case VIDIOC_S_PARM:
2481 return et61x251_vidioc_s_parm(cam, arg); 2481 return et61x251_vidioc_s_parm(cam, arg);
2482 2482
2483 case VIDIOC_G_STD:
2484 case VIDIOC_S_STD:
2485 case VIDIOC_QUERYSTD:
2486 case VIDIOC_ENUMSTD:
2487 case VIDIOC_QUERYMENU:
2488 case VIDIOC_ENUM_FRAMEINTERVALS:
2489 return -EINVAL;
2490
2491 default: 2483 default:
2492 return -EINVAL; 2484 return -ENOTTY;
2493 2485
2494 } 2486 }
2495} 2487}
diff --git a/drivers/media/video/fsl-viu.c b/drivers/media/video/fsl-viu.c
index 908d7012c3f2..27cb197d0bd6 100644
--- a/drivers/media/video/fsl-viu.c
+++ b/drivers/media/video/fsl-viu.c
@@ -23,19 +23,13 @@
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/of_platform.h> 24#include <linux/of_platform.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/version.h>
27#include <media/v4l2-common.h> 26#include <media/v4l2-common.h>
28#include <media/v4l2-device.h> 27#include <media/v4l2-device.h>
29#include <media/v4l2-ioctl.h> 28#include <media/v4l2-ioctl.h>
30#include <media/videobuf-dma-contig.h> 29#include <media/videobuf-dma-contig.h>
31 30
32#define DRV_NAME "fsl_viu" 31#define DRV_NAME "fsl_viu"
33#define VIU_MAJOR_VERSION 0 32#define VIU_VERSION "0.5.1"
34#define VIU_MINOR_VERSION 5
35#define VIU_RELEASE 0
36#define VIU_VERSION KERNEL_VERSION(VIU_MAJOR_VERSION, \
37 VIU_MINOR_VERSION, \
38 VIU_RELEASE)
39 33
40#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */ 34#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
41 35
@@ -610,7 +604,6 @@ static int vidioc_querycap(struct file *file, void *priv,
610{ 604{
611 strcpy(cap->driver, "viu"); 605 strcpy(cap->driver, "viu");
612 strcpy(cap->card, "viu"); 606 strcpy(cap->card, "viu");
613 cap->version = VIU_VERSION;
614 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | 607 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
615 V4L2_CAP_STREAMING | 608 V4L2_CAP_STREAMING |
616 V4L2_CAP_VIDEO_OVERLAY | 609 V4L2_CAP_VIDEO_OVERLAY |
@@ -1684,3 +1677,4 @@ module_exit(viu_exit);
1684MODULE_DESCRIPTION("Freescale Video-In(VIU)"); 1677MODULE_DESCRIPTION("Freescale Video-In(VIU)");
1685MODULE_AUTHOR("Hongjun Chen"); 1678MODULE_AUTHOR("Hongjun Chen");
1686MODULE_LICENSE("GPL"); 1679MODULE_LICENSE("GPL");
1680MODULE_VERSION(VIU_VERSION);
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig
index 34ae2c299799..43d9a20caebc 100644
--- a/drivers/media/video/gspca/Kconfig
+++ b/drivers/media/video/gspca/Kconfig
@@ -179,6 +179,16 @@ config USB_GSPCA_PAC7311
179 To compile this driver as a module, choose M here: the 179 To compile this driver as a module, choose M here: the
180 module will be called gspca_pac7311. 180 module will be called gspca_pac7311.
181 181
182config USB_GSPCA_SE401
183 tristate "SE401 USB Camera Driver"
184 depends on VIDEO_V4L2 && USB_GSPCA
185 help
186 Say Y here if you want support for cameras based on the
187 Endpoints (formerly known as AOX) se401 chip.
188
189 To compile this driver as a module, choose M here: the
190 module will be called gspca_se401.
191
182config USB_GSPCA_SN9C2028 192config USB_GSPCA_SN9C2028
183 tristate "SONIX Dual-Mode USB Camera Driver" 193 tristate "SONIX Dual-Mode USB Camera Driver"
184 depends on VIDEO_V4L2 && USB_GSPCA 194 depends on VIDEO_V4L2 && USB_GSPCA
diff --git a/drivers/media/video/gspca/Makefile b/drivers/media/video/gspca/Makefile
index 802fbe1bff4a..d6364a86333a 100644
--- a/drivers/media/video/gspca/Makefile
+++ b/drivers/media/video/gspca/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_USB_GSPCA_OV534_9) += gspca_ov534_9.o
16obj-$(CONFIG_USB_GSPCA_PAC207) += gspca_pac207.o 16obj-$(CONFIG_USB_GSPCA_PAC207) += gspca_pac207.o
17obj-$(CONFIG_USB_GSPCA_PAC7302) += gspca_pac7302.o 17obj-$(CONFIG_USB_GSPCA_PAC7302) += gspca_pac7302.o
18obj-$(CONFIG_USB_GSPCA_PAC7311) += gspca_pac7311.o 18obj-$(CONFIG_USB_GSPCA_PAC7311) += gspca_pac7311.o
19obj-$(CONFIG_USB_GSPCA_SE401) += gspca_se401.o
19obj-$(CONFIG_USB_GSPCA_SN9C2028) += gspca_sn9c2028.o 20obj-$(CONFIG_USB_GSPCA_SN9C2028) += gspca_sn9c2028.o
20obj-$(CONFIG_USB_GSPCA_SN9C20X) += gspca_sn9c20x.o 21obj-$(CONFIG_USB_GSPCA_SN9C20X) += gspca_sn9c20x.o
21obj-$(CONFIG_USB_GSPCA_SONIXB) += gspca_sonixb.o 22obj-$(CONFIG_USB_GSPCA_SONIXB) += gspca_sonixb.o
@@ -58,6 +59,7 @@ gspca_ov534_9-objs := ov534_9.o
58gspca_pac207-objs := pac207.o 59gspca_pac207-objs := pac207.o
59gspca_pac7302-objs := pac7302.o 60gspca_pac7302-objs := pac7302.o
60gspca_pac7311-objs := pac7311.o 61gspca_pac7311-objs := pac7311.o
62gspca_se401-objs := se401.o
61gspca_sn9c2028-objs := sn9c2028.o 63gspca_sn9c2028-objs := sn9c2028.o
62gspca_sn9c20x-objs := sn9c20x.o 64gspca_sn9c20x-objs := sn9c20x.o
63gspca_sonixb-objs := sonixb.o 65gspca_sonixb-objs := sonixb.o
diff --git a/drivers/media/video/gspca/gl860/gl860.h b/drivers/media/video/gspca/gl860/gl860.h
index 49ad4acbf602..0330a0293b9c 100644
--- a/drivers/media/video/gspca/gl860/gl860.h
+++ b/drivers/media/video/gspca/gl860/gl860.h
@@ -18,7 +18,6 @@
18 */ 18 */
19#ifndef GL860_DEV_H 19#ifndef GL860_DEV_H
20#define GL860_DEV_H 20#define GL860_DEV_H
21#include <linux/version.h>
22 21
23#include "gspca.h" 22#include "gspca.h"
24 23
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 08ce9948d99b..5da4879f47f2 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -24,7 +24,6 @@
24#define MODULE_NAME "gspca" 24#define MODULE_NAME "gspca"
25 25
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/version.h>
28#include <linux/fs.h> 27#include <linux/fs.h>
29#include <linux/vmalloc.h> 28#include <linux/vmalloc.h>
30#include <linux/sched.h> 29#include <linux/sched.h>
@@ -51,11 +50,12 @@
51#error "DEF_NURBS too big" 50#error "DEF_NURBS too big"
52#endif 51#endif
53 52
53#define DRIVER_VERSION_NUMBER "2.13.0"
54
54MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>"); 55MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
55MODULE_DESCRIPTION("GSPCA USB Camera Driver"); 56MODULE_DESCRIPTION("GSPCA USB Camera Driver");
56MODULE_LICENSE("GPL"); 57MODULE_LICENSE("GPL");
57 58MODULE_VERSION(DRIVER_VERSION_NUMBER);
58#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 13, 0)
59 59
60#ifdef GSPCA_DEBUG 60#ifdef GSPCA_DEBUG
61int gspca_debug = D_ERR | D_PROBE; 61int gspca_debug = D_ERR | D_PROBE;
@@ -443,8 +443,11 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
443 } else { 443 } else {
444 switch (gspca_dev->last_packet_type) { 444 switch (gspca_dev->last_packet_type) {
445 case DISCARD_PACKET: 445 case DISCARD_PACKET:
446 if (packet_type == LAST_PACKET) 446 if (packet_type == LAST_PACKET) {
447 gspca_dev->last_packet_type = packet_type; 447 gspca_dev->last_packet_type = packet_type;
448 gspca_dev->image = NULL;
449 gspca_dev->image_len = 0;
450 }
448 return; 451 return;
449 case LAST_PACKET: 452 case LAST_PACKET:
450 return; 453 return;
@@ -1278,10 +1281,10 @@ static int vidioc_querycap(struct file *file, void *priv,
1278 ret = -ENODEV; 1281 ret = -ENODEV;
1279 goto out; 1282 goto out;
1280 } 1283 }
1281 strncpy((char *) cap->driver, gspca_dev->sd_desc->name, 1284 strlcpy((char *) cap->driver, gspca_dev->sd_desc->name,
1282 sizeof cap->driver); 1285 sizeof cap->driver);
1283 if (gspca_dev->dev->product != NULL) { 1286 if (gspca_dev->dev->product != NULL) {
1284 strncpy((char *) cap->card, gspca_dev->dev->product, 1287 strlcpy((char *) cap->card, gspca_dev->dev->product,
1285 sizeof cap->card); 1288 sizeof cap->card);
1286 } else { 1289 } else {
1287 snprintf((char *) cap->card, sizeof cap->card, 1290 snprintf((char *) cap->card, sizeof cap->card,
@@ -1291,7 +1294,6 @@ static int vidioc_querycap(struct file *file, void *priv,
1291 } 1294 }
1292 usb_make_path(gspca_dev->dev, (char *) cap->bus_info, 1295 usb_make_path(gspca_dev->dev, (char *) cap->bus_info,
1293 sizeof(cap->bus_info)); 1296 sizeof(cap->bus_info));
1294 cap->version = DRIVER_VERSION_NUMBER;
1295 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE 1297 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
1296 | V4L2_CAP_STREAMING 1298 | V4L2_CAP_STREAMING
1297 | V4L2_CAP_READWRITE; 1299 | V4L2_CAP_READWRITE;
@@ -1460,7 +1462,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
1460 return -EINVAL; 1462 return -EINVAL;
1461 input->type = V4L2_INPUT_TYPE_CAMERA; 1463 input->type = V4L2_INPUT_TYPE_CAMERA;
1462 input->status = gspca_dev->cam.input_flags; 1464 input->status = gspca_dev->cam.input_flags;
1463 strncpy(input->name, gspca_dev->sd_desc->name, 1465 strlcpy(input->name, gspca_dev->sd_desc->name,
1464 sizeof input->name); 1466 sizeof input->name);
1465 return 0; 1467 return 0;
1466} 1468}
@@ -2478,10 +2480,7 @@ EXPORT_SYMBOL(gspca_auto_gain_n_exposure);
2478/* -- module insert / remove -- */ 2480/* -- module insert / remove -- */
2479static int __init gspca_init(void) 2481static int __init gspca_init(void)
2480{ 2482{
2481 info("v%d.%d.%d registered", 2483 info("v" DRIVER_VERSION_NUMBER " registered");
2482 (DRIVER_VERSION_NUMBER >> 16) & 0xff,
2483 (DRIVER_VERSION_NUMBER >> 8) & 0xff,
2484 DRIVER_VERSION_NUMBER & 0xff);
2485 return 0; 2484 return 0;
2486} 2485}
2487static void __exit gspca_exit(void) 2486static void __exit gspca_exit(void)
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 057e287b9152..0800433b2092 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -134,6 +134,7 @@ enum sensors {
134 SEN_OV7670, 134 SEN_OV7670,
135 SEN_OV76BE, 135 SEN_OV76BE,
136 SEN_OV8610, 136 SEN_OV8610,
137 SEN_OV9600,
137}; 138};
138 139
139/* Note this is a bit of a hack, but the w9968cf driver needs the code for all 140/* Note this is a bit of a hack, but the w9968cf driver needs the code for all
@@ -340,6 +341,10 @@ static const unsigned ctrl_dis[] = {
340 (1 << EXPOSURE) | 341 (1 << EXPOSURE) |
341 (1 << AUTOGAIN) | 342 (1 << AUTOGAIN) |
342 (1 << FREQ), 343 (1 << FREQ),
344[SEN_OV9600] = ((1 << NCTRL) - 1) /* no control */
345 ^ ((1 << EXPOSURE) /* but exposure */
346 | (1 << AUTOGAIN)), /* and autogain */
347
343}; 348};
344 349
345static const struct v4l2_pix_format ov519_vga_mode[] = { 350static const struct v4l2_pix_format ov519_vga_mode[] = {
@@ -525,6 +530,17 @@ static const struct v4l2_pix_format ovfx2_ov3610_mode[] = {
525 .colorspace = V4L2_COLORSPACE_SRGB, 530 .colorspace = V4L2_COLORSPACE_SRGB,
526 .priv = 0}, 531 .priv = 0},
527}; 532};
533static const struct v4l2_pix_format ovfx2_ov9600_mode[] = {
534 {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
535 .bytesperline = 640,
536 .sizeimage = 640 * 480,
537 .colorspace = V4L2_COLORSPACE_SRGB,
538 .priv = 1},
539 {1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
540 .bytesperline = 1280,
541 .sizeimage = 1280 * 1024,
542 .colorspace = V4L2_COLORSPACE_SRGB},
543};
528 544
529/* Registers common to OV511 / OV518 */ 545/* Registers common to OV511 / OV518 */
530#define R51x_FIFO_PSIZE 0x30 /* 2 bytes wide w/ OV518(+) */ 546#define R51x_FIFO_PSIZE 0x30 /* 2 bytes wide w/ OV518(+) */
@@ -1807,6 +1823,22 @@ static const struct ov_i2c_regvals norm_7660[] = {
1807 | OV7670_COM8_AEC}, 1823 | OV7670_COM8_AEC},
1808 {0xa1, 0xc8} 1824 {0xa1, 0xc8}
1809}; 1825};
1826static const struct ov_i2c_regvals norm_9600[] = {
1827 {0x12, 0x80},
1828 {0x0c, 0x28},
1829 {0x11, 0x80},
1830 {0x13, 0xb5},
1831 {0x14, 0x3e},
1832 {0x1b, 0x04},
1833 {0x24, 0xb0},
1834 {0x25, 0x90},
1835 {0x26, 0x94},
1836 {0x35, 0x90},
1837 {0x37, 0x07},
1838 {0x38, 0x08},
1839 {0x01, 0x8e},
1840 {0x02, 0x85}
1841};
1810 1842
1811/* 7670. Defaults taken from OmniVision provided data, 1843/* 7670. Defaults taken from OmniVision provided data,
1812* as provided by Jonathan Corbet of OLPC */ 1844* as provided by Jonathan Corbet of OLPC */
@@ -2400,9 +2432,12 @@ static int ov518_i2c_r(struct sd *sd, u8 reg)
2400 2432
2401 /* Initiate 2-byte write cycle */ 2433 /* Initiate 2-byte write cycle */
2402 reg_w(sd, R518_I2C_CTL, 0x03); 2434 reg_w(sd, R518_I2C_CTL, 0x03);
2435 reg_r8(sd, R518_I2C_CTL);
2403 2436
2404 /* Initiate 2-byte read cycle */ 2437 /* Initiate 2-byte read cycle */
2405 reg_w(sd, R518_I2C_CTL, 0x05); 2438 reg_w(sd, R518_I2C_CTL, 0x05);
2439 reg_r8(sd, R518_I2C_CTL);
2440
2406 value = reg_r(sd, R51x_I2C_DATA); 2441 value = reg_r(sd, R51x_I2C_DATA);
2407 PDEBUG(D_USBI, "ov518_i2c_r %02x %02x", reg, value); 2442 PDEBUG(D_USBI, "ov518_i2c_r %02x %02x", reg, value);
2408 return value; 2443 return value;
@@ -2686,7 +2721,7 @@ static void write_i2c_regvals(struct sd *sd,
2686 * 2721 *
2687 ***************************************************************************/ 2722 ***************************************************************************/
2688 2723
2689/* This initializes the OV2x10 / OV3610 / OV3620 */ 2724/* This initializes the OV2x10 / OV3610 / OV3620 / OV9600 */
2690static void ov_hires_configure(struct sd *sd) 2725static void ov_hires_configure(struct sd *sd)
2691{ 2726{
2692 int high, low; 2727 int high, low;
@@ -2702,19 +2737,32 @@ static void ov_hires_configure(struct sd *sd)
2702 high = i2c_r(sd, 0x0a); 2737 high = i2c_r(sd, 0x0a);
2703 low = i2c_r(sd, 0x0b); 2738 low = i2c_r(sd, 0x0b);
2704 /* info("%x, %x", high, low); */ 2739 /* info("%x, %x", high, low); */
2705 if (high == 0x96 && low == 0x40) { 2740 switch (high) {
2706 PDEBUG(D_PROBE, "Sensor is an OV2610"); 2741 case 0x96:
2707 sd->sensor = SEN_OV2610; 2742 switch (low) {
2708 } else if (high == 0x96 && low == 0x41) { 2743 case 0x40:
2709 PDEBUG(D_PROBE, "Sensor is an OV2610AE"); 2744 PDEBUG(D_PROBE, "Sensor is a OV2610");
2710 sd->sensor = SEN_OV2610AE; 2745 sd->sensor = SEN_OV2610;
2711 } else if (high == 0x36 && (low & 0x0f) == 0x00) { 2746 return;
2712 PDEBUG(D_PROBE, "Sensor is an OV3610"); 2747 case 0x41:
2713 sd->sensor = SEN_OV3610; 2748 PDEBUG(D_PROBE, "Sensor is a OV2610AE");
2714 } else { 2749 sd->sensor = SEN_OV2610AE;
2715 err("Error unknown sensor type: %02x%02x", 2750 return;
2716 high, low); 2751 case 0xb1:
2752 PDEBUG(D_PROBE, "Sensor is a OV9600");
2753 sd->sensor = SEN_OV9600;
2754 return;
2755 }
2756 break;
2757 case 0x36:
2758 if ((low & 0x0f) == 0x00) {
2759 PDEBUG(D_PROBE, "Sensor is a OV3610");
2760 sd->sensor = SEN_OV3610;
2761 return;
2762 }
2763 break;
2717 } 2764 }
2765 err("Error unknown sensor type: %02x%02x", high, low);
2718} 2766}
2719 2767
2720/* This initializes the OV8110, OV8610 sensor. The OV8110 uses 2768/* This initializes the OV8110, OV8610 sensor. The OV8110 uses
@@ -3400,6 +3448,10 @@ static int sd_init(struct gspca_dev *gspca_dev)
3400 cam->cam_mode = ovfx2_ov3610_mode; 3448 cam->cam_mode = ovfx2_ov3610_mode;
3401 cam->nmodes = ARRAY_SIZE(ovfx2_ov3610_mode); 3449 cam->nmodes = ARRAY_SIZE(ovfx2_ov3610_mode);
3402 break; 3450 break;
3451 case SEN_OV9600:
3452 cam->cam_mode = ovfx2_ov9600_mode;
3453 cam->nmodes = ARRAY_SIZE(ovfx2_ov9600_mode);
3454 break;
3403 default: 3455 default:
3404 if (sd->sif) { 3456 if (sd->sif) {
3405 cam->cam_mode = ov519_sif_mode; 3457 cam->cam_mode = ov519_sif_mode;
@@ -3497,6 +3549,12 @@ static int sd_init(struct gspca_dev *gspca_dev)
3497 case SEN_OV8610: 3549 case SEN_OV8610:
3498 write_i2c_regvals(sd, norm_8610, ARRAY_SIZE(norm_8610)); 3550 write_i2c_regvals(sd, norm_8610, ARRAY_SIZE(norm_8610));
3499 break; 3551 break;
3552 case SEN_OV9600:
3553 write_i2c_regvals(sd, norm_9600, ARRAY_SIZE(norm_9600));
3554
3555 /* enable autoexpo */
3556/* i2c_w_mask(sd, 0x13, 0x05, 0x05); */
3557 break;
3500 } 3558 }
3501 return gspca_dev->usb_err; 3559 return gspca_dev->usb_err;
3502error: 3560error:
@@ -4085,6 +4143,33 @@ static void mode_init_ov_sensor_regs(struct sd *sd)
4085 i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); 4143 i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
4086 i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */ 4144 i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
4087 break; 4145 break;
4146 case SEN_OV9600: {
4147 const struct ov_i2c_regvals *vals;
4148 static const struct ov_i2c_regvals sxga_15[] = {
4149 {0x11, 0x80}, {0x14, 0x3e}, {0x24, 0x85}, {0x25, 0x75}
4150 };
4151 static const struct ov_i2c_regvals sxga_7_5[] = {
4152 {0x11, 0x81}, {0x14, 0x3e}, {0x24, 0x85}, {0x25, 0x75}
4153 };
4154 static const struct ov_i2c_regvals vga_30[] = {
4155 {0x11, 0x81}, {0x14, 0x7e}, {0x24, 0x70}, {0x25, 0x60}
4156 };
4157 static const struct ov_i2c_regvals vga_15[] = {
4158 {0x11, 0x83}, {0x14, 0x3e}, {0x24, 0x80}, {0x25, 0x70}
4159 };
4160
4161 /* frame rates:
4162 * 15fps / 7.5 fps for 1280x1024
4163 * 30fps / 15fps for 640x480
4164 */
4165 i2c_w_mask(sd, 0x12, qvga ? 0x40 : 0x00, 0x40);
4166 if (qvga)
4167 vals = sd->frame_rate < 30 ? vga_15 : vga_30;
4168 else
4169 vals = sd->frame_rate < 15 ? sxga_7_5 : sxga_15;
4170 write_i2c_regvals(sd, vals, ARRAY_SIZE(sxga_15));
4171 return;
4172 }
4088 default: 4173 default:
4089 return; 4174 return;
4090 } 4175 }
@@ -4120,6 +4205,7 @@ static void set_ov_sensor_window(struct sd *sd)
4120 case SEN_OV2610AE: 4205 case SEN_OV2610AE:
4121 case SEN_OV3610: 4206 case SEN_OV3610:
4122 case SEN_OV7670: 4207 case SEN_OV7670:
4208 case SEN_OV9600:
4123 mode_init_ov_sensor_regs(sd); 4209 mode_init_ov_sensor_regs(sd);
4124 return; 4210 return;
4125 case SEN_OV7660: 4211 case SEN_OV7660:
@@ -4920,7 +5006,8 @@ static const struct sd_desc sd_desc = {
4920static const struct usb_device_id device_table[] = { 5006static const struct usb_device_id device_table[] = {
4921 {USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF }, 5007 {USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF },
4922 {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 }, 5008 {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 },
4923 {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 }, 5009 {USB_DEVICE(0x041e, 0x405f),
5010 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
4924 {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 }, 5011 {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
4925 {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 }, 5012 {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
4926 {USB_DEVICE(0x041e, 0x4064), 5013 {USB_DEVICE(0x041e, 0x4064),
diff --git a/drivers/media/video/gspca/se401.c b/drivers/media/video/gspca/se401.c
new file mode 100644
index 000000000000..4c283c24c752
--- /dev/null
+++ b/drivers/media/video/gspca/se401.c
@@ -0,0 +1,774 @@
1/*
2 * GSPCA Endpoints (formerly known as AOX) se401 USB Camera sub Driver
3 *
4 * Copyright (C) 2011 Hans de Goede <hdegoede@redhat.com>
5 *
6 * Based on the v4l1 se401 driver which is:
7 *
8 * Copyright (c) 2000 Jeroen B. Vreeken (pe1rxq@amsat.org)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 */
25
26#define MODULE_NAME "se401"
27
28#define BULK_SIZE 4096
29#define PACKET_SIZE 1024
30#define READ_REQ_SIZE 64
31#define MAX_MODES ((READ_REQ_SIZE - 6) / 4)
32/* The se401 compression algorithm uses a fixed quant factor, which
33 can be configured by setting the high nibble of the SE401_OPERATINGMODE
34 feature. This needs to exactly match what is in libv4l! */
35#define SE401_QUANT_FACT 8
36
37#include <linux/input.h>
38#include <linux/slab.h>
39#include "gspca.h"
40#include "se401.h"
41
42MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
43MODULE_DESCRIPTION("Endpoints se401");
44MODULE_LICENSE("GPL");
45
46/* controls */
47enum e_ctrl {
48 BRIGHTNESS,
49 GAIN,
50 EXPOSURE,
51 FREQ,
52 NCTRL /* number of controls */
53};
54
55/* exposure change state machine states */
56enum {
57 EXPO_CHANGED,
58 EXPO_DROP_FRAME,
59 EXPO_NO_CHANGE,
60};
61
62/* specific webcam descriptor */
63struct sd {
64 struct gspca_dev gspca_dev; /* !! must be the first item */
65 struct gspca_ctrl ctrls[NCTRL];
66 struct v4l2_pix_format fmts[MAX_MODES];
67 int pixels_read;
68 int packet_read;
69 u8 packet[PACKET_SIZE];
70 u8 restart_stream;
71 u8 button_state;
72 u8 resetlevel;
73 u8 resetlevel_frame_count;
74 int resetlevel_adjust_dir;
75 int expo_change_state;
76};
77
78static void setbrightness(struct gspca_dev *gspca_dev);
79static void setgain(struct gspca_dev *gspca_dev);
80static void setexposure(struct gspca_dev *gspca_dev);
81
82static const struct ctrl sd_ctrls[NCTRL] = {
83[BRIGHTNESS] = {
84 {
85 .id = V4L2_CID_BRIGHTNESS,
86 .type = V4L2_CTRL_TYPE_INTEGER,
87 .name = "Brightness",
88 .minimum = 0,
89 .maximum = 255,
90 .step = 1,
91 .default_value = 15,
92 },
93 .set_control = setbrightness
94 },
95[GAIN] = {
96 {
97 .id = V4L2_CID_GAIN,
98 .type = V4L2_CTRL_TYPE_INTEGER,
99 .name = "Gain",
100 .minimum = 0,
101 .maximum = 50, /* Really 63 but > 50 is not pretty */
102 .step = 1,
103 .default_value = 25,
104 },
105 .set_control = setgain
106 },
107[EXPOSURE] = {
108 {
109 .id = V4L2_CID_EXPOSURE,
110 .type = V4L2_CTRL_TYPE_INTEGER,
111 .name = "Exposure",
112 .minimum = 0,
113 .maximum = 32767,
114 .step = 1,
115 .default_value = 15000,
116 },
117 .set_control = setexposure
118 },
119[FREQ] = {
120 {
121 .id = V4L2_CID_POWER_LINE_FREQUENCY,
122 .type = V4L2_CTRL_TYPE_MENU,
123 .name = "Light frequency filter",
124 .minimum = 0,
125 .maximum = 2,
126 .step = 1,
127 .default_value = 0,
128 },
129 .set_control = setexposure
130 },
131};
132
133static void se401_write_req(struct gspca_dev *gspca_dev, u16 req, u16 value,
134 int silent)
135{
136 int err;
137
138 if (gspca_dev->usb_err < 0)
139 return;
140
141 err = usb_control_msg(gspca_dev->dev,
142 usb_sndctrlpipe(gspca_dev->dev, 0), req,
143 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
144 value, 0, NULL, 0, 1000);
145 if (err < 0) {
146 if (!silent)
147 err("write req failed req %#04x val %#04x error %d",
148 req, value, err);
149 gspca_dev->usb_err = err;
150 }
151}
152
153static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent)
154{
155 int err;
156
157 if (gspca_dev->usb_err < 0)
158 return;
159
160 if (USB_BUF_SZ < READ_REQ_SIZE) {
161 err("USB_BUF_SZ too small!!");
162 gspca_dev->usb_err = -ENOBUFS;
163 return;
164 }
165
166 err = usb_control_msg(gspca_dev->dev,
167 usb_rcvctrlpipe(gspca_dev->dev, 0), req,
168 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
169 0, 0, gspca_dev->usb_buf, READ_REQ_SIZE, 1000);
170 if (err < 0) {
171 if (!silent)
172 err("read req failed req %#04x error %d", req, err);
173 gspca_dev->usb_err = err;
174 }
175}
176
177static void se401_set_feature(struct gspca_dev *gspca_dev,
178 u16 selector, u16 param)
179{
180 int err;
181
182 if (gspca_dev->usb_err < 0)
183 return;
184
185 err = usb_control_msg(gspca_dev->dev,
186 usb_sndctrlpipe(gspca_dev->dev, 0),
187 SE401_REQ_SET_EXT_FEATURE,
188 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
189 param, selector, NULL, 0, 1000);
190 if (err < 0) {
191 err("set feature failed sel %#04x param %#04x error %d",
192 selector, param, err);
193 gspca_dev->usb_err = err;
194 }
195}
196
197static int se401_get_feature(struct gspca_dev *gspca_dev, u16 selector)
198{
199 int err;
200
201 if (gspca_dev->usb_err < 0)
202 return gspca_dev->usb_err;
203
204 if (USB_BUF_SZ < 2) {
205 err("USB_BUF_SZ too small!!");
206 gspca_dev->usb_err = -ENOBUFS;
207 return gspca_dev->usb_err;
208 }
209
210 err = usb_control_msg(gspca_dev->dev,
211 usb_rcvctrlpipe(gspca_dev->dev, 0),
212 SE401_REQ_GET_EXT_FEATURE,
213 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
214 0, selector, gspca_dev->usb_buf, 2, 1000);
215 if (err < 0) {
216 err("get feature failed sel %#04x error %d", selector, err);
217 gspca_dev->usb_err = err;
218 return err;
219 }
220 return gspca_dev->usb_buf[0] | (gspca_dev->usb_buf[1] << 8);
221}
222
223static void setbrightness(struct gspca_dev *gspca_dev)
224{
225 struct sd *sd = (struct sd *) gspca_dev;
226
227 if (gspca_dev->ctrl_dis & (1 << BRIGHTNESS))
228 return;
229
230 /* HDG: this does not seem to do anything on my cam */
231 se401_write_req(gspca_dev, SE401_REQ_SET_BRT,
232 sd->ctrls[BRIGHTNESS].val, 0);
233}
234
235static void setgain(struct gspca_dev *gspca_dev)
236{
237 struct sd *sd = (struct sd *) gspca_dev;
238 u16 gain = 63 - sd->ctrls[GAIN].val;
239
240 /* red color gain */
241 se401_set_feature(gspca_dev, HV7131_REG_ARCG, gain);
242 /* green color gain */
243 se401_set_feature(gspca_dev, HV7131_REG_AGCG, gain);
244 /* blue color gain */
245 se401_set_feature(gspca_dev, HV7131_REG_ABCG, gain);
246}
247
248static void setexposure(struct gspca_dev *gspca_dev)
249{
250 struct sd *sd = (struct sd *) gspca_dev;
251 int integration = sd->ctrls[EXPOSURE].val << 6;
252 u8 expose_h, expose_m, expose_l;
253
254 /* Do this before the set_feature calls, for proper timing wrt
255 the interrupt driven pkt_scan. Note we may still race but that
256 is not a big issue, the expo change state machine is merely for
257 avoiding underexposed frames getting send out, if one sneaks
258 through so be it */
259 sd->expo_change_state = EXPO_CHANGED;
260
261 if (sd->ctrls[FREQ].val == V4L2_CID_POWER_LINE_FREQUENCY_50HZ)
262 integration = integration - integration % 106667;
263 if (sd->ctrls[FREQ].val == V4L2_CID_POWER_LINE_FREQUENCY_60HZ)
264 integration = integration - integration % 88889;
265
266 expose_h = (integration >> 16);
267 expose_m = (integration >> 8);
268 expose_l = integration;
269
270 /* integration time low */
271 se401_set_feature(gspca_dev, HV7131_REG_TITL, expose_l);
272 /* integration time mid */
273 se401_set_feature(gspca_dev, HV7131_REG_TITM, expose_m);
274 /* integration time high */
275 se401_set_feature(gspca_dev, HV7131_REG_TITU, expose_h);
276}
277
278static int sd_config(struct gspca_dev *gspca_dev,
279 const struct usb_device_id *id)
280{
281 struct sd *sd = (struct sd *)gspca_dev;
282 struct cam *cam = &gspca_dev->cam;
283 u8 *cd = gspca_dev->usb_buf;
284 int i, j, n;
285 int widths[MAX_MODES], heights[MAX_MODES];
286
287 /* Read the camera descriptor */
288 se401_read_req(gspca_dev, SE401_REQ_GET_CAMERA_DESCRIPTOR, 1);
289 if (gspca_dev->usb_err) {
290 /* Sometimes after being idle for a while the se401 won't
291 respond and needs a good kicking */
292 usb_reset_device(gspca_dev->dev);
293 gspca_dev->usb_err = 0;
294 se401_read_req(gspca_dev, SE401_REQ_GET_CAMERA_DESCRIPTOR, 0);
295 }
296
297 /* Some cameras start with their LED on */
298 se401_write_req(gspca_dev, SE401_REQ_LED_CONTROL, 0, 0);
299 if (gspca_dev->usb_err)
300 return gspca_dev->usb_err;
301
302 if (cd[1] != 0x41) {
303 err("Wrong descriptor type");
304 return -ENODEV;
305 }
306
307 if (!(cd[2] & SE401_FORMAT_BAYER)) {
308 err("Bayer format not supported!");
309 return -ENODEV;
310 }
311
312 if (cd[3])
313 info("ExtraFeatures: %d", cd[3]);
314
315 n = cd[4] | (cd[5] << 8);
316 if (n > MAX_MODES) {
317 err("Too many frame sizes");
318 return -ENODEV;
319 }
320
321 for (i = 0; i < n ; i++) {
322 widths[i] = cd[6 + i * 4 + 0] | (cd[6 + i * 4 + 1] << 8);
323 heights[i] = cd[6 + i * 4 + 2] | (cd[6 + i * 4 + 3] << 8);
324 }
325
326 for (i = 0; i < n ; i++) {
327 sd->fmts[i].width = widths[i];
328 sd->fmts[i].height = heights[i];
329 sd->fmts[i].field = V4L2_FIELD_NONE;
330 sd->fmts[i].colorspace = V4L2_COLORSPACE_SRGB;
331 sd->fmts[i].priv = 1;
332
333 /* janggu compression only works for 1/4th or 1/16th res */
334 for (j = 0; j < n; j++) {
335 if (widths[j] / 2 == widths[i] &&
336 heights[j] / 2 == heights[i]) {
337 sd->fmts[i].priv = 2;
338 break;
339 }
340 }
341 /* 1/16th if available too is better then 1/4th, because
342 we then use a larger area of the sensor */
343 for (j = 0; j < n; j++) {
344 if (widths[j] / 4 == widths[i] &&
345 heights[j] / 4 == heights[i]) {
346 sd->fmts[i].priv = 4;
347 break;
348 }
349 }
350
351 if (sd->fmts[i].priv == 1) {
352 /* Not a 1/4th or 1/16th res, use bayer */
353 sd->fmts[i].pixelformat = V4L2_PIX_FMT_SBGGR8;
354 sd->fmts[i].bytesperline = widths[i];
355 sd->fmts[i].sizeimage = widths[i] * heights[i];
356 info("Frame size: %dx%d bayer", widths[i], heights[i]);
357 } else {
358 /* Found a match use janggu compression */
359 sd->fmts[i].pixelformat = V4L2_PIX_FMT_SE401;
360 sd->fmts[i].bytesperline = 0;
361 sd->fmts[i].sizeimage = widths[i] * heights[i] * 3;
362 info("Frame size: %dx%d 1/%dth janggu",
363 widths[i], heights[i],
364 sd->fmts[i].priv * sd->fmts[i].priv);
365 }
366 }
367
368 cam->cam_mode = sd->fmts;
369 cam->nmodes = n;
370 cam->bulk = 1;
371 cam->bulk_size = BULK_SIZE;
372 cam->bulk_nurbs = 4;
373 cam->ctrls = sd->ctrls;
374 gspca_dev->nbalt = 1; /* Ignore the bogus isoc alt settings */
375 sd->resetlevel = 0x2d; /* Set initial resetlevel */
376
377 /* See if the camera supports brightness */
378 se401_read_req(gspca_dev, SE401_REQ_GET_BRT, 1);
379 if (gspca_dev->usb_err) {
380 gspca_dev->ctrl_dis = (1 << BRIGHTNESS);
381 gspca_dev->usb_err = 0;
382 }
383
384 return 0;
385}
386
387/* this function is called at probe and resume time */
388static int sd_init(struct gspca_dev *gspca_dev)
389{
390 return 0;
391}
392
/* -- start the camera -- */
static int sd_start(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *)gspca_dev;
	/* priv was set in sd_config: 1 = raw bayer, 2 = 1/4th janggu,
	   4 = 1/16th janggu compression */
	int mult = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
	int mode = 0;

	se401_write_req(gspca_dev, SE401_REQ_CAMERA_POWER, 1, 1);
	if (gspca_dev->usb_err) {
		/* Sometimes after being idle for a while the se401 won't
		   respond and needs a good kicking  */
		usb_reset_device(gspca_dev->dev);
		gspca_dev->usb_err = 0;
		se401_write_req(gspca_dev, SE401_REQ_CAMERA_POWER, 1, 0);
	}
	se401_write_req(gspca_dev, SE401_REQ_LED_CONTROL, 1, 0);

	se401_set_feature(gspca_dev, HV7131_REG_MODE_B, 0x05);

	/* set size + mode; the sensor captures at mult times the requested
	   resolution, janggu compression then scales it back down */
	se401_write_req(gspca_dev, SE401_REQ_SET_WIDTH,
			gspca_dev->width * mult, 0);
	se401_write_req(gspca_dev, SE401_REQ_SET_HEIGHT,
			gspca_dev->height * mult, 0);
	/*
	 * HDG: disabled this as it does not seem to do anything
	 * se401_write_req(gspca_dev, SE401_REQ_SET_OUTPUT_MODE,
	 *		   SE401_FORMAT_BAYER, 0);
	 */

	switch (mult) {
	case 1: /* Raw bayer */
		mode = 0x03; break;
	case 2: /* 1/4th janggu */
		mode = SE401_QUANT_FACT << 4; break;
	case 4: /* 1/16th janggu */
		mode = (SE401_QUANT_FACT << 4) | 0x02; break;
	}
	se401_set_feature(gspca_dev, SE401_OPERATINGMODE, mode);

	/* Push the current control values to the hardware */
	setbrightness(gspca_dev);
	setgain(gspca_dev);
	setexposure(gspca_dev);
	se401_set_feature(gspca_dev, HV7131_REG_ARLV, sd->resetlevel);

	/* Reset the per-stream software state */
	sd->packet_read = 0;
	sd->pixels_read = 0;
	sd->restart_stream = 0;
	sd->resetlevel_frame_count = 0;
	sd->resetlevel_adjust_dir = 0;
	sd->expo_change_state = EXPO_NO_CHANGE;

	se401_write_req(gspca_dev, SE401_REQ_START_CONTINUOUS_CAPTURE, 0, 0);

	return gspca_dev->usb_err;
}
449
/* Stop streaming: halt capture first, then turn off the LED and
   finally the camera power. */
static void sd_stopN(struct gspca_dev *gspca_dev)
{
	se401_write_req(gspca_dev, SE401_REQ_STOP_CONTINUOUS_CAPTURE, 0, 0);
	se401_write_req(gspca_dev, SE401_REQ_LED_CONTROL, 0, 0);
	se401_write_req(gspca_dev, SE401_REQ_CAMERA_POWER, 0, 0);
}
456
/* Called by the gspca core when a frame is dequeued by userspace.
   Handles deferred stream restarts (requested from pkt_scan, which runs
   in interrupt context and cannot do USB control transfers itself) and
   periodically auto-adjusts the sensor reset level. */
static void sd_dq_callback(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *)gspca_dev;
	unsigned int ahrc, alrc;
	int oldreset, adjust_dir;

	/* Restart the stream if requested do so by pkt_scan */
	if (sd->restart_stream) {
		sd_stopN(gspca_dev);
		sd_start(gspca_dev);
		sd->restart_stream = 0;
	}

	/* Automatically adjust sensor reset level
	   Hyundai have some really nice docs about this and other sensor
	   related stuff on their homepage: www.hei.co.kr */
	sd->resetlevel_frame_count++;
	if (sd->resetlevel_frame_count < 20)
		return;		/* only re-evaluate every 20 frames */

	/* For some reason this normally read-only register doesn't get reset
	   to zero after reading them just once... */
	se401_get_feature(gspca_dev, HV7131_REG_HIREFNOH);
	se401_get_feature(gspca_dev, HV7131_REG_HIREFNOL);
	se401_get_feature(gspca_dev, HV7131_REG_LOREFNOH);
	se401_get_feature(gspca_dev, HV7131_REG_LOREFNOL);
	/* high / low reference pixel counts, 16 bit big-endian register pairs */
	ahrc = 256*se401_get_feature(gspca_dev, HV7131_REG_HIREFNOH) +
	    se401_get_feature(gspca_dev, HV7131_REG_HIREFNOL);
	alrc = 256*se401_get_feature(gspca_dev, HV7131_REG_LOREFNOH) +
	    se401_get_feature(gspca_dev, HV7131_REG_LOREFNOL);

	/* Not an exact science, but it seems to work pretty well... */
	oldreset = sd->resetlevel;
	if (alrc > 10) {
		/* too many low-reference pixels: raise the reset level,
		   stepping further the larger the count (log2 scaling) */
		while (alrc >= 10 && sd->resetlevel < 63) {
			sd->resetlevel++;
			alrc /= 2;
		}
	} else if (ahrc > 20) {
		/* too many high-reference pixels: lower the reset level */
		while (ahrc >= 20 && sd->resetlevel > 0) {
			sd->resetlevel--;
			ahrc /= 2;
		}
	}
	/* Detect ping-pong-ing and halve adjustment to avoid overshoot */
	if (sd->resetlevel > oldreset)
		adjust_dir = 1;
	else
		adjust_dir = -1;
	if (sd->resetlevel_adjust_dir &&
	    sd->resetlevel_adjust_dir != adjust_dir)
		sd->resetlevel = oldreset + (sd->resetlevel - oldreset) / 2;

	if (sd->resetlevel != oldreset) {
		sd->resetlevel_adjust_dir = adjust_dir;
		se401_set_feature(gspca_dev, HV7131_REG_ARLV, sd->resetlevel);
	}

	sd->resetlevel_frame_count = 0;
}
517
518static void sd_complete_frame(struct gspca_dev *gspca_dev, u8 *data, int len)
519{
520 struct sd *sd = (struct sd *)gspca_dev;
521
522 switch (sd->expo_change_state) {
523 case EXPO_CHANGED:
524 /* The exposure was changed while this frame
525 was being send, so this frame is ok */
526 sd->expo_change_state = EXPO_DROP_FRAME;
527 break;
528 case EXPO_DROP_FRAME:
529 /* The exposure was changed while this frame
530 was being captured, drop it! */
531 gspca_dev->last_packet_type = DISCARD_PACKET;
532 sd->expo_change_state = EXPO_NO_CHANGE;
533 break;
534 case EXPO_NO_CHANGE:
535 break;
536 }
537 gspca_frame_add(gspca_dev, LAST_PACKET, data, len);
538}
539
/* Reassemble janggu-compressed packets out of bulk transfers.
   Each camera packet has a 4 byte header followed by payload; packets may
   straddle bulk-transfer boundaries, so partial data is buffered across
   calls in sd->packet / sd->packet_read. */
static void sd_pkt_scan_janggu(struct gspca_dev *gspca_dev, u8 *data, int len)
{
	struct sd *sd = (struct sd *)gspca_dev;
	int imagesize = gspca_dev->width * gspca_dev->height;
	int i, plen, bits, pixels, info, count;

	/* A restart is pending; drop everything until it has happened */
	if (sd->restart_stream)
		return;

	/* Sometimes a 1024 bytes garbage bulk packet is send between frames */
	if (gspca_dev->last_packet_type == LAST_PACKET && len == 1024) {
		gspca_dev->last_packet_type = DISCARD_PACKET;
		return;
	}

	i = 0;
	while (i < len) {
		/* Read header if not already be present from prev bulk pkt */
		if (sd->packet_read < 4) {
			count = 4 - sd->packet_read;
			if (count > len - i)
				count = len - i;
			memcpy(&sd->packet[sd->packet_read], &data[i], count);
			sd->packet_read += count;
			i += count;
			if (sd->packet_read < 4)
				break;	/* header still incomplete, wait for next bulk pkt */
		}
		/* Header layout: [0] bits 7-6 = info, [0] bits 5-0 + [1] =
		   pixel count, [2]-[3] = compressed bit count (big endian) */
		bits = sd->packet[3] + (sd->packet[2] << 8);
		pixels = sd->packet[1] + ((sd->packet[0] & 0x3f) << 8);
		info = (sd->packet[0] & 0xc0) >> 6;
		/* total packet length in bytes, derived from the bit count
		   (rounded up to an even number of bytes, header included) */
		plen = ((bits + 47) >> 4) << 1;
		/* Sanity checks */
		if (plen > 1024) {
			err("invalid packet len %d restarting stream", plen);
			goto error;
		}
		if (info == 3) {
			err("unknown frame info value restarting stream");
			goto error;
		}

		/* Read (remainder of) packet contents */
		count = plen - sd->packet_read;
		if (count > len - i)
			count = len - i;
		memcpy(&sd->packet[sd->packet_read], &data[i], count);
		sd->packet_read += count;
		i += count;
		if (sd->packet_read < plen)
			break;	/* payload incomplete, continue on next bulk pkt */

		sd->pixels_read += pixels;
		sd->packet_read = 0;

		switch (info) {
		case 0: /* Frame data */
			gspca_frame_add(gspca_dev, INTER_PACKET, sd->packet,
					plen);
			break;
		case 1: /* EOF */
			if (sd->pixels_read != imagesize) {
				err("frame size %d expected %d",
				    sd->pixels_read, imagesize);
				goto error;
			}
			sd_complete_frame(gspca_dev, sd->packet, plen);
			return; /* Discard the rest of the bulk packet !! */
		case 2: /* SOF */
			gspca_frame_add(gspca_dev, FIRST_PACKET, sd->packet,
					plen);
			sd->pixels_read = pixels;
			break;
		}
	}
	return;

error:
	sd->restart_stream = 1;
	/* Give userspace a 0 bytes frame, so our dq callback gets
	   called and it can restart the stream */
	gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
	gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
}
624
625static void sd_pkt_scan_bayer(struct gspca_dev *gspca_dev, u8 *data, int len)
626{
627 struct cam *cam = &gspca_dev->cam;
628 int imagesize = cam->cam_mode[gspca_dev->curr_mode].sizeimage;
629
630 if (gspca_dev->image_len == 0) {
631 gspca_frame_add(gspca_dev, FIRST_PACKET, data, len);
632 return;
633 }
634
635 if (gspca_dev->image_len + len >= imagesize) {
636 sd_complete_frame(gspca_dev, data, len);
637 return;
638 }
639
640 gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
641}
642
643static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len)
644{
645 int mult = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
646
647 if (len == 0)
648 return;
649
650 if (mult == 1) /* mult == 1 means raw bayer */
651 sd_pkt_scan_bayer(gspca_dev, data, len);
652 else
653 sd_pkt_scan_janggu(gspca_dev, data, len);
654}
655
656static int sd_querymenu(struct gspca_dev *gspca_dev,
657 struct v4l2_querymenu *menu)
658{
659 switch (menu->id) {
660 case V4L2_CID_POWER_LINE_FREQUENCY:
661 switch (menu->index) {
662 case V4L2_CID_POWER_LINE_FREQUENCY_DISABLED:
663 strcpy((char *) menu->name, "NoFliker");
664 return 0;
665 case V4L2_CID_POWER_LINE_FREQUENCY_50HZ:
666 strcpy((char *) menu->name, "50 Hz");
667 return 0;
668 case V4L2_CID_POWER_LINE_FREQUENCY_60HZ:
669 strcpy((char *) menu->name, "60 Hz");
670 return 0;
671 }
672 break;
673 }
674 return -EINVAL;
675}
676
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
/* Parse an interrupt-endpoint packet carrying the snapshot button state
   and forward edges to the input subsystem. */
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len)
{
	struct sd *sd = (struct sd *)gspca_dev;
	u8 new_state;

	if (len != 2)
		return -EINVAL;

	/* Byte 0 is the button state and must be 0 or 1 */
	if (data[0] > 1)
		return -EINVAL;
	new_state = data[0];

	/* Only report changes, the camera repeats the current state */
	if (new_state != sd->button_state) {
		input_report_key(gspca_dev->input_dev, KEY_CAMERA, new_state);
		input_sync(gspca_dev->input_dev);
		sd->button_state = new_state;
	}

	return 0;
}
#endif
703
/* sub-driver description: callbacks and controls handed to the gspca core */
static const struct sd_desc sd_desc = {
	.name = MODULE_NAME,
	.ctrls = sd_ctrls,		/* v4l2 control table */
	.nctrls = ARRAY_SIZE(sd_ctrls),
	.config = sd_config,		/* called on probe */
	.init = sd_init,		/* called on probe and resume */
	.start = sd_start,
	.stopN = sd_stopN,
	.dq_callback = sd_dq_callback,	/* per-dequeued-frame housekeeping */
	.pkt_scan = sd_pkt_scan,	/* bulk data parser */
	.querymenu = sd_querymenu,
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
	.int_pkt_scan = sd_int_pkt_scan, /* snapshot button events */
#endif
};
720
/* -- module initialisation -- */
/* USB vendor:product ids of all cameras handled by this driver */
static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x03e8, 0x0004)}, /* Endpoints/Aox SE401 */
	{USB_DEVICE(0x0471, 0x030b)}, /* Philips PCVC665K */
	{USB_DEVICE(0x047d, 0x5001)}, /* Kensington 67014 */
	{USB_DEVICE(0x047d, 0x5002)}, /* Kensington 6701(5/7) */
	{USB_DEVICE(0x047d, 0x5003)}, /* Kensington 67016 */
	{}				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, device_table);
731
/* -- device connect -- */
/* Thin wrapper: let the gspca core allocate our per-device struct sd
   and do all common probe work. */
static int sd_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
			       THIS_MODULE);
}
739
/* Called before a USB reset; nothing needs saving here (stream state is
   rebuilt by sd_start after the reset). Returning 0 tells the USB core
   the driver can cope with the reset without being unbound. */
static int sd_pre_reset(struct usb_interface *intf)
{
	return 0;
}
744
/* Called after a USB reset completes; no restore work is needed,
   see sd_pre_reset. */
static int sd_post_reset(struct usb_interface *intf)
{
	return 0;
}
749
/* USB driver glue; disconnect/suspend/resume are handled generically
   by the gspca core. */
static struct usb_driver sd_driver = {
	.name = MODULE_NAME,
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
#endif
	.pre_reset = sd_pre_reset,	/* survive usb_reset_device() calls */
	.post_reset = sd_post_reset,
};
762
/* -- module insert / remove -- */
/* Register the USB driver with the core on module load. */
static int __init sd_mod_init(void)
{
	return usb_register(&sd_driver);
}
/* Unregister on module unload. */
static void __exit sd_mod_exit(void)
{
	usb_deregister(&sd_driver);
}

module_init(sd_mod_init);
module_exit(sd_mod_exit);
diff --git a/drivers/media/video/gspca/se401.h b/drivers/media/video/gspca/se401.h
new file mode 100644
index 000000000000..96d8ebf3cf59
--- /dev/null
+++ b/drivers/media/video/gspca/se401.h
@@ -0,0 +1,90 @@
1/*
2 * GSPCA Endpoints (formerly known as AOX) se401 USB Camera sub Driver
3 *
4 * Copyright (C) 2011 Hans de Goede <hdegoede@redhat.com>
5 *
6 * Based on the v4l1 se401 driver which is:
7 *
8 * Copyright (c) 2000 Jeroen B. Vreeken (pe1rxq@amsat.org)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 */
25
26#define SE401_REQ_GET_CAMERA_DESCRIPTOR 0x06
27#define SE401_REQ_START_CONTINUOUS_CAPTURE 0x41
28#define SE401_REQ_STOP_CONTINUOUS_CAPTURE 0x42
29#define SE401_REQ_CAPTURE_FRAME 0x43
30#define SE401_REQ_GET_BRT 0x44
31#define SE401_REQ_SET_BRT 0x45
32#define SE401_REQ_GET_WIDTH 0x4c
33#define SE401_REQ_SET_WIDTH 0x4d
34#define SE401_REQ_GET_HEIGHT 0x4e
35#define SE401_REQ_SET_HEIGHT 0x4f
36#define SE401_REQ_GET_OUTPUT_MODE 0x50
37#define SE401_REQ_SET_OUTPUT_MODE 0x51
38#define SE401_REQ_GET_EXT_FEATURE 0x52
39#define SE401_REQ_SET_EXT_FEATURE 0x53
40#define SE401_REQ_CAMERA_POWER 0x56
41#define SE401_REQ_LED_CONTROL 0x57
42#define SE401_REQ_BIOS 0xff
43
44#define SE401_BIOS_READ 0x07
45
46#define SE401_FORMAT_BAYER 0x40
47
48/* Hyundai hv7131b registers
49 7121 and 7141 should be the same (haven't really checked...) */
50/* Mode registers: */
51#define HV7131_REG_MODE_A 0x00
52#define HV7131_REG_MODE_B 0x01
53#define HV7131_REG_MODE_C 0x02
54/* Frame registers: */
55#define HV7131_REG_FRSU 0x10
56#define HV7131_REG_FRSL 0x11
57#define HV7131_REG_FCSU 0x12
58#define HV7131_REG_FCSL 0x13
59#define HV7131_REG_FWHU 0x14
60#define HV7131_REG_FWHL 0x15
61#define HV7131_REG_FWWU 0x16
62#define HV7131_REG_FWWL 0x17
63/* Timing registers: */
64#define HV7131_REG_THBU 0x20
65#define HV7131_REG_THBL 0x21
66#define HV7131_REG_TVBU 0x22
67#define HV7131_REG_TVBL 0x23
68#define HV7131_REG_TITU 0x25
69#define HV7131_REG_TITM 0x26
70#define HV7131_REG_TITL 0x27
71#define HV7131_REG_TMCD 0x28
72/* Adjust Registers: */
73#define HV7131_REG_ARLV 0x30
74#define HV7131_REG_ARCG 0x31
75#define HV7131_REG_AGCG 0x32
76#define HV7131_REG_ABCG 0x33
77#define HV7131_REG_APBV 0x34
78#define HV7131_REG_ASLP 0x54
79/* Offset Registers: */
80#define HV7131_REG_OFSR 0x50
81#define HV7131_REG_OFSG 0x51
82#define HV7131_REG_OFSB 0x52
83/* REset level statistics registers: */
84#define HV7131_REG_LOREFNOH 0x57
85#define HV7131_REG_LOREFNOL 0x58
86#define HV7131_REG_HIREFNOH 0x59
87#define HV7131_REG_HIREFNOL 0x5a
88
89/* se401 registers */
90#define SE401_OPERATINGMODE 0x2000
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index b089c0d3ee9f..6ec232902183 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -247,7 +247,6 @@ static const struct cmd spca504A_clicksmart420_init_data[] = {
247 {0x30, 0x0004, 0x000a}, 247 {0x30, 0x0004, 0x000a},
248 {0xb0, 0x0001, 0x0000}, 248 {0xb0, 0x0001, 0x0000},
249 249
250
251 {0xa1, 0x0080, 0x0001}, 250 {0xa1, 0x0080, 0x0001},
252 {0x30, 0x0049, 0x0000}, 251 {0x30, 0x0049, 0x0000},
253 {0x30, 0x0060, 0x0005}, 252 {0x30, 0x0060, 0x0005},
@@ -256,8 +255,6 @@ static const struct cmd spca504A_clicksmart420_init_data[] = {
256 {0x00, 0x0000, 0x2000}, 255 {0x00, 0x0000, 0x2000},
257 {0x00, 0x0013, 0x2301}, 256 {0x00, 0x0013, 0x2301},
258 {0x00, 0x0003, 0x2000}, 257 {0x00, 0x0003, 0x2000},
259 {0x00, 0x0000, 0x2000},
260
261}; 258};
262 259
263/* clicksmart 420 open data ? */ 260/* clicksmart 420 open data ? */
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 7e762d551099..d1d733b9359b 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -1387,7 +1387,7 @@ static int sd_querymenu(struct gspca_dev *gspca_dev,
1387 return 0; 1387 return 0;
1388 case V4L2_CID_EFFECTS: 1388 case V4L2_CID_EFFECTS:
1389 if ((unsigned) menu->index < ARRAY_SIZE(effects_control)) { 1389 if ((unsigned) menu->index < ARRAY_SIZE(effects_control)) {
1390 strncpy((char *) menu->name, 1390 strlcpy((char *) menu->name,
1391 effects_control[menu->index], 1391 effects_control[menu->index],
1392 sizeof menu->name); 1392 sizeof menu->name);
1393 return 0; 1393 return 0;
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index a27d93b503a5..441dacf642bb 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -17,7 +17,7 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/uaccess.h> 19#include <linux/uaccess.h>
20#include <asm/atomic.h> 20#include <linux/atomic.h>
21#include <linux/usb.h> 21#include <linux/usb.h>
22#include <linux/mutex.h> 22#include <linux/mutex.h>
23#include <linux/i2c.h> 23#include <linux/i2c.h>
@@ -474,5 +474,6 @@ module_init(hdpvr_init);
474module_exit(hdpvr_exit); 474module_exit(hdpvr_exit);
475 475
476MODULE_LICENSE("GPL"); 476MODULE_LICENSE("GPL");
477MODULE_VERSION("0.2.1");
477MODULE_AUTHOR("Janne Grunau"); 478MODULE_AUTHOR("Janne Grunau");
478MODULE_DESCRIPTION("Hauppauge HD PVR driver"); 479MODULE_DESCRIPTION("Hauppauge HD PVR driver");
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index 514aea76eaa5..087f7c08cb85 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -17,7 +17,6 @@
17#include <linux/uaccess.h> 17#include <linux/uaccess.h>
18#include <linux/usb.h> 18#include <linux/usb.h>
19#include <linux/mutex.h> 19#include <linux/mutex.h>
20#include <linux/version.h>
21#include <linux/workqueue.h> 20#include <linux/workqueue.h>
22 21
23#include <linux/videodev2.h> 22#include <linux/videodev2.h>
@@ -574,7 +573,6 @@ static int vidioc_querycap(struct file *file, void *priv,
574 strcpy(cap->driver, "hdpvr"); 573 strcpy(cap->driver, "hdpvr");
575 strcpy(cap->card, "Hauppauge HD PVR"); 574 strcpy(cap->card, "Hauppauge HD PVR");
576 usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); 575 usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
577 cap->version = HDPVR_VERSION;
578 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | 576 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
579 V4L2_CAP_AUDIO | 577 V4L2_CAP_AUDIO |
580 V4L2_CAP_READWRITE; 578 V4L2_CAP_READWRITE;
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
index 072f23c570f3..d6439db1d18b 100644
--- a/drivers/media/video/hdpvr/hdpvr.h
+++ b/drivers/media/video/hdpvr/hdpvr.h
@@ -18,12 +18,6 @@
18#include <media/v4l2-device.h> 18#include <media/v4l2-device.h>
19#include <media/ir-kbd-i2c.h> 19#include <media/ir-kbd-i2c.h>
20 20
21#define HDPVR_MAJOR_VERSION 0
22#define HDPVR_MINOR_VERSION 2
23#define HDPVR_RELEASE 0
24#define HDPVR_VERSION \
25 KERNEL_VERSION(HDPVR_MAJOR_VERSION, HDPVR_MINOR_VERSION, HDPVR_RELEASE)
26
27#define HDPVR_MAX 8 21#define HDPVR_MAX 8
28#define HDPVR_I2C_MAX_SIZE 128 22#define HDPVR_I2C_MAX_SIZE 128
29 23
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 84bdf0f42a8e..8f9cc17b518e 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -36,7 +36,6 @@
36 * using information provided by Jiun-Kuei Jung @ AVerMedia. 36 * using information provided by Jiun-Kuei Jung @ AVerMedia.
37 */ 37 */
38 38
39#include <linux/version.h>
40#include <linux/module.h> 39#include <linux/module.h>
41#include <linux/init.h> 40#include <linux/init.h>
42#include <linux/delay.h> 41#include <linux/delay.h>
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index a7f54b010a5c..38f052257f46 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -722,8 +722,8 @@ unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
722 722
723 /* If there are subscribed events, then only use the new event 723 /* If there are subscribed events, then only use the new event
724 API instead of the old video.h based API. */ 724 API instead of the old video.h based API. */
725 if (!list_empty(&id->fh.events->subscribed)) { 725 if (!list_empty(&id->fh.subscribed)) {
726 poll_wait(filp, &id->fh.events->wait, wait); 726 poll_wait(filp, &id->fh.wait, wait);
727 /* Turn off the old-style vsync events */ 727 /* Turn off the old-style vsync events */
728 clear_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags); 728 clear_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
729 if (v4l2_event_pending(&id->fh)) 729 if (v4l2_event_pending(&id->fh))
@@ -750,6 +750,7 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
750 struct ivtv *itv = id->itv; 750 struct ivtv *itv = id->itv;
751 struct ivtv_stream *s = &itv->streams[id->type]; 751 struct ivtv_stream *s = &itv->streams[id->type];
752 int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags); 752 int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
753 unsigned res = 0;
753 754
754 /* Start a capture if there is none */ 755 /* Start a capture if there is none */
755 if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) { 756 if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
@@ -769,12 +770,16 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
769 /* add stream's waitq to the poll list */ 770 /* add stream's waitq to the poll list */
770 IVTV_DEBUG_HI_FILE("Encoder poll\n"); 771 IVTV_DEBUG_HI_FILE("Encoder poll\n");
771 poll_wait(filp, &s->waitq, wait); 772 poll_wait(filp, &s->waitq, wait);
773 if (v4l2_event_pending(&id->fh))
774 res |= POLLPRI;
775 else
776 poll_wait(filp, &id->fh.wait, wait);
772 777
773 if (s->q_full.length || s->q_io.length) 778 if (s->q_full.length || s->q_io.length)
774 return POLLIN | POLLRDNORM; 779 return res | POLLIN | POLLRDNORM;
775 if (eof) 780 if (eof)
776 return POLLHUP; 781 return res | POLLHUP;
777 return 0; 782 return res;
778} 783}
779 784
780void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end) 785void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end)
@@ -961,10 +966,6 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
961 return -ENOMEM; 966 return -ENOMEM;
962 } 967 }
963 v4l2_fh_init(&item->fh, s->vdev); 968 v4l2_fh_init(&item->fh, s->vdev);
964 if (s->type == IVTV_DEC_STREAM_TYPE_YUV ||
965 s->type == IVTV_DEC_STREAM_TYPE_MPG) {
966 res = v4l2_event_alloc(&item->fh, 60);
967 }
968 if (res < 0) { 969 if (res < 0) {
969 v4l2_fh_exit(&item->fh); 970 v4l2_fh_exit(&item->fh);
970 kfree(item); 971 kfree(item);
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 120c7d8e0895..3e5c090af112 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -757,7 +757,6 @@ static int ivtv_querycap(struct file *file, void *fh, struct v4l2_capability *vc
757 strlcpy(vcap->driver, IVTV_DRIVER_NAME, sizeof(vcap->driver)); 757 strlcpy(vcap->driver, IVTV_DRIVER_NAME, sizeof(vcap->driver));
758 strlcpy(vcap->card, itv->card_name, sizeof(vcap->card)); 758 strlcpy(vcap->card, itv->card_name, sizeof(vcap->card));
759 snprintf(vcap->bus_info, sizeof(vcap->bus_info), "PCI:%s", pci_name(itv->pdev)); 759 snprintf(vcap->bus_info, sizeof(vcap->bus_info), "PCI:%s", pci_name(itv->pdev));
760 vcap->version = IVTV_DRIVER_VERSION; /* version */
761 vcap->capabilities = itv->v4l2_cap; /* capabilities */ 760 vcap->capabilities = itv->v4l2_cap; /* capabilities */
762 return 0; 761 return 0;
763} 762}
@@ -1451,11 +1450,11 @@ static int ivtv_subscribe_event(struct v4l2_fh *fh, struct v4l2_event_subscripti
1451 switch (sub->type) { 1450 switch (sub->type) {
1452 case V4L2_EVENT_VSYNC: 1451 case V4L2_EVENT_VSYNC:
1453 case V4L2_EVENT_EOS: 1452 case V4L2_EVENT_EOS:
1454 break; 1453 case V4L2_EVENT_CTRL:
1454 return v4l2_event_subscribe(fh, sub, 0);
1455 default: 1455 default:
1456 return -EINVAL; 1456 return -EINVAL;
1457 } 1457 }
1458 return v4l2_event_subscribe(fh, sub);
1459} 1458}
1460 1459
1461static int ivtv_log_status(struct file *file, void *fh) 1460static int ivtv_log_status(struct file *file, void *fh)
diff --git a/drivers/media/video/ivtv/ivtv-version.h b/drivers/media/video/ivtv/ivtv-version.h
index b67a4048f5aa..a20f346fcad8 100644
--- a/drivers/media/video/ivtv/ivtv-version.h
+++ b/drivers/media/video/ivtv/ivtv-version.h
@@ -21,11 +21,6 @@
21#define IVTV_VERSION_H 21#define IVTV_VERSION_H
22 22
23#define IVTV_DRIVER_NAME "ivtv" 23#define IVTV_DRIVER_NAME "ivtv"
24#define IVTV_DRIVER_VERSION_MAJOR 1 24#define IVTV_VERSION "1.4.3"
25#define IVTV_DRIVER_VERSION_MINOR 4
26#define IVTV_DRIVER_VERSION_PATCHLEVEL 2
27
28#define IVTV_VERSION __stringify(IVTV_DRIVER_VERSION_MAJOR) "." __stringify(IVTV_DRIVER_VERSION_MINOR) "." __stringify(IVTV_DRIVER_VERSION_PATCHLEVEL)
29#define IVTV_DRIVER_VERSION KERNEL_VERSION(IVTV_DRIVER_VERSION_MAJOR,IVTV_DRIVER_VERSION_MINOR,IVTV_DRIVER_VERSION_PATCHLEVEL)
30 25
31#endif 26#endif
diff --git a/drivers/media/video/m5mols/m5mols_capture.c b/drivers/media/video/m5mols/m5mols_capture.c
index a45d8f098e02..3248ac805711 100644
--- a/drivers/media/video/m5mols/m5mols_capture.c
+++ b/drivers/media/video/m5mols/m5mols_capture.c
@@ -18,7 +18,6 @@
18#include <linux/irq.h> 18#include <linux/irq.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/version.h>
22#include <linux/gpio.h> 21#include <linux/gpio.h>
23#include <linux/regulator/consumer.h> 22#include <linux/regulator/consumer.h>
24#include <linux/videodev2.h> 23#include <linux/videodev2.h>
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
index 43c68f51c5ce..fb8e4a7a9dd2 100644
--- a/drivers/media/video/m5mols/m5mols_core.c
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -18,7 +18,6 @@
18#include <linux/irq.h> 18#include <linux/irq.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/version.h>
22#include <linux/gpio.h> 21#include <linux/gpio.h>
23#include <linux/regulator/consumer.h> 22#include <linux/regulator/consumer.h>
24#include <linux/videodev2.h> 23#include <linux/videodev2.h>
diff --git a/drivers/media/video/marvell-ccic/Kconfig b/drivers/media/video/marvell-ccic/Kconfig
new file mode 100644
index 000000000000..bf739e3b3398
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/Kconfig
@@ -0,0 +1,23 @@
1config VIDEO_CAFE_CCIC
2 tristate "Marvell 88ALP01 (Cafe) CMOS Camera Controller support"
3 depends on PCI && I2C && VIDEO_V4L2
4 select VIDEO_OV7670
5 select VIDEOBUF2_VMALLOC
6 select VIDEOBUF2_DMA_CONTIG
7 ---help---
8 This is a video4linux2 driver for the Marvell 88ALP01 integrated
9 CMOS camera controller. This is the controller found on first-
10 generation OLPC systems.
11
12config VIDEO_MMP_CAMERA
13 tristate "Marvell Armada 610 integrated camera controller support"
14 depends on ARCH_MMP && I2C && VIDEO_V4L2
15 select VIDEO_OV7670
16 select I2C_GPIO
17 select VIDEOBUF2_DMA_SG
18 ---help---
19 This is a Video4Linux2 driver for the integrated camera
20 controller found on Marvell Armada 610 application
21 processors (and likely beyond). This is the controller found
22 in OLPC XO 1.75 systems.
23
diff --git a/drivers/media/video/marvell-ccic/Makefile b/drivers/media/video/marvell-ccic/Makefile
new file mode 100644
index 000000000000..05a792c579a2
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/Makefile
@@ -0,0 +1,6 @@
1obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o
2cafe_ccic-y := cafe-driver.o mcam-core.o
3
4obj-$(CONFIG_VIDEO_MMP_CAMERA) += mmp_camera.o
5mmp_camera-y := mmp-driver.o mcam-core.o
6
diff --git a/drivers/media/video/marvell-ccic/cafe-driver.c b/drivers/media/video/marvell-ccic/cafe-driver.c
new file mode 100644
index 000000000000..d030f9beae88
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/cafe-driver.c
@@ -0,0 +1,654 @@
1/*
2 * A driver for the CMOS camera controller in the Marvell 88ALP01 "cafe"
3 * multifunction chip. Currently works with the Omnivision OV7670
4 * sensor.
5 *
6 * The data sheet for this device can be found at:
7 * http://www.marvell.com/products/pc_connectivity/88alp01/
8 *
9 * Copyright 2006-11 One Laptop Per Child Association, Inc.
10 * Copyright 2006-11 Jonathan Corbet <corbet@lwn.net>
11 *
12 * Written by Jonathan Corbet, corbet@lwn.net.
13 *
14 * v4l2_device/v4l2_subdev conversion by:
15 * Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
16 *
17 * This file may be distributed under the terms of the GNU General
18 * Public License, version 2.
19 */
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/pci.h>
24#include <linux/i2c.h>
25#include <linux/interrupt.h>
26#include <linux/spinlock.h>
27#include <linux/slab.h>
28#include <linux/videodev2.h>
29#include <media/v4l2-device.h>
30#include <media/v4l2-chip-ident.h>
31#include <linux/device.h>
32#include <linux/wait.h>
33#include <linux/delay.h>
34#include <linux/io.h>
35
36#include "mcam-core.h"
37
38#define CAFE_VERSION 0x000002
39
40
41/*
42 * Parameters.
43 */
44MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
45MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");
46MODULE_LICENSE("GPL");
47MODULE_SUPPORTED_DEVICE("Video");
48
49
50
51
/*
 * Per-device state for the Cafe variant: wraps the generic mcam_camera
 * core and adds the PCI device pointer plus the wait queue used by the
 * SMBUS (TWSI) transfer code below.
 */
struct cafe_camera {
	int registered;			/* Fully initialized? */
	struct mcam_camera mcam;	/* Shared controller core state */
	struct pci_dev *pdev;		/* Our PCI function */
	wait_queue_head_t smbus_wait;	/* Waiting on i2c events */
};
58
59/*
60 * Most of the camera controller registers are defined in mcam-core.h,
61 * but the Cafe platform has some additional registers of its own;
62 * they are described here.
63 */
64
65/*
66 * "General purpose register" has a couple of GPIOs used for sensor
67 * power and reset on OLPC XO 1.0 systems.
68 */
69#define REG_GPR 0xb4
70#define GPR_C1EN 0x00000020 /* Pad 1 (power down) enable */
71#define GPR_C0EN 0x00000010 /* Pad 0 (reset) enable */
72#define GPR_C1 0x00000002 /* Control 1 value */
73/*
74 * Control 0 is wired to reset on OLPC machines. For ov7x sensors,
75 * it is active low.
76 */
77#define GPR_C0 0x00000001 /* Control 0 value */
78
79/*
80 * These registers control the SMBUS module for communicating
81 * with the sensor.
82 */
83#define REG_TWSIC0 0xb8 /* TWSI (smbus) control 0 */
84#define TWSIC0_EN 0x00000001 /* TWSI enable */
85#define TWSIC0_MODE 0x00000002 /* 1 = 16-bit, 0 = 8-bit */
86#define TWSIC0_SID 0x000003fc /* Slave ID */
87/*
88 * Subtle trickery: the slave ID field starts with bit 2. But the
89 * Linux i2c stack wants to treat the bottommost bit as a separate
90 * read/write bit, which is why slave ID's are usually presented
91 * >>1. For consistency with that behavior, we shift over three
92 * bits instead of two.
93 */
94#define TWSIC0_SID_SHIFT 3
95#define TWSIC0_CLKDIV 0x0007fc00 /* Clock divider */
96#define TWSIC0_MASKACK 0x00400000 /* Mask ack from sensor */
97#define TWSIC0_OVMAGIC 0x00800000 /* Make it work on OV sensors */
98
99#define REG_TWSIC1 0xbc /* TWSI control 1 */
100#define TWSIC1_DATA 0x0000ffff /* Data to/from camchip */
101#define TWSIC1_ADDR 0x00ff0000 /* Address (register) */
102#define TWSIC1_ADDR_SHIFT 16
103#define TWSIC1_READ 0x01000000 /* Set for read op */
104#define TWSIC1_WSTAT 0x02000000 /* Write status */
105#define TWSIC1_RVALID 0x04000000 /* Read data valid */
106#define TWSIC1_ERROR 0x08000000 /* Something screwed up */
107
108/*
109 * Here's the weird global control registers
110 */
111#define REG_GL_CSR 0x3004 /* Control/status register */
112#define GCSR_SRS 0x00000001 /* SW Reset set */
113#define GCSR_SRC 0x00000002 /* SW Reset clear */
114#define GCSR_MRS 0x00000004 /* Master reset set */
115#define GCSR_MRC 0x00000008 /* HW Reset clear */
116#define GCSR_CCIC_EN 0x00004000 /* CCIC Clock enable */
117#define REG_GL_IMASK 0x300c /* Interrupt mask register */
118#define GIMSK_CCIC_EN 0x00000004 /* CCIC Interrupt enable */
119
120#define REG_GL_FCR 0x3038 /* GPIO functional control register */
121#define GFCR_GPIO_ON 0x08 /* Camera GPIO enabled */
122#define REG_GL_GPIOR 0x315c /* GPIO register */
123#define GGPIO_OUT 0x80000 /* GPIO output */
124#define GGPIO_VAL 0x00008 /* Output pin value */
125
126#define REG_LEN (REG_GL_IMASK + 4)
127
128
/*
 * Debugging and related.
 *
 * No trailing semicolon in the expansions: with one, a call like
 * "if (x) cam_err(...); else ..." expands to two statements and
 * fails to compile.
 */
#define cam_err(cam, fmt, arg...) \
	dev_err(&(cam)->pdev->dev, fmt, ##arg)
#define cam_warn(cam, fmt, arg...) \
	dev_warn(&(cam)->pdev->dev, fmt, ##arg)
136
137/* -------------------------------------------------------------------- */
138/*
139 * The I2C/SMBUS interface to the camera itself starts here. The
140 * controller handles SMBUS itself, presenting a relatively simple register
141 * interface; all we have to do is to tell it where to route the data.
142 */
143#define CAFE_SMBUS_TIMEOUT (HZ) /* generous */
144
145static inline struct cafe_camera *to_cam(struct v4l2_device *dev)
146{
147 struct mcam_camera *m = container_of(dev, struct mcam_camera, v4l2_dev);
148 return container_of(m, struct cafe_camera, mcam);
149}
150
151
/*
 * Completion predicate for SMBUS writes: non-zero once the write-status
 * bit has dropped or the error bit is set in TWSI control 1.  Used as
 * the wait_event_timeout() condition in cafe_smbus_write_data().
 */
static int cafe_smbus_write_done(struct mcam_camera *mcam)
{
	unsigned long flags;
	int c1;

	/*
	 * We must delay after the interrupt, or the controller gets confused
	 * and never does give us good status.  Fortunately, we don't do this
	 * often.
	 */
	udelay(20);
	spin_lock_irqsave(&mcam->dev_lock, flags);
	c1 = mcam_reg_read(mcam, REG_TWSIC1);
	spin_unlock_irqrestore(&mcam->dev_lock, flags);
	/* "done" == WSTAT cleared, or ERROR raised */
	return (c1 & (TWSIC1_WSTAT|TWSIC1_ERROR)) != TWSIC1_WSTAT;
}
168
/*
 * Write one byte to a sensor register through the controller's SMBUS
 * (TWSI) engine.  addr is the slave ID, command the register address,
 * value the byte to store.  Returns 0 on success, -EIO on timeout or
 * controller-reported error.
 */
static int cafe_smbus_write_data(struct cafe_camera *cam,
		u16 addr, u8 command, u8 value)
{
	unsigned int rval;
	unsigned long flags;
	struct mcam_camera *mcam = &cam->mcam;

	spin_lock_irqsave(&mcam->dev_lock, flags);
	rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
	rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
	/*
	 * Marvell sez set clkdiv to all 1's for now.
	 */
	rval |= TWSIC0_CLKDIV;
	mcam_reg_write(mcam, REG_TWSIC0, rval);
	(void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
	rval = value | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
	mcam_reg_write(mcam, REG_TWSIC1, rval);
	spin_unlock_irqrestore(&mcam->dev_lock, flags);

	/* Unfortunately, reading TWSIC1 too soon after sending a command
	 * causes the device to die.
	 * Use a busy-wait because we often send a large quantity of small
	 * commands at-once; using msleep() would cause a lot of context
	 * switches which take longer than 2ms, resulting in a noticeable
	 * boot-time and capture-start delays.
	 */
	mdelay(2);

	/*
	 * Another sad fact is that sometimes, commands silently complete but
	 * cafe_smbus_write_done() never becomes aware of this.
	 * This happens at random and appears to possibly occur with any
	 * command.
	 * We don't understand why this is.  We work around this issue
	 * with the timeout in the wait below, assuming that all commands
	 * complete within the timeout.
	 */
	wait_event_timeout(cam->smbus_wait, cafe_smbus_write_done(mcam),
			CAFE_SMBUS_TIMEOUT);

	spin_lock_irqsave(&mcam->dev_lock, flags);
	rval = mcam_reg_read(mcam, REG_TWSIC1);
	spin_unlock_irqrestore(&mcam->dev_lock, flags);

	/* WSTAT still set: the write never completed */
	if (rval & TWSIC1_WSTAT) {
		cam_err(cam, "SMBUS write (%02x/%02x/%02x) timed out\n", addr,
				command, value);
		return -EIO;
	}
	if (rval & TWSIC1_ERROR) {
		cam_err(cam, "SMBUS write (%02x/%02x/%02x) error\n", addr,
				command, value);
		return -EIO;
	}
	return 0;
}
226
227
228
/*
 * Completion predicate for SMBUS reads: non-zero once either the
 * read-data-valid or the error bit is set in TWSI control 1.  Used as
 * the wait_event_timeout() condition in cafe_smbus_read_data().
 */
static int cafe_smbus_read_done(struct mcam_camera *mcam)
{
	unsigned long flags;
	int c1;

	/*
	 * We must delay after the interrupt, or the controller gets confused
	 * and never does give us good status.  Fortunately, we don't do this
	 * often.
	 */
	udelay(20);
	spin_lock_irqsave(&mcam->dev_lock, flags);
	c1 = mcam_reg_read(mcam, REG_TWSIC1);
	spin_unlock_irqrestore(&mcam->dev_lock, flags);
	return c1 & (TWSIC1_RVALID|TWSIC1_ERROR);
}
245
246
247
/*
 * Read one byte from a sensor register.  Mirrors the write path:
 * program TWSI control 0, issue the read via control 1, then wait
 * (with timeout) for RVALID or ERROR.  Returns 0 or -EIO.
 */
static int cafe_smbus_read_data(struct cafe_camera *cam,
		u16 addr, u8 command, u8 *value)
{
	unsigned int rval;
	unsigned long flags;
	struct mcam_camera *mcam = &cam->mcam;

	spin_lock_irqsave(&mcam->dev_lock, flags);
	rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
	rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
	/*
	 * Marvell sez set clkdiv to all 1's for now.
	 */
	rval |= TWSIC0_CLKDIV;
	mcam_reg_write(mcam, REG_TWSIC0, rval);
	(void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
	rval = TWSIC1_READ | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
	mcam_reg_write(mcam, REG_TWSIC1, rval);
	spin_unlock_irqrestore(&mcam->dev_lock, flags);

	wait_event_timeout(cam->smbus_wait,
			cafe_smbus_read_done(mcam), CAFE_SMBUS_TIMEOUT);
	spin_lock_irqsave(&mcam->dev_lock, flags);
	rval = mcam_reg_read(mcam, REG_TWSIC1);
	spin_unlock_irqrestore(&mcam->dev_lock, flags);

	if (rval & TWSIC1_ERROR) {
		cam_err(cam, "SMBUS read (%02x/%02x) error\n", addr, command);
		return -EIO;
	}
	/* No valid data means the wait above timed out */
	if (!(rval & TWSIC1_RVALID)) {
		cam_err(cam, "SMBUS read (%02x/%02x) timed out\n", addr,
				command);
		return -EIO;
	}
	*value = rval & 0xff;
	return 0;
}
286
287/*
288 * Perform a transfer over SMBUS. This thing is called under
289 * the i2c bus lock, so we shouldn't race with ourselves...
290 */
291static int cafe_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
292 unsigned short flags, char rw, u8 command,
293 int size, union i2c_smbus_data *data)
294{
295 struct cafe_camera *cam = i2c_get_adapdata(adapter);
296 int ret = -EINVAL;
297
298 /*
299 * This interface would appear to only do byte data ops. OK
300 * it can do word too, but the cam chip has no use for that.
301 */
302 if (size != I2C_SMBUS_BYTE_DATA) {
303 cam_err(cam, "funky xfer size %d\n", size);
304 return -EINVAL;
305 }
306
307 if (rw == I2C_SMBUS_WRITE)
308 ret = cafe_smbus_write_data(cam, addr, command, data->byte);
309 else if (rw == I2C_SMBUS_READ)
310 ret = cafe_smbus_read_data(cam, addr, command, &data->byte);
311 return ret;
312}
313
314
/*
 * Unmask the TWSI (SMBUS) interrupts so completion events can wake
 * the waiters in the transfer functions above.
 */
static void cafe_smbus_enable_irq(struct cafe_camera *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->mcam.dev_lock, flags);
	mcam_reg_set_bit(&cam->mcam, REG_IRQMASK, TWSIIRQS);
	spin_unlock_irqrestore(&cam->mcam.dev_lock, flags);
}
323
324static u32 cafe_smbus_func(struct i2c_adapter *adapter)
325{
326 return I2C_FUNC_SMBUS_READ_BYTE_DATA |
327 I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
328}
329
330static struct i2c_algorithm cafe_smbus_algo = {
331 .smbus_xfer = cafe_smbus_xfer,
332 .functionality = cafe_smbus_func
333};
334
335static int cafe_smbus_setup(struct cafe_camera *cam)
336{
337 struct i2c_adapter *adap;
338 int ret;
339
340 adap = kzalloc(sizeof(*adap), GFP_KERNEL);
341 if (adap == NULL)
342 return -ENOMEM;
343 cam->mcam.i2c_adapter = adap;
344 cafe_smbus_enable_irq(cam);
345 adap->owner = THIS_MODULE;
346 adap->algo = &cafe_smbus_algo;
347 strcpy(adap->name, "cafe_ccic");
348 adap->dev.parent = &cam->pdev->dev;
349 i2c_set_adapdata(adap, cam);
350 ret = i2c_add_adapter(adap);
351 if (ret)
352 printk(KERN_ERR "Unable to register cafe i2c adapter\n");
353 return ret;
354}
355
356static void cafe_smbus_shutdown(struct cafe_camera *cam)
357{
358 i2c_del_adapter(cam->mcam.i2c_adapter);
359 kfree(cam->mcam.i2c_adapter);
360}
361
362
363/*
364 * Controller-level stuff
365 */
366
/*
 * One-time controller wakeup.  The global reset/clock registers used
 * here are shared with the chip's NAND and SD functions.  Sleeps in
 * the middle, so must be called from process context.
 */
static void cafe_ctlr_init(struct mcam_camera *mcam)
{
	unsigned long flags;

	spin_lock_irqsave(&mcam->dev_lock, flags);
	/*
	 * Added magic to bring up the hardware on the B-Test board.
	 * (0x3038/0x8 and 0x315c/0x80008 match REG_GL_FCR=GFCR_GPIO_ON
	 * and REG_GL_GPIOR=GGPIO_OUT|GGPIO_VAL written as raw numbers.)
	 */
	mcam_reg_write(mcam, 0x3038, 0x8);
	mcam_reg_write(mcam, 0x315c, 0x80008);
	/*
	 * Go through the dance needed to wake the device up.
	 * Note that these registers are global and shared
	 * with the NAND and SD devices.  Interaction between the
	 * three still needs to be examined.
	 */
	mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRS|GCSR_MRS); /* Needed? */
	mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRC);
	mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRS);
	/*
	 * Here we must wait a bit for the controller to come around;
	 * drop the lock so that sleeping is legal.
	 */
	spin_unlock_irqrestore(&mcam->dev_lock, flags);
	msleep(5);
	spin_lock_irqsave(&mcam->dev_lock, flags);

	mcam_reg_write(mcam, REG_GL_CSR, GCSR_CCIC_EN|GCSR_SRC|GCSR_MRC);
	mcam_reg_set_bit(mcam, REG_GL_IMASK, GIMSK_CCIC_EN);
	/*
	 * Mask all interrupts.
	 */
	mcam_reg_write(mcam, REG_IRQMASK, 0);
	spin_unlock_irqrestore(&mcam->dev_lock, flags);
}
401
402
/*
 * Platform power-up hook: enable the camera GPIO and take the sensor
 * out of reset and power-down (OLPC XO wiring assumed).
 */
static void cafe_ctlr_power_up(struct mcam_camera *mcam)
{
	/*
	 * Part one of the sensor dance: turn the global
	 * GPIO signal on.
	 */
	mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
	mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT|GGPIO_VAL);
	/*
	 * Put the sensor into operational mode (assumes OLPC-style
	 * wiring).  Control 0 is reset - set to 1 to operate.
	 * Control 1 is power down, set to 0 to operate.
	 */
	mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
	mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);
}
419
/*
 * Platform power-down hook: assert sensor power-down (C1 high) and
 * drop the GPIO output value.
 */
static void cafe_ctlr_power_down(struct mcam_camera *mcam)
{
	mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C1);
	mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
	mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT);
}
426
427
428
/*
 * The platform interrupt handler.  The line is shared (IRQF_SHARED):
 * core interrupts go to mccic_irq() once the device is registered;
 * the TWSI (SMBUS) interrupt is acked here and wakes the transfer
 * waiters.
 */
static irqreturn_t cafe_irq(int irq, void *data)
{
	struct cafe_camera *cam = data;
	struct mcam_camera *mcam = &cam->mcam;
	unsigned int irqs, handled;

	spin_lock(&mcam->dev_lock);
	irqs = mcam_reg_read(mcam, REG_IRQSTAT);
	handled = cam->registered && mccic_irq(mcam, irqs);
	if (irqs & TWSIIRQS) {
		mcam_reg_write(mcam, REG_IRQSTAT, TWSIIRQS); /* ack */
		wake_up(&cam->smbus_wait);
		handled = 1;
	}
	spin_unlock(&mcam->dev_lock);
	return IRQ_RETVAL(handled);
}
449
450
451/* -------------------------------------------------------------------------- */
452/*
453 * PCI interface stuff.
454 */
455
/*
 * PCI probe: allocate a cafe_camera, wire up the mcam core callbacks,
 * map the device, hook the (shared) interrupt, bring the controller
 * up, then register the SMBUS adapter and the core.  Error paths
 * unwind in reverse order through the goto ladder at the bottom.
 */
static int cafe_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	int ret;
	struct cafe_camera *cam;
	struct mcam_camera *mcam;

	/*
	 * Start putting together one of our big camera structures.
	 */
	ret = -ENOMEM;
	cam = kzalloc(sizeof(struct cafe_camera), GFP_KERNEL);
	if (cam == NULL)
		goto out;
	cam->pdev = pdev;
	mcam = &cam->mcam;
	mcam->chip_id = V4L2_IDENT_CAFE;
	spin_lock_init(&mcam->dev_lock);
	init_waitqueue_head(&cam->smbus_wait);
	mcam->plat_power_up = cafe_ctlr_power_up;
	mcam->plat_power_down = cafe_ctlr_power_down;
	mcam->dev = &pdev->dev;
	/*
	 * Set the clock speed for the XO 1; I don't believe this
	 * driver has ever run anywhere else.
	 */
	mcam->clock_speed = 45;
	mcam->use_smbus = 1;
	/*
	 * Vmalloc mode for buffers is traditional with this driver.
	 * We *might* be able to run DMA_contig, especially on a system
	 * with CMA in it.
	 */
	mcam->buffer_mode = B_vmalloc;
	/*
	 * Get set up on the PCI bus.
	 */
	ret = pci_enable_device(pdev);
	if (ret)
		goto out_free;
	pci_set_master(pdev);

	ret = -EIO;
	mcam->regs = pci_iomap(pdev, 0, 0); /* BAR 0, whole region */
	if (!mcam->regs) {
		printk(KERN_ERR "Unable to ioremap cafe-ccic regs\n");
		goto out_disable;
	}
	ret = request_irq(pdev->irq, cafe_irq, IRQF_SHARED, "cafe-ccic", cam);
	if (ret)
		goto out_iounmap;

	/*
	 * Initialize the controller and leave it powered up.  It will
	 * stay that way until the sensor driver shows up.
	 */
	cafe_ctlr_init(mcam);
	cafe_ctlr_power_up(mcam);
	/*
	 * Set up I2C/SMBUS communications.  We have to drop the mutex here
	 * because the sensor could attach in this call chain, leading to
	 * unsightly deadlocks.
	 */
	ret = cafe_smbus_setup(cam);
	if (ret)
		goto out_pdown;

	ret = mccic_register(mcam);
	if (ret == 0) {
		cam->registered = 1;
		return 0;
	}

	cafe_smbus_shutdown(cam);
out_pdown:
	cafe_ctlr_power_down(mcam);
	free_irq(pdev->irq, cam);
out_iounmap:
	pci_iounmap(pdev, mcam->regs);
out_disable:
	pci_disable_device(pdev);
out_free:
	kfree(cam);
out:
	return ret;
}
542
543
/*
 * Shut down an initialized device: unregister from the mcam core,
 * remove the i2c adapter, then release IRQ and MMIO resources.
 * (The PCI device itself is disabled by the caller's context.)
 */
static void cafe_shutdown(struct cafe_camera *cam)
{
	mccic_shutdown(&cam->mcam);
	cafe_smbus_shutdown(cam);
	free_irq(cam->pdev->irq, cam);
	pci_iounmap(cam->pdev, cam->mcam.regs);
}
554
555
556static void cafe_pci_remove(struct pci_dev *pdev)
557{
558 struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
559 struct cafe_camera *cam = to_cam(v4l2_dev);
560
561 if (cam == NULL) {
562 printk(KERN_WARNING "pci_remove on unknown pdev %p\n", pdev);
563 return;
564 }
565 cafe_shutdown(cam);
566 kfree(cam);
567}
568
569
570#ifdef CONFIG_PM
/*
 * Basic power management.
 */

/* Suspend: save PCI state, quiesce the core, disable the device. */
static int cafe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
	struct cafe_camera *cam = to_cam(v4l2_dev);
	int ret;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;
	mccic_suspend(&cam->mcam);
	pci_disable_device(pdev);
	return 0;
}
587
588
/*
 * Resume: re-enable the PCI device, redo the controller wakeup
 * sequence, then let the mcam core restore its state.
 */
static int cafe_pci_resume(struct pci_dev *pdev)
{
	struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
	struct cafe_camera *cam = to_cam(v4l2_dev);
	int ret = 0;

	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);

	if (ret) {
		cam_warn(cam, "Unable to re-enable device on resume!\n");
		return ret;
	}
	cafe_ctlr_init(&cam->mcam);
	return mccic_resume(&cam->mcam);
}
605
606#endif /* CONFIG_PM */
607
608static struct pci_device_id cafe_ids[] = {
609 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL,
610 PCI_DEVICE_ID_MARVELL_88ALP01_CCIC) },
611 { 0, }
612};
613
614MODULE_DEVICE_TABLE(pci, cafe_ids);
615
/* PCI driver glue; PM entry points compiled in only with CONFIG_PM. */
static struct pci_driver cafe_pci_driver = {
	.name = "cafe1000-ccic",
	.id_table = cafe_ids,
	.probe = cafe_pci_probe,
	.remove = cafe_pci_remove,
#ifdef CONFIG_PM
	.suspend = cafe_pci_suspend,
	.resume = cafe_pci_resume,
#endif
};
626
627
628
629
630static int __init cafe_init(void)
631{
632 int ret;
633
634 printk(KERN_NOTICE "Marvell M88ALP01 'CAFE' Camera Controller version %d\n",
635 CAFE_VERSION);
636 ret = pci_register_driver(&cafe_pci_driver);
637 if (ret) {
638 printk(KERN_ERR "Unable to register cafe_ccic driver\n");
639 goto out;
640 }
641 ret = 0;
642
643out:
644 return ret;
645}
646
647
/* Module unload: drop the PCI driver registration. */
static void __exit cafe_exit(void)
{
	pci_unregister_driver(&cafe_pci_driver);
}

module_init(cafe_init);
module_exit(cafe_exit);
diff --git a/drivers/media/video/marvell-ccic/mcam-core.c b/drivers/media/video/marvell-ccic/mcam-core.c
new file mode 100644
index 000000000000..83c14514cd52
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/mcam-core.c
@@ -0,0 +1,1843 @@
1/*
2 * The Marvell camera core. This device appears in a number of settings,
3 * so it needs platform-specific support outside of the core.
4 *
5 * Copyright 2011 Jonathan Corbet corbet@lwn.net
6 */
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/fs.h>
10#include <linux/mm.h>
11#include <linux/i2c.h>
12#include <linux/interrupt.h>
13#include <linux/spinlock.h>
14#include <linux/slab.h>
15#include <linux/device.h>
16#include <linux/wait.h>
17#include <linux/list.h>
18#include <linux/dma-mapping.h>
19#include <linux/delay.h>
20#include <linux/vmalloc.h>
21#include <linux/io.h>
22#include <linux/videodev2.h>
23#include <media/v4l2-device.h>
24#include <media/v4l2-ioctl.h>
25#include <media/v4l2-chip-ident.h>
26#include <media/ov7670.h>
27#include <media/videobuf2-vmalloc.h>
28#include <media/videobuf2-dma-contig.h>
29#include <media/videobuf2-dma-sg.h>
30
31#include "mcam-core.h"
32
33/*
34 * Basic frame stats - to be deleted shortly
35 */
36static int frames;
37static int singles;
38static int delivered;
39
40#ifdef MCAM_MODE_VMALLOC
41/*
42 * Internal DMA buffer management. Since the controller cannot do S/G I/O,
43 * we must have physically contiguous buffers to bring frames into.
44 * These parameters control how many buffers we use, whether we
45 * allocate them at load time (better chance of success, but nails down
46 * memory) or when somebody tries to use the camera (riskier), and,
47 * for load-time allocation, how big they should be.
48 *
49 * The controller can cycle through three buffers. We could use
50 * more by flipping pointers around, but it probably makes little
51 * sense.
52 */
53
54static int alloc_bufs_at_read;
55module_param(alloc_bufs_at_read, bool, 0444);
56MODULE_PARM_DESC(alloc_bufs_at_read,
57 "Non-zero value causes DMA buffers to be allocated when the "
58 "video capture device is read, rather than at module load "
59 "time. This saves memory, but decreases the chances of "
60 "successfully getting those buffers. This parameter is "
61 "only used in the vmalloc buffer mode");
62
63static int n_dma_bufs = 3;
64module_param(n_dma_bufs, uint, 0644);
65MODULE_PARM_DESC(n_dma_bufs,
66 "The number of DMA buffers to allocate. Can be either two "
67 "(saves memory, makes timing tighter) or three.");
68
69static int dma_buf_size = VGA_WIDTH * VGA_HEIGHT * 2; /* Worst case */
70module_param(dma_buf_size, uint, 0444);
71MODULE_PARM_DESC(dma_buf_size,
72 "The size of the allocated DMA buffers. If actual operating "
73 "parameters require larger buffers, an attempt to reallocate "
74 "will be made.");
75#else /* MCAM_MODE_VMALLOC */
76static const int alloc_bufs_at_read = 0;
77static const int n_dma_bufs = 3; /* Used by S/G_PARM */
78#endif /* MCAM_MODE_VMALLOC */
79
80static int flip;
81module_param(flip, bool, 0444);
82MODULE_PARM_DESC(flip,
83 "If set, the sensor will be instructed to flip the image "
84 "vertically.");
85
/* -1 means "use whatever the platform driver configured". */
static int buffer_mode = -1;
module_param(buffer_mode, int, 0444);
MODULE_PARM_DESC(buffer_mode,
		"Set the buffer mode to be used; default is to go with what "
		"the platform driver asks for. Set to 0 for vmalloc, 1 for "
		"DMA contiguous.");
92
93/*
94 * Status flags. Always manipulated with bit operations.
95 */
96#define CF_BUF0_VALID 0 /* Buffers valid - first three */
97#define CF_BUF1_VALID 1
98#define CF_BUF2_VALID 2
99#define CF_DMA_ACTIVE 3 /* A frame is incoming */
100#define CF_CONFIG_NEEDED 4 /* Must configure hardware */
101#define CF_SINGLE_BUFFER 5 /* Running with a single buffer */
102#define CF_SG_RESTART 6 /* SG restart needed */
103
104#define sensor_call(cam, o, f, args...) \
105 v4l2_subdev_call(cam->sensor, o, f, ##args)
106
/*
 * The pixel formats the core can produce, with the media-bus code to
 * request from the sensor and the bytes-per-pixel used to size images.
 */
static struct mcam_format_struct {
	__u8 *desc;
	__u32 pixelformat;
	int bpp;   /* Bytes per pixel */
	enum v4l2_mbus_pixelcode mbus_code;
} mcam_formats[] = {
	{
		.desc = "YUYV 4:2:2",
		.pixelformat = V4L2_PIX_FMT_YUYV,
		.mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
		.bpp = 2,
	},
	{
		.desc = "RGB 444",
		.pixelformat = V4L2_PIX_FMT_RGB444,
		.mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
		.bpp = 2,
	},
	{
		.desc = "RGB 565",
		.pixelformat = V4L2_PIX_FMT_RGB565,
		.mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
		.bpp = 2,
	},
	{
		.desc = "Raw RGB Bayer",
		.pixelformat = V4L2_PIX_FMT_SBGGR8,
		.mbus_code = V4L2_MBUS_FMT_SBGGR8_1X8,
		.bpp = 1
	},
};
#define N_MCAM_FMTS ARRAY_SIZE(mcam_formats)
139
140static struct mcam_format_struct *mcam_find_format(u32 pixelformat)
141{
142 unsigned i;
143
144 for (i = 0; i < N_MCAM_FMTS; i++)
145 if (mcam_formats[i].pixelformat == pixelformat)
146 return mcam_formats + i;
147 /* Not found? Then return the first format. */
148 return mcam_formats;
149}
150
/*
 * The default format we use until somebody says otherwise: VGA YUYV.
 */
static const struct v4l2_pix_format mcam_def_pix_format = {
	.width = VGA_WIDTH,
	.height = VGA_HEIGHT,
	.pixelformat = V4L2_PIX_FMT_YUYV,
	.field = V4L2_FIELD_NONE,
	.bytesperline = VGA_WIDTH*2,
	.sizeimage = VGA_WIDTH*VGA_HEIGHT*2,
};

/* Media-bus code matching the default YUYV pixel format. */
static const enum v4l2_mbus_pixelcode mcam_def_mbus_code =
	V4L2_MBUS_FMT_YUYV8_2X8;
165
166
/*
 * The two-word DMA descriptor format used by the Armada 610 and like.  There
 * is a three-word format as well (set C1_DESC_3WORD) where the third
 * word is a pointer to the next descriptor, but we don't use it.  Two-word
 * descriptors have to be contiguous in memory.
 */
struct mcam_dma_desc {
	u32 dma_addr;		/* Segment address for the hardware */
	u32 segment_len;	/* Segment length, bytes */
};
177
/*
 * Our buffer type for working with videobuf2.  Note that the vb2
 * developers have decreed that struct vb2_buffer must be at the
 * beginning of this structure.
 */
struct mcam_vb_buffer {
	struct vb2_buffer vb_buf;	/* Must be first (vb2 requirement) */
	struct list_head queue;		/* Link on the cam->buffers list */
	struct mcam_dma_desc *dma_desc;	/* Descriptor virtual address */
	dma_addr_t dma_desc_pa;		/* Descriptor physical address */
	int dma_desc_nent;		/* Number of mapped descriptors */
};
190
/* Convert a generic vb2_buffer pointer to our wrapper type. */
static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_buffer *vb)
{
	return container_of(vb, struct mcam_vb_buffer, vb_buf);
}
195
/*
 * Hand a completed buffer back to user space: fill in the payload
 * size and sequence number, then mark it done with videobuf2.
 */
static void mcam_buffer_done(struct mcam_camera *cam, int frame,
		struct vb2_buffer *vbuf)
{
	vbuf->v4l2_buf.bytesused = cam->pix_format.sizeimage;
	vbuf->v4l2_buf.sequence = cam->buf_seq[frame];
	vb2_set_plane_payload(vbuf, 0, cam->pix_format.sizeimage);
	vb2_buffer_done(vbuf, VB2_BUF_STATE_DONE);
}
207
208
209
/*
 * Debugging and related.
 *
 * No trailing semicolon in the expansions, so the macros behave as
 * single statements in if/else constructs.
 */
#define cam_err(cam, fmt, arg...) \
	dev_err((cam)->dev, fmt, ##arg)
#define cam_warn(cam, fmt, arg...) \
	dev_warn((cam)->dev, fmt, ##arg)
#define cam_dbg(cam, fmt, arg...) \
	dev_dbg((cam)->dev, fmt, ##arg)
219
220
221/*
222 * Flag manipulation helpers
223 */
224static void mcam_reset_buffers(struct mcam_camera *cam)
225{
226 int i;
227
228 cam->next_buf = -1;
229 for (i = 0; i < cam->nbufs; i++)
230 clear_bit(i, &cam->flags);
231}
232
/* Does the hardware need (re)configuration before the next start? */
static inline int mcam_needs_config(struct mcam_camera *cam)
{
	return test_bit(CF_CONFIG_NEEDED, &cam->flags);
}
237
/* Set or clear the "hardware must be reconfigured" flag. */
static void mcam_set_config_needed(struct mcam_camera *cam, int needed)
{
	if (needed)
		set_bit(CF_CONFIG_NEEDED, &cam->flags);
	else
		clear_bit(CF_CONFIG_NEEDED, &cam->flags);
}
245
246/* ------------------------------------------------------------------- */
247/*
248 * Make the controller start grabbing images. Everything must
249 * be set up before doing this.
250 */
/* Enable capture by setting C0_ENABLE in control register 0. */
static void mcam_ctlr_start(struct mcam_camera *cam)
{
	/* set_bit performs a read, so no other barrier should be
	   needed here */
	mcam_reg_set_bit(cam, REG_CTRL0, C0_ENABLE);
}
257
/* Disable capture by clearing C0_ENABLE; no completion wait here. */
static void mcam_ctlr_stop(struct mcam_camera *cam)
{
	mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
}
262
263/* ------------------------------------------------------------------- */
264
265#ifdef MCAM_MODE_VMALLOC
266/*
267 * Code specific to the vmalloc buffer mode.
268 */
269
/*
 * Allocate in-kernel DMA buffers for vmalloc mode.  At load time the
 * size comes from the dma_buf_size parameter; otherwise buffers are
 * sized to the currently configured image.  At least two buffers are
 * required for the controller to operate.
 */
static int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
{
	int i;

	mcam_set_config_needed(cam, 1);
	if (loadtime)
		cam->dma_buf_size = dma_buf_size;
	else
		cam->dma_buf_size = cam->pix_format.sizeimage;
	if (n_dma_bufs > 3)
		n_dma_bufs = 3;

	cam->nbufs = 0;
	for (i = 0; i < n_dma_bufs; i++) {
		cam->dma_bufs[i] = dma_alloc_coherent(cam->dev,
				cam->dma_buf_size, cam->dma_handles + i,
				GFP_KERNEL);
		if (cam->dma_bufs[i] == NULL) {
			cam_warn(cam, "Failed to allocate DMA buffer\n");
			break;
		}
		(cam->nbufs)++;
	}

	switch (cam->nbufs) {
	case 1:
		/* A single buffer is useless: free it and fail below */
		dma_free_coherent(cam->dev, cam->dma_buf_size,
				cam->dma_bufs[0], cam->dma_handles[0]);
		cam->nbufs = 0;
		/* fall through */
	case 0:
		cam_err(cam, "Insufficient DMA buffers, cannot operate\n");
		return -ENOMEM;

	case 2:
		if (n_dma_bufs > 2)
			cam_warn(cam, "Will limp along with only 2 buffers\n");
		break;
	}
	return 0;
}
313
/*
 * Free all DMA buffers allocated by mcam_alloc_dma_bufs() and reset
 * the buffer count.
 */
static void mcam_free_dma_bufs(struct mcam_camera *cam)
{
	int i;

	for (i = 0; i < cam->nbufs; i++) {
		dma_free_coherent(cam->dev, cam->dma_buf_size,
				cam->dma_bufs[i], cam->dma_handles[i]);
		cam->dma_bufs[i] = NULL;
	}
	cam->nbufs = 0;
}
325
326
/*
 * Set up DMA buffers when operating in vmalloc mode
 */
static void mcam_ctlr_dma_vmalloc(struct mcam_camera *cam)
{
	/*
	 * Store the first two Y buffers (we aren't supporting
	 * planar formats for now, so no UV bufs).  Then either
	 * set the third if it exists, or tell the controller
	 * to just use two.
	 */
	mcam_reg_write(cam, REG_Y0BAR, cam->dma_handles[0]);
	mcam_reg_write(cam, REG_Y1BAR, cam->dma_handles[1]);
	if (cam->nbufs > 2) {
		mcam_reg_write(cam, REG_Y2BAR, cam->dma_handles[2]);
		mcam_reg_clear_bit(cam, REG_CTRL1, C1_TWOBUFS);
	} else
		mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
	if (cam->chip_id == V4L2_IDENT_CAFE)
		mcam_reg_write(cam, REG_UBAR, 0); /* Cafe: 32 bits only */
}
348
/*
 * Copy data out to user space in the vmalloc case.  Runs as a tasklet;
 * the device lock is dropped around the big memcpy so that interrupts
 * are not held off for the duration of the copy.
 */
static void mcam_frame_tasklet(unsigned long data)
{
	struct mcam_camera *cam = (struct mcam_camera *) data;
	int i;
	unsigned long flags;
	struct mcam_vb_buffer *buf;

	spin_lock_irqsave(&cam->dev_lock, flags);
	for (i = 0; i < cam->nbufs; i++) {
		int bufno = cam->next_buf;

		if (cam->state != S_STREAMING || bufno < 0)
			break;	/* I/O got stopped */
		if (++(cam->next_buf) >= cam->nbufs)
			cam->next_buf = 0;
		if (!test_bit(bufno, &cam->flags))
			continue;	/* No completed frame here */
		if (list_empty(&cam->buffers)) {
			singles++;
			break;	/* Leave it valid, hope for better later */
		}
		delivered++;
		clear_bit(bufno, &cam->flags);
		buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
				queue);
		list_del_init(&buf->queue);
		/*
		 * Drop the lock during the big copy.  This *should* be safe...
		 */
		spin_unlock_irqrestore(&cam->dev_lock, flags);
		memcpy(vb2_plane_vaddr(&buf->vb_buf, 0), cam->dma_bufs[bufno],
				cam->pix_format.sizeimage);
		mcam_buffer_done(cam, bufno, &buf->vb_buf);
		spin_lock_irqsave(&cam->dev_lock, flags);
	}
	spin_unlock_irqrestore(&cam->dev_lock, flags);
}
389
390
391/*
392 * Make sure our allocated buffers are up to the task.
393 */
394static int mcam_check_dma_buffers(struct mcam_camera *cam)
395{
396 if (cam->nbufs > 0 && cam->dma_buf_size < cam->pix_format.sizeimage)
397 mcam_free_dma_bufs(cam);
398 if (cam->nbufs == 0)
399 return mcam_alloc_dma_bufs(cam, 0);
400 return 0;
401}
402
/* Frame-complete hook for vmalloc mode: defer the copy to the tasklet. */
static void mcam_vmalloc_done(struct mcam_camera *cam, int frame)
{
	tasklet_schedule(&cam->s_tasklet);
}
407
#else /* MCAM_MODE_VMALLOC */

/*
 * No-op stand-ins used when vmalloc buffer mode is compiled out, so
 * the rest of the core can call these unconditionally.
 */
static inline int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
{
	return 0;
}

static inline void mcam_free_dma_bufs(struct mcam_camera *cam)
{
	return;
}

static inline int mcam_check_dma_buffers(struct mcam_camera *cam)
{
	return 0;
}



#endif /* MCAM_MODE_VMALLOC */
428
429
430#ifdef MCAM_MODE_DMA_CONTIG
431/* ---------------------------------------------------------------------- */
432/*
433 * DMA-contiguous code.
434 */
/*
 * Set up a contiguous buffer for the given frame.  Here also is where
 * the underrun strategy is set: if there is no buffer available, reuse
 * the buffer from the other BAR and set the CF_SINGLE_BUFFER flag to
 * keep the interrupt handler from giving that buffer back to user
 * space.  In this way, we always have a buffer to DMA to and don't
 * have to try to play games stopping and restarting the controller.
 */
static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
{
	struct mcam_vb_buffer *buf;
	/*
	 * If there are no available buffers, go into single mode
	 */
	if (list_empty(&cam->buffers)) {
		/* frame ^ 0x1: borrow the buffer bound to the other BAR */
		buf = cam->vb_bufs[frame ^ 0x1];
		cam->vb_bufs[frame] = buf;
		mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
				vb2_dma_contig_plane_paddr(&buf->vb_buf, 0));
		set_bit(CF_SINGLE_BUFFER, &cam->flags);
		singles++;
		return;
	}
	/*
	 * OK, we have a buffer we can use.
	 */
	buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
	list_del_init(&buf->queue);
	mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
			vb2_dma_contig_plane_paddr(&buf->vb_buf, 0));
	cam->vb_bufs[frame] = buf;
	clear_bit(CF_SINGLE_BUFFER, &cam->flags);
}
468
469/*
470 * Initial B_DMA_contig setup.
471 */
/*
 * Initial DMA-contig controller setup: two-buffer mode, with a
 * buffer loaded into each of the two frame BARs.
 */
static void mcam_ctlr_dma_contig(struct mcam_camera *cam)
{
	mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
	cam->nbufs = 2;
	mcam_set_contig_buffer(cam, 0);
	mcam_set_contig_buffer(cam, 1);
}
479
480/*
481 * Frame completion handling.
482 */
/*
 * DMA-contig frame completion: hand the buffer to user space unless
 * it is the shared single-mode buffer, then rearm the frame slot.
 */
static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
{
	struct mcam_vb_buffer *buf = cam->vb_bufs[frame];

	if (!test_bit(CF_SINGLE_BUFFER, &cam->flags)) {
		delivered++;
		mcam_buffer_done(cam, frame, &buf->vb_buf);
	}
	mcam_set_contig_buffer(cam, frame);
}
493
494#endif /* MCAM_MODE_DMA_CONTIG */
495
496#ifdef MCAM_MODE_DMA_SG
497/* ---------------------------------------------------------------------- */
498/*
499 * Scatter/gather-specific code.
500 */
501
502/*
503 * Set up the next buffer for S/G I/O; caller should be sure that
504 * the controller is stopped and a buffer is available.
505 */
static void mcam_sg_next_buffer(struct mcam_camera *cam)
{
	struct mcam_vb_buffer *buf;

	buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
	list_del_init(&buf->queue);
	/* Point the controller at this buffer's descriptor chain */
	mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
	mcam_reg_write(cam, REG_DESC_LEN_Y,
			buf->dma_desc_nent*sizeof(struct mcam_dma_desc));
	/* U/V descriptor lengths are zero: all data goes via Y */
	mcam_reg_write(cam, REG_DESC_LEN_U, 0);
	mcam_reg_write(cam, REG_DESC_LEN_V, 0);
	/* S/G mode only ever uses slot 0 */
	cam->vb_bufs[0] = buf;
}
519
520/*
521 * Initial B_DMA_sg setup
522 */
static void mcam_ctlr_dma_sg(struct mcam_camera *cam)
{
	/* Use 4-word descriptors, load a buffer, then enable desc mode */
	mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_3WORD);
	mcam_sg_next_buffer(cam);
	mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
	/* nbufs is not meaningful in S/G mode; just mark it nonzero */
	cam->nbufs = 3;
}
530
531
532/*
533 * Frame completion with S/G is trickier. We can't muck with
534 * a descriptor chain on the fly, since the controller buffers it
535 * internally. So we have to actually stop and restart; Marvell
536 * says this is the way to do it.
537 *
538 * Of course, stopping is easier said than done; experience shows
539 * that the controller can start a frame *after* C0_ENABLE has been
540 * cleared. So when running in S/G mode, the controller is "stopped"
541 * on receipt of the start-of-frame interrupt. That means we can
542 * safely change the DMA descriptor array here and restart things
543 * (assuming there's another buffer waiting to go).
544 */
static void mcam_dma_sg_done(struct mcam_camera *cam, int frame)
{
	/* S/G mode uses only slot 0 (see mcam_sg_next_buffer()) */
	struct mcam_vb_buffer *buf = cam->vb_bufs[0];

	/*
	 * Very Bad Not Good Things happen if you don't clear
	 * C1_DESC_ENA before making any descriptor changes.
	 */
	mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
	/*
	 * If we have another buffer available, put it in and
	 * restart the engine.
	 */
	if (!list_empty(&cam->buffers)) {
		mcam_sg_next_buffer(cam);
		mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
		mcam_ctlr_start(cam);
	/*
	 * Otherwise set CF_SG_RESTART and the controller will
	 * be restarted once another buffer shows up.
	 */
	} else {
		set_bit(CF_SG_RESTART, &cam->flags);
		singles++;
	}
	/*
	 * Now we can give the completed frame back to user space.
	 */
	delivered++;
	mcam_buffer_done(cam, frame, &buf->vb_buf);
}
576
577
578/*
579 * Scatter/gather mode requires stopping the controller between
580 * frames so we can put in a new DMA descriptor array. If no new
581 * buffer exists at frame completion, the controller is left stopped;
582 * this function is charged with gettig things going again.
583 */
/*
 * Restart a stopped S/G engine: redo the descriptor setup, start the
 * controller, and clear the pending-restart flag.
 */
static void mcam_sg_restart(struct mcam_camera *cam)
{
	mcam_ctlr_dma_sg(cam);
	mcam_ctlr_start(cam);
	clear_bit(CF_SG_RESTART, &cam->flags);
}
590
591#else /* MCAM_MODE_DMA_SG */
592
static inline void mcam_sg_restart(struct mcam_camera *cam)
{
	/* S/G mode compiled out: nothing to restart. */
}
597
598#endif /* MCAM_MODE_DMA_SG */
599
600/* ---------------------------------------------------------------------- */
601/*
602 * Buffer-mode-independent controller code.
603 */
604
605/*
606 * Image format setup
607 */
/*
 * Program the controller's image size/offset/pitch and pixel-format
 * registers from cam->pix_format.  Called under cam->dev_lock (see
 * mcam_ctlr_configure()).
 */
static void mcam_ctlr_image(struct mcam_camera *cam)
{
	int imgsz;
	struct v4l2_pix_format *fmt = &cam->pix_format;

	imgsz = ((fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK) |
		(fmt->bytesperline & IMGSZ_H_MASK);
	mcam_reg_write(cam, REG_IMGSIZE, imgsz);
	mcam_reg_write(cam, REG_IMGOFFSET, 0);
	/* YPITCH just drops the last two bits */
	mcam_reg_write_mask(cam, REG_IMGPITCH, fmt->bytesperline,
			IMGP_YP_MASK);
	/*
	 * Tell the controller about the image format we are using.
	 */
	switch (cam->pix_format.pixelformat) {
	case V4L2_PIX_FMT_YUYV:
	    mcam_reg_write_mask(cam, REG_CTRL0,
			    C0_DF_YUV|C0_YUV_PACKED|C0_YUVE_YUYV,
			    C0_DF_MASK);
	    break;

	case V4L2_PIX_FMT_RGB444:
	    mcam_reg_write_mask(cam, REG_CTRL0,
			    C0_DF_RGB|C0_RGBF_444|C0_RGB4_XRGB,
			    C0_DF_MASK);
		/* Alpha value? */
	    break;

	case V4L2_PIX_FMT_RGB565:
	    mcam_reg_write_mask(cam, REG_CTRL0,
			    C0_DF_RGB|C0_RGBF_565|C0_RGB5_BGGR,
			    C0_DF_MASK);
	    break;

	default:
	    /* Unreachable if s_fmt validated the format; log and go on */
	    cam_err(cam, "Unknown format %x\n", cam->pix_format.pixelformat);
	    break;
	}
	/*
	 * Make sure it knows we want to use hsync/vsync.
	 */
	mcam_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC,
			C0_SIFM_MASK);
}
653
654
655/*
656 * Configure the controller for operation; caller holds the
657 * device mutex.
658 */
/*
 * Push the current configuration into the controller: buffer-mode
 * specific DMA setup plus the image format registers, all under
 * dev_lock.  Always returns 0.
 */
static int mcam_ctlr_configure(struct mcam_camera *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	cam->dma_setup(cam);
	mcam_ctlr_image(cam);
	mcam_set_config_needed(cam, 0);
	clear_bit(CF_SG_RESTART, &cam->flags);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
	return 0;
}
671
/* Enable frame interrupts; caller handles any needed locking. */
static void mcam_ctlr_irq_enable(struct mcam_camera *cam)
{
	/*
	 * Clear any pending interrupts, since we do not
	 * expect to have I/O active prior to enabling.
	 */
	mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS);
	mcam_reg_set_bit(cam, REG_IRQMASK, FRAMEIRQS);
}
681
/* Mask off the frame interrupts. */
static void mcam_ctlr_irq_disable(struct mcam_camera *cam)
{
	mcam_reg_clear_bit(cam, REG_IRQMASK, FRAMEIRQS);
}
686
687
688
/*
 * One-time controller initialization: power it up, make sure capture
 * is disabled, and set the sensor clock divider.
 */
static void mcam_ctlr_init(struct mcam_camera *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	/*
	 * Make sure it's not powered down.
	 */
	mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
	/*
	 * Turn off the enable bit. It sure should be off anyway,
	 * but it's good to be sure.
	 */
	mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
	/*
	 * Clock the sensor appropriately. Controller clock should
	 * be 48MHz, sensor "typical" value is half that.
	 */
	mcam_reg_write_mask(cam, REG_CLKCTRL, 2, CLK_DIV_MASK);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
}
710
711
712/*
713 * Stop the controller, and don't return until we're really sure that no
714 * further DMA is going on.
715 */
static void mcam_ctlr_stop_dma(struct mcam_camera *cam)
{
	unsigned long flags;

	/*
	 * Theory: stop the camera controller (whether it is operating
	 * or not). Delay briefly just in case we race with the SOF
	 * interrupt, then wait until no DMA is active.
	 */
	spin_lock_irqsave(&cam->dev_lock, flags);
	clear_bit(CF_SG_RESTART, &cam->flags);
	mcam_ctlr_stop(cam);
	cam->state = S_IDLE;
	spin_unlock_irqrestore(&cam->dev_lock, flags);
	/* Give any in-flight frame time to finish */
	msleep(40);
	if (test_bit(CF_DMA_ACTIVE, &cam->flags))
		/* This would be bad news - what now? */
		cam_err(cam, "Timeout waiting for DMA to end\n");
	spin_lock_irqsave(&cam->dev_lock, flags);
	mcam_ctlr_irq_disable(cam);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
}
738
739/*
740 * Power up and down.
741 */
/* Power the controller up: platform hook first, then clear PWRDWN. */
static void mcam_ctlr_power_up(struct mcam_camera *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	cam->plat_power_up(cam);
	mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
	msleep(5); /* Just to be sure */
}
752
/* Power the controller down; note the required ordering below. */
static void mcam_ctlr_power_down(struct mcam_camera *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	/*
	 * School of hard knocks department: be sure we do any register
	 * twiddling on the controller *before* calling the platform
	 * power down routine.
	 */
	mcam_reg_set_bit(cam, REG_CTRL1, C1_PWRDWN);
	cam->plat_power_down(cam);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
}
767
768/* -------------------------------------------------------------------- */
769/*
770 * Communications with the sensor.
771 */
772
/* Reset the sensor via its subdev core op; caller holds s_mutex. */
static int __mcam_cam_reset(struct mcam_camera *cam)
{
	return sensor_call(cam, core, reset, 0);
}
777
778/*
779 * We have found the sensor on the i2c. Let's try to have a
780 * conversation.
781 */
static int mcam_cam_init(struct mcam_camera *cam)
{
	struct v4l2_dbg_chip_ident chip;
	int ret;

	mutex_lock(&cam->s_mutex);
	if (cam->state != S_NOTREADY)
		cam_warn(cam, "Cam init with device in funky state %d",
				cam->state);
	ret = __mcam_cam_reset(cam);
	if (ret)
		goto out;
	/* Identify the sensor; only the OV7670 is supported */
	chip.ident = V4L2_IDENT_NONE;
	chip.match.type = V4L2_CHIP_MATCH_I2C_ADDR;
	chip.match.addr = cam->sensor_addr;
	ret = sensor_call(cam, core, g_chip_ident, &chip);
	if (ret)
		goto out;
	cam->sensor_type = chip.ident;
	if (cam->sensor_type != V4L2_IDENT_OV7670) {
		cam_err(cam, "Unsupported sensor type 0x%x", cam->sensor_type);
		ret = -EINVAL;
		goto out;
	}
/* Get/set parameters? */
	ret = 0;
	cam->state = S_IDLE;
out:
	/*
	 * Powered down on both success and failure paths; the open
	 * path powers the controller back up (see mcam_v4l_open()).
	 */
	mcam_ctlr_power_down(cam);
	mutex_unlock(&cam->s_mutex);
	return ret;
}
814
815/*
816 * Configure the sensor to match the parameters we have. Caller should
817 * hold s_mutex
818 */
819static int mcam_cam_set_flip(struct mcam_camera *cam)
820{
821 struct v4l2_control ctrl;
822
823 memset(&ctrl, 0, sizeof(ctrl));
824 ctrl.id = V4L2_CID_VFLIP;
825 ctrl.value = flip;
826 return sensor_call(cam, core, s_ctrl, &ctrl);
827}
828
829
830static int mcam_cam_configure(struct mcam_camera *cam)
831{
832 struct v4l2_mbus_framefmt mbus_fmt;
833 int ret;
834
835 v4l2_fill_mbus_format(&mbus_fmt, &cam->pix_format, cam->mbus_code);
836 ret = sensor_call(cam, core, init, 0);
837 if (ret == 0)
838 ret = sensor_call(cam, video, s_mbus_fmt, &mbus_fmt);
839 /*
840 * OV7670 does weird things if flip is set *before* format...
841 */
842 ret += mcam_cam_set_flip(cam);
843 return ret;
844}
845
846/*
847 * Get everything ready, and start grabbing frames.
848 */
static int mcam_read_setup(struct mcam_camera *cam)
{
	int ret;
	unsigned long flags;

	/*
	 * Configuration. If we still don't have DMA buffers,
	 * make one last, desperate attempt.
	 */
	if (cam->buffer_mode == B_vmalloc && cam->nbufs == 0 &&
			mcam_alloc_dma_bufs(cam, 0))
		return -ENOMEM;

	if (mcam_needs_config(cam)) {
		/* Sensor first, then controller registers */
		mcam_cam_configure(cam);
		ret = mcam_ctlr_configure(cam);
		if (ret)
			return ret;
	}

	/*
	 * Turn it loose.
	 */
	spin_lock_irqsave(&cam->dev_lock, flags);
	mcam_reset_buffers(cam);
	mcam_ctlr_irq_enable(cam);
	cam->state = S_STREAMING;
	mcam_ctlr_start(cam);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
	return 0;
}
880
881/* ----------------------------------------------------------------------- */
882/*
883 * Videobuf2 interface code.
884 */
885
/*
 * vb2 queue_setup: report buffer count/size requirements.  DMA-contig
 * needs at least 3 buffers since the controller always holds two of
 * them (see mcam_ctlr_dma_contig()); other modes need at least 2.
 */
static int mcam_vb_queue_setup(struct vb2_queue *vq, unsigned int *nbufs,
		unsigned int *num_planes, unsigned long sizes[],
		void *alloc_ctxs[])
{
	struct mcam_camera *cam = vb2_get_drv_priv(vq);
	int minbufs = (cam->buffer_mode == B_DMA_contig) ? 3 : 2;

	sizes[0] = cam->pix_format.sizeimage;
	*num_planes = 1; /* Someday we have to support planar formats... */
	if (*nbufs < minbufs)
		*nbufs = minbufs;
	if (cam->buffer_mode == B_DMA_contig)
		alloc_ctxs[0] = cam->vb_alloc_ctx;
	return 0;
}
901
902
/*
 * vb2 buf_queue: add a buffer to our list and, if the engine was
 * waiting for buffers (S/G restart or S_BUFWAIT), get it going again.
 */
static void mcam_vb_buf_queue(struct vb2_buffer *vb)
{
	struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
	struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long flags;
	int start;

	spin_lock_irqsave(&cam->dev_lock, flags);
	/* Decide under the lock; mcam_read_setup() itself takes locks */
	start = (cam->state == S_BUFWAIT) && !list_empty(&cam->buffers);
	list_add(&mvb->queue, &cam->buffers);
	if (test_bit(CF_SG_RESTART, &cam->flags))
		mcam_sg_restart(cam);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
	if (start)
		mcam_read_setup(cam);
}
919
920
921/*
922 * vb2 uses these to release the mutex when waiting in dqbuf. I'm
923 * not actually sure we need to do this (I'm not sure that vb2_dqbuf() needs
924 * to be called with the mutex held), but better safe than sorry.
925 */
/* Drop s_mutex while vb2 sleeps in dqbuf (see comment above). */
static void mcam_vb_wait_prepare(struct vb2_queue *vq)
{
	struct mcam_camera *cam = vb2_get_drv_priv(vq);

	mutex_unlock(&cam->s_mutex);
}
932
/* Re-acquire s_mutex after vb2's dqbuf wait completes. */
static void mcam_vb_wait_finish(struct vb2_queue *vq)
{
	struct mcam_camera *cam = vb2_get_drv_priv(vq);

	mutex_lock(&cam->s_mutex);
}
939
940/*
941 * These need to be called with the mutex held from vb2
942 */
static int mcam_vb_start_streaming(struct vb2_queue *vq)
{
	struct mcam_camera *cam = vb2_get_drv_priv(vq);

	if (cam->state != S_IDLE)
		return -EINVAL;
	cam->sequence = 0;
	/*
	 * Videobuf2 sneakily hoards all the buffers and won't
	 * give them to us until *after* streaming starts. But
	 * we can't actually start streaming until we have a
	 * destination. So go into a wait state and hope they
	 * give us buffers soon.
	 */
	if (cam->buffer_mode != B_vmalloc && list_empty(&cam->buffers)) {
		cam->state = S_BUFWAIT;
		return 0;	/* mcam_vb_buf_queue() finishes the job */
	}
	return mcam_read_setup(cam);
}
963
static int mcam_vb_stop_streaming(struct vb2_queue *vq)
{
	struct mcam_camera *cam = vb2_get_drv_priv(vq);
	unsigned long flags;

	if (cam->state == S_BUFWAIT) {
		/* They never gave us buffers */
		cam->state = S_IDLE;
		return 0;
	}
	if (cam->state != S_STREAMING)
		return -EINVAL;
	mcam_ctlr_stop_dma(cam);
	/*
	 * VB2 reclaims the buffers, so we need to forget
	 * about them.
	 */
	spin_lock_irqsave(&cam->dev_lock, flags);
	INIT_LIST_HEAD(&cam->buffers);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
	return 0;
}
986
987
/* vb2 callbacks shared by the vmalloc and DMA-contig buffer modes. */
static const struct vb2_ops mcam_vb2_ops = {
	.queue_setup		= mcam_vb_queue_setup,
	.buf_queue		= mcam_vb_buf_queue,
	.start_streaming	= mcam_vb_start_streaming,
	.stop_streaming		= mcam_vb_stop_streaming,
	.wait_prepare		= mcam_vb_wait_prepare,
	.wait_finish		= mcam_vb_wait_finish,
};
996
997
998#ifdef MCAM_MODE_DMA_SG
999/*
1000 * Scatter/gather mode uses all of the above functions plus a
1001 * few extras to deal with DMA mapping.
1002 */
/*
 * Allocate the coherent DMA descriptor array for one S/G buffer,
 * sized for the worst case of one descriptor per page plus one.
 */
static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
{
	struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
	struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
	int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;

	mvb->dma_desc = dma_alloc_coherent(cam->dev,
			ndesc * sizeof(struct mcam_dma_desc),
			&mvb->dma_desc_pa, GFP_KERNEL);
	if (mvb->dma_desc == NULL) {
		cam_err(cam, "Unable to get DMA descriptor array\n");
		return -ENOMEM;
	}
	return 0;
}
1018
/*
 * Map the buffer's scatterlist for DMA and translate it into the
 * controller's descriptor format.
 */
static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
{
	struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
	struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);
	struct mcam_dma_desc *desc = mvb->dma_desc;
	struct scatterlist *sg;
	int i;

	mvb->dma_desc_nent = dma_map_sg(cam->dev, sgd->sglist, sgd->num_pages,
			DMA_FROM_DEVICE);
	if (mvb->dma_desc_nent <= 0)
		return -EIO; /* Not sure what's right here */
	for_each_sg(sgd->sglist, sg, mvb->dma_desc_nent, i) {
		desc->dma_addr = sg_dma_address(sg);
		desc->segment_len = sg_dma_len(sg);
		desc++;
	}
	return 0;
}
1039
/* Undo the DMA mapping done in mcam_vb_sg_buf_prepare(). */
static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
{
	struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);

	dma_unmap_sg(cam->dev, sgd->sglist, sgd->num_pages, DMA_FROM_DEVICE);
	return 0;
}
1048
/* Free the descriptor array allocated in mcam_vb_sg_buf_init(). */
static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
{
	struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
	struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
	/* Must match the size computation used at init time */
	int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;

	dma_free_coherent(cam->dev, ndesc * sizeof(struct mcam_dma_desc),
			mvb->dma_desc, mvb->dma_desc_pa);
}
1058
1059
/* vb2 callbacks for S/G mode: shared ops plus DMA-mapping hooks. */
static const struct vb2_ops mcam_vb2_sg_ops = {
	.queue_setup		= mcam_vb_queue_setup,
	.buf_init		= mcam_vb_sg_buf_init,
	.buf_prepare		= mcam_vb_sg_buf_prepare,
	.buf_queue		= mcam_vb_buf_queue,
	.buf_finish		= mcam_vb_sg_buf_finish,
	.buf_cleanup		= mcam_vb_sg_buf_cleanup,
	.start_streaming	= mcam_vb_start_streaming,
	.stop_streaming		= mcam_vb_stop_streaming,
	.wait_prepare		= mcam_vb_wait_prepare,
	.wait_finish		= mcam_vb_wait_finish,
};
1072
1073#endif /* MCAM_MODE_DMA_SG */
1074
1075static int mcam_setup_vb2(struct mcam_camera *cam)
1076{
1077 struct vb2_queue *vq = &cam->vb_queue;
1078
1079 memset(vq, 0, sizeof(*vq));
1080 vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1081 vq->drv_priv = cam;
1082 INIT_LIST_HEAD(&cam->buffers);
1083 switch (cam->buffer_mode) {
1084 case B_DMA_contig:
1085#ifdef MCAM_MODE_DMA_CONTIG
1086 vq->ops = &mcam_vb2_ops;
1087 vq->mem_ops = &vb2_dma_contig_memops;
1088 cam->vb_alloc_ctx = vb2_dma_contig_init_ctx(cam->dev);
1089 vq->io_modes = VB2_MMAP | VB2_USERPTR;
1090 cam->dma_setup = mcam_ctlr_dma_contig;
1091 cam->frame_complete = mcam_dma_contig_done;
1092#endif
1093 break;
1094 case B_DMA_sg:
1095#ifdef MCAM_MODE_DMA_SG
1096 vq->ops = &mcam_vb2_sg_ops;
1097 vq->mem_ops = &vb2_dma_sg_memops;
1098 vq->io_modes = VB2_MMAP | VB2_USERPTR;
1099 cam->dma_setup = mcam_ctlr_dma_sg;
1100 cam->frame_complete = mcam_dma_sg_done;
1101#endif
1102 break;
1103 case B_vmalloc:
1104#ifdef MCAM_MODE_VMALLOC
1105 tasklet_init(&cam->s_tasklet, mcam_frame_tasklet,
1106 (unsigned long) cam);
1107 vq->ops = &mcam_vb2_ops;
1108 vq->mem_ops = &vb2_vmalloc_memops;
1109 vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
1110 vq->io_modes = VB2_MMAP;
1111 cam->dma_setup = mcam_ctlr_dma_vmalloc;
1112 cam->frame_complete = mcam_vmalloc_done;
1113#endif
1114 break;
1115 }
1116 return vb2_queue_init(vq);
1117}
1118
/* Tear down the vb2 queue and any contig allocation context. */
static void mcam_cleanup_vb2(struct mcam_camera *cam)
{
	vb2_queue_release(&cam->vb_queue);
#ifdef MCAM_MODE_DMA_CONTIG
	if (cam->buffer_mode == B_DMA_contig)
		vb2_dma_contig_cleanup_ctx(cam->vb_alloc_ctx);
#endif
}
1127
1128
1129/* ---------------------------------------------------------------------- */
1130/*
1131 * The long list of V4L2 ioctl() operations.
1132 */
1133
1134static int mcam_vidioc_streamon(struct file *filp, void *priv,
1135 enum v4l2_buf_type type)
1136{
1137 struct mcam_camera *cam = filp->private_data;
1138 int ret;
1139
1140 mutex_lock(&cam->s_mutex);
1141 ret = vb2_streamon(&cam->vb_queue, type);
1142 mutex_unlock(&cam->s_mutex);
1143 return ret;
1144}
1145
1146
1147static int mcam_vidioc_streamoff(struct file *filp, void *priv,
1148 enum v4l2_buf_type type)
1149{
1150 struct mcam_camera *cam = filp->private_data;
1151 int ret;
1152
1153 mutex_lock(&cam->s_mutex);
1154 ret = vb2_streamoff(&cam->vb_queue, type);
1155 mutex_unlock(&cam->s_mutex);
1156 return ret;
1157}
1158
1159
1160static int mcam_vidioc_reqbufs(struct file *filp, void *priv,
1161 struct v4l2_requestbuffers *req)
1162{
1163 struct mcam_camera *cam = filp->private_data;
1164 int ret;
1165
1166 mutex_lock(&cam->s_mutex);
1167 ret = vb2_reqbufs(&cam->vb_queue, req);
1168 mutex_unlock(&cam->s_mutex);
1169 return ret;
1170}
1171
1172
1173static int mcam_vidioc_querybuf(struct file *filp, void *priv,
1174 struct v4l2_buffer *buf)
1175{
1176 struct mcam_camera *cam = filp->private_data;
1177 int ret;
1178
1179 mutex_lock(&cam->s_mutex);
1180 ret = vb2_querybuf(&cam->vb_queue, buf);
1181 mutex_unlock(&cam->s_mutex);
1182 return ret;
1183}
1184
1185static int mcam_vidioc_qbuf(struct file *filp, void *priv,
1186 struct v4l2_buffer *buf)
1187{
1188 struct mcam_camera *cam = filp->private_data;
1189 int ret;
1190
1191 mutex_lock(&cam->s_mutex);
1192 ret = vb2_qbuf(&cam->vb_queue, buf);
1193 mutex_unlock(&cam->s_mutex);
1194 return ret;
1195}
1196
1197static int mcam_vidioc_dqbuf(struct file *filp, void *priv,
1198 struct v4l2_buffer *buf)
1199{
1200 struct mcam_camera *cam = filp->private_data;
1201 int ret;
1202
1203 mutex_lock(&cam->s_mutex);
1204 ret = vb2_dqbuf(&cam->vb_queue, buf, filp->f_flags & O_NONBLOCK);
1205 mutex_unlock(&cam->s_mutex);
1206 return ret;
1207}
1208
1209
1210
1211static int mcam_vidioc_queryctrl(struct file *filp, void *priv,
1212 struct v4l2_queryctrl *qc)
1213{
1214 struct mcam_camera *cam = priv;
1215 int ret;
1216
1217 mutex_lock(&cam->s_mutex);
1218 ret = sensor_call(cam, core, queryctrl, qc);
1219 mutex_unlock(&cam->s_mutex);
1220 return ret;
1221}
1222
1223
1224static int mcam_vidioc_g_ctrl(struct file *filp, void *priv,
1225 struct v4l2_control *ctrl)
1226{
1227 struct mcam_camera *cam = priv;
1228 int ret;
1229
1230 mutex_lock(&cam->s_mutex);
1231 ret = sensor_call(cam, core, g_ctrl, ctrl);
1232 mutex_unlock(&cam->s_mutex);
1233 return ret;
1234}
1235
1236
1237static int mcam_vidioc_s_ctrl(struct file *filp, void *priv,
1238 struct v4l2_control *ctrl)
1239{
1240 struct mcam_camera *cam = priv;
1241 int ret;
1242
1243 mutex_lock(&cam->s_mutex);
1244 ret = sensor_call(cam, core, s_ctrl, ctrl);
1245 mutex_unlock(&cam->s_mutex);
1246 return ret;
1247}
1248
1249
1250static int mcam_vidioc_querycap(struct file *file, void *priv,
1251 struct v4l2_capability *cap)
1252{
1253 strcpy(cap->driver, "marvell_ccic");
1254 strcpy(cap->card, "marvell_ccic");
1255 cap->version = 1;
1256 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
1257 V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
1258 return 0;
1259}
1260
1261
/* Enumerate the entries of the static mcam_formats[] table. */
static int mcam_vidioc_enum_fmt_vid_cap(struct file *filp,
		void *priv, struct v4l2_fmtdesc *fmt)
{
	if (fmt->index >= N_MCAM_FMTS)
		return -EINVAL;
	strlcpy(fmt->description, mcam_formats[fmt->index].desc,
			sizeof(fmt->description));
	fmt->pixelformat = mcam_formats[fmt->index].pixelformat;
	return 0;
}
1272
/*
 * VIDIOC_TRY_FMT: map the request onto a supported format, let the
 * sensor negotiate the geometry, then fill in line/image sizes.
 */
static int mcam_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
		struct v4l2_format *fmt)
{
	struct mcam_camera *cam = priv;
	struct mcam_format_struct *f;
	struct v4l2_pix_format *pix = &fmt->fmt.pix;
	struct v4l2_mbus_framefmt mbus_fmt;
	int ret;

	f = mcam_find_format(pix->pixelformat);
	pix->pixelformat = f->pixelformat;
	v4l2_fill_mbus_format(&mbus_fmt, pix, f->mbus_code);
	mutex_lock(&cam->s_mutex);
	ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
	mutex_unlock(&cam->s_mutex);
	v4l2_fill_pix_format(pix, &mbus_fmt);
	/* Sizes derive from the width the sensor actually accepted */
	pix->bytesperline = pix->width * f->bpp;
	pix->sizeimage = pix->height * pix->bytesperline;
	return ret;
}
1293
static int mcam_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
		struct v4l2_format *fmt)
{
	struct mcam_camera *cam = priv;
	struct mcam_format_struct *f;
	int ret;

	/*
	 * Can't do anything if the device is not idle
	 * Also can't if there are streaming buffers in place.
	 */
	if (cam->state != S_IDLE || cam->vb_queue.num_buffers > 0)
		return -EBUSY;

	f = mcam_find_format(fmt->fmt.pix.pixelformat);

	/*
	 * See if the formatting works in principle.
	 */
	ret = mcam_vidioc_try_fmt_vid_cap(filp, priv, fmt);
	if (ret)
		return ret;
	/*
	 * Now we start to change things for real, so let's do it
	 * under lock.
	 */
	mutex_lock(&cam->s_mutex);
	cam->pix_format = fmt->fmt.pix;
	cam->mbus_code = f->mbus_code;

	/*
	 * Make sure we have appropriate DMA buffers.
	 */
	if (cam->buffer_mode == B_vmalloc) {
		ret = mcam_check_dma_buffers(cam);
		if (ret)
			goto out;
	}
	/* Sensor/controller reconfiguration happens lazily at stream time */
	mcam_set_config_needed(cam, 1);
	ret = 0;
out:
	mutex_unlock(&cam->s_mutex);
	return ret;
}
1338
1339/*
1340 * Return our stored notion of how the camera is/should be configured.
1341 * The V4l2 spec wants us to be smarter, and actually get this from
1342 * the camera (and not mess with it at open time). Someday.
1343 */
/* Return the stored format; see the comment above about the spec. */
static int mcam_vidioc_g_fmt_vid_cap(struct file *filp, void *priv,
		struct v4l2_format *f)
{
	struct mcam_camera *cam = priv;

	f->fmt.pix = cam->pix_format;
	return 0;
}
1352
1353/*
1354 * We only have one input - the sensor - so minimize the nonsense here.
1355 */
1356static int mcam_vidioc_enum_input(struct file *filp, void *priv,
1357 struct v4l2_input *input)
1358{
1359 if (input->index != 0)
1360 return -EINVAL;
1361
1362 input->type = V4L2_INPUT_TYPE_CAMERA;
1363 input->std = V4L2_STD_ALL; /* Not sure what should go here */
1364 strcpy(input->name, "Camera");
1365 return 0;
1366}
1367
/* Only one input exists, so it is always the current one. */
static int mcam_vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}
1373
1374static int mcam_vidioc_s_input(struct file *filp, void *priv, unsigned int i)
1375{
1376 if (i != 0)
1377 return -EINVAL;
1378 return 0;
1379}
1380
1381/* from vivi.c */
/* Standards don't apply to a camera sensor; accept anything. */
static int mcam_vidioc_s_std(struct file *filp, void *priv, v4l2_std_id *a)
{
	return 0;
}
1386
1387/*
1388 * G/S_PARM. Most of this is done by the sensor, but we are
1389 * the level which controls the number of read buffers.
1390 */
static int mcam_vidioc_g_parm(struct file *filp, void *priv,
		struct v4l2_streamparm *parms)
{
	struct mcam_camera *cam = priv;
	int ret;

	mutex_lock(&cam->s_mutex);
	ret = sensor_call(cam, video, g_parm, parms);
	mutex_unlock(&cam->s_mutex);
	/* We own readbuffers, so override whatever the sensor said */
	parms->parm.capture.readbuffers = n_dma_bufs;
	return ret;
}
1403
static int mcam_vidioc_s_parm(struct file *filp, void *priv,
		struct v4l2_streamparm *parms)
{
	struct mcam_camera *cam = priv;
	int ret;

	mutex_lock(&cam->s_mutex);
	ret = sensor_call(cam, video, s_parm, parms);
	mutex_unlock(&cam->s_mutex);
	/* readbuffers is fixed at n_dma_bufs regardless of the request */
	parms->parm.capture.readbuffers = n_dma_bufs;
	return ret;
}
1416
/*
 * Chip identification: answer for the bridge ourselves, delegate
 * sensor queries to the subdev.
 */
static int mcam_vidioc_g_chip_ident(struct file *file, void *priv,
		struct v4l2_dbg_chip_ident *chip)
{
	struct mcam_camera *cam = priv;

	chip->ident = V4L2_IDENT_NONE;
	chip->revision = 0;
	if (v4l2_chip_match_host(&chip->match)) {
		chip->ident = cam->chip_id;
		return 0;
	}
	return sensor_call(cam, core, g_chip_ident, chip);
}
1430
1431static int mcam_vidioc_enum_framesizes(struct file *filp, void *priv,
1432 struct v4l2_frmsizeenum *sizes)
1433{
1434 struct mcam_camera *cam = priv;
1435 int ret;
1436
1437 mutex_lock(&cam->s_mutex);
1438 ret = sensor_call(cam, video, enum_framesizes, sizes);
1439 mutex_unlock(&cam->s_mutex);
1440 return ret;
1441}
1442
1443static int mcam_vidioc_enum_frameintervals(struct file *filp, void *priv,
1444 struct v4l2_frmivalenum *interval)
1445{
1446 struct mcam_camera *cam = priv;
1447 int ret;
1448
1449 mutex_lock(&cam->s_mutex);
1450 ret = sensor_call(cam, video, enum_frameintervals, interval);
1451 mutex_unlock(&cam->s_mutex);
1452 return ret;
1453}
1454
1455#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Debug register read: bridge registers here, sensor ones via subdev. */
static int mcam_vidioc_g_register(struct file *file, void *priv,
		struct v4l2_dbg_register *reg)
{
	struct mcam_camera *cam = priv;

	if (v4l2_chip_match_host(&reg->match)) {
		reg->val = mcam_reg_read(cam, reg->reg);
		reg->size = 4;	/* controller registers are 32 bits wide */
		return 0;
	}
	return sensor_call(cam, core, g_register, reg);
}
1468
/* Debug register write: bridge registers here, sensor ones via subdev. */
static int mcam_vidioc_s_register(struct file *file, void *priv,
		struct v4l2_dbg_register *reg)
{
	struct mcam_camera *cam = priv;

	if (v4l2_chip_match_host(&reg->match)) {
		mcam_reg_write(cam, reg->reg, reg->val);
		return 0;
	}
	return sensor_call(cam, core, s_register, reg);
}
1480#endif
1481
/* The full set of V4L2 ioctl handlers for this device. */
static const struct v4l2_ioctl_ops mcam_v4l_ioctl_ops = {
	.vidioc_querycap	= mcam_vidioc_querycap,
	.vidioc_enum_fmt_vid_cap = mcam_vidioc_enum_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap	= mcam_vidioc_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap	= mcam_vidioc_s_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap	= mcam_vidioc_g_fmt_vid_cap,
	.vidioc_enum_input	= mcam_vidioc_enum_input,
	.vidioc_g_input		= mcam_vidioc_g_input,
	.vidioc_s_input		= mcam_vidioc_s_input,
	.vidioc_s_std		= mcam_vidioc_s_std,
	.vidioc_reqbufs		= mcam_vidioc_reqbufs,
	.vidioc_querybuf	= mcam_vidioc_querybuf,
	.vidioc_qbuf		= mcam_vidioc_qbuf,
	.vidioc_dqbuf		= mcam_vidioc_dqbuf,
	.vidioc_streamon	= mcam_vidioc_streamon,
	.vidioc_streamoff	= mcam_vidioc_streamoff,
	.vidioc_queryctrl	= mcam_vidioc_queryctrl,
	.vidioc_g_ctrl		= mcam_vidioc_g_ctrl,
	.vidioc_s_ctrl		= mcam_vidioc_s_ctrl,
	.vidioc_g_parm		= mcam_vidioc_g_parm,
	.vidioc_s_parm		= mcam_vidioc_s_parm,
	.vidioc_enum_framesizes = mcam_vidioc_enum_framesizes,
	.vidioc_enum_frameintervals = mcam_vidioc_enum_frameintervals,
	.vidioc_g_chip_ident	= mcam_vidioc_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.vidioc_g_register	= mcam_vidioc_g_register,
	.vidioc_s_register	= mcam_vidioc_s_register,
#endif
};
1511
1512/* ---------------------------------------------------------------------- */
1513/*
1514 * Our various file operations.
1515 */
/*
 * Open: first opener sets up vb2, powers up the hardware and resets
 * the sensor; later openers just bump the user count.
 */
static int mcam_v4l_open(struct file *filp)
{
	struct mcam_camera *cam = video_drvdata(filp);
	int ret = 0;

	filp->private_data = cam;

	/* Reset the module-wide frame statistics counters */
	frames = singles = delivered = 0;
	mutex_lock(&cam->s_mutex);
	if (cam->users == 0) {
		ret = mcam_setup_vb2(cam);
		if (ret)
			goto out;
		mcam_ctlr_power_up(cam);
		__mcam_cam_reset(cam);
		mcam_set_config_needed(cam, 1);
	}
	(cam->users)++;
out:
	mutex_unlock(&cam->s_mutex);
	return ret;
}
1538
1539
/*
 * Release: last closer tears down vb2 and powers the hardware off.
 */
static int mcam_v4l_release(struct file *filp)
{
	struct mcam_camera *cam = filp->private_data;

	/* Frame stats at err level — presumably debug aid; maybe demote */
	cam_err(cam, "Release, %d frames, %d singles, %d delivered\n", frames,
			singles, delivered);
	mutex_lock(&cam->s_mutex);
	(cam->users)--;
	if (filp == cam->owner) {
		mcam_ctlr_stop_dma(cam);
		cam->owner = NULL;
	}
	if (cam->users == 0) {
		mcam_cleanup_vb2(cam);
		mcam_ctlr_power_down(cam);
		if (cam->buffer_mode == B_vmalloc && alloc_bufs_at_read)
			mcam_free_dma_bufs(cam);
	}
	mutex_unlock(&cam->s_mutex);
	return 0;
}
1561
1562static ssize_t mcam_v4l_read(struct file *filp,
1563 char __user *buffer, size_t len, loff_t *pos)
1564{
1565 struct mcam_camera *cam = filp->private_data;
1566 int ret;
1567
1568 mutex_lock(&cam->s_mutex);
1569 ret = vb2_read(&cam->vb_queue, buffer, len, pos,
1570 filp->f_flags & O_NONBLOCK);
1571 mutex_unlock(&cam->s_mutex);
1572 return ret;
1573}
1574
1575
1576
/*
 * poll() support, delegated to videobuf2 under s_mutex.
 */
1577static unsigned int mcam_v4l_poll(struct file *filp,
1578 struct poll_table_struct *pt)
1579{
1580 struct mcam_camera *cam = filp->private_data;
1581 int ret;
1582
1583 mutex_lock(&cam->s_mutex);
1584 ret = vb2_poll(&cam->vb_queue, filp, pt);
1585 mutex_unlock(&cam->s_mutex);
1586 return ret;
1587}
1588
1589
/*
 * mmap() support, delegated to videobuf2 under s_mutex.
 */
1590static int mcam_v4l_mmap(struct file *filp, struct vm_area_struct *vma)
1591{
1592 struct mcam_camera *cam = filp->private_data;
1593 int ret;
1594
1595 mutex_lock(&cam->s_mutex);
1596 ret = vb2_mmap(&cam->vb_queue, vma);
1597 mutex_unlock(&cam->s_mutex);
1598 return ret;
1599}
1600
1601
1602
/* File operations; ioctls dispatch through video_ioctl2 to mcam_v4l_ioctl_ops */
1603static const struct v4l2_file_operations mcam_v4l_fops = {
1604 .owner = THIS_MODULE,
1605 .open = mcam_v4l_open,
1606 .release = mcam_v4l_release,
1607 .read = mcam_v4l_read,
1608 .poll = mcam_v4l_poll,
1609 .mmap = mcam_v4l_mmap,
1610 .unlocked_ioctl = video_ioctl2,
1611};
1612
1613
1614/*
1615 * This template device holds all of those v4l2 methods; we
1616 * clone it for specific real devices.
1617 */
1618static struct video_device mcam_v4l_template = {
1619 .name = "mcam",
1620 .tvnorms = V4L2_STD_NTSC_M,
1621 .current_norm = V4L2_STD_NTSC_M, /* make mplayer happy */
1622
1623 .fops = &mcam_v4l_fops,
1624 .ioctl_ops = &mcam_v4l_ioctl_ops,
	/* vdev is embedded in struct mcam_camera, so nothing to free here */
1625 .release = video_device_release_empty,
1626};
1627
1628/* ---------------------------------------------------------------------- */
1629/*
1630 * Interrupt handler stuff
1631 */
/*
 * Note a completed frame and hand it to the mode-specific completion
 * handler. Runs in interrupt context with dev_lock held (see the
 * mccic_irq() comment below).
 */
1632static void mcam_frame_complete(struct mcam_camera *cam, int frame)
1633{
1634 /*
1635 * Basic frame housekeeping.
1636 */
1637 set_bit(frame, &cam->flags);
1638 clear_bit(CF_DMA_ACTIVE, &cam->flags);
1639 cam->next_buf = frame;
1640 cam->buf_seq[frame] = ++(cam->sequence);
1641 frames++;
1642 /*
1643 * "This should never happen"
1644 */
1645 if (cam->state != S_STREAMING)
1646 return;
1647 /*
1648 * Process the frame and set up the next one.
1649 */
1650 cam->frame_complete(cam, frame);
1651}
1652
1653
1654/*
1655 * The interrupt handler; this needs to be called from the
1656 * platform irq handler with the lock held.
1657 */
/*
 * The interrupt handler; this needs to be called from the
 * platform irq handler with the lock held.
 *
 * Returns nonzero if any of the bits in @irqs was ours, so the
 * platform handler can report IRQ_HANDLED / IRQ_NONE correctly
 * on shared interrupt lines.
 */
1658int mccic_irq(struct mcam_camera *cam, unsigned int irqs)
1659{
1660 unsigned int frame, handled = 0;
1661
1662 mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS); /* Clear'em all */
1663 /*
1664 * Handle any frame completions. There really should
1665 * not be more than one of these, or we have fallen
1666 * far behind.
1667 *
1668 * When running in S/G mode, the frame number lacks any
1669 * real meaning - there's only one descriptor array - but
1670 * the controller still picks a different one to signal
1671 * each time.
1672 */
1673 for (frame = 0; frame < cam->nbufs; frame++)
1674 if (irqs & (IRQ_EOF0 << frame)) {
1675 mcam_frame_complete(cam, frame);
1676 handled = 1;
1677 }
1678 /*
1679 * If a frame starts, note that we have DMA active. This
1680 * code assumes that we won't get multiple frame interrupts
1681 * at once; may want to rethink that.
1682 */
1683 if (irqs & (IRQ_SOF0 | IRQ_SOF1 | IRQ_SOF2)) {
1684 set_bit(CF_DMA_ACTIVE, &cam->flags);
1685 handled = 1;
	/* In S/G mode each frame is started individually, so stop here */
1686 if (cam->buffer_mode == B_DMA_sg)
1687 mcam_ctlr_stop(cam);
1688 }
1689 return handled;
1690}
1691
1692/* ---------------------------------------------------------------------- */
1693/*
1694 * Registration and such.
1695 */
/*
 * OV7670 sensor configuration handed to the subdev at registration;
 * clock_speed/use_smbus are filled in from the platform data in
 * mccic_register().
 */
1696static struct ov7670_config sensor_cfg = {
1697 /*
1698 * Exclude QCIF mode, because it only captures a tiny portion
1699 * of the sensor FOV
1700 */
1701 .min_width = 320,
1702 .min_height = 240,
1703};
1704
1705
1706int mccic_register(struct mcam_camera *cam)
1707{
1708 struct i2c_board_info ov7670_info = {
1709 .type = "ov7670",
1710 .addr = 0x42 >> 1,
1711 .platform_data = &sensor_cfg,
1712 };
1713 int ret;
1714
1715 /*
1716 * Validate the requested buffer mode.
1717 */
1718 if (buffer_mode >= 0)
1719 cam->buffer_mode = buffer_mode;
1720 if (cam->buffer_mode == B_DMA_sg &&
1721 cam->chip_id == V4L2_IDENT_CAFE) {
1722 printk(KERN_ERR "marvell-cam: Cafe can't do S/G I/O, "
1723 "attempting vmalloc mode instead\n");
1724 cam->buffer_mode = B_vmalloc;
1725 }
1726 if (!mcam_buffer_mode_supported(cam->buffer_mode)) {
1727 printk(KERN_ERR "marvell-cam: buffer mode %d unsupported\n",
1728 cam->buffer_mode);
1729 return -EINVAL;
1730 }
1731 /*
1732 * Register with V4L
1733 */
1734 ret = v4l2_device_register(cam->dev, &cam->v4l2_dev);
1735 if (ret)
1736 return ret;
1737
1738 mutex_init(&cam->s_mutex);
1739 cam->state = S_NOTREADY;
1740 mcam_set_config_needed(cam, 1);
1741 cam->pix_format = mcam_def_pix_format;
1742 cam->mbus_code = mcam_def_mbus_code;
1743 INIT_LIST_HEAD(&cam->buffers);
1744 mcam_ctlr_init(cam);
1745
1746 /*
1747 * Try to find the sensor.
1748 */
1749 sensor_cfg.clock_speed = cam->clock_speed;
1750 sensor_cfg.use_smbus = cam->use_smbus;
1751 cam->sensor_addr = ov7670_info.addr;
1752 cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev,
1753 cam->i2c_adapter, &ov7670_info, NULL);
1754 if (cam->sensor == NULL) {
1755 ret = -ENODEV;
1756 goto out_unregister;
1757 }
1758
1759 ret = mcam_cam_init(cam);
1760 if (ret)
1761 goto out_unregister;
1762 /*
1763 * Get the v4l2 setup done.
1764 */
1765 mutex_lock(&cam->s_mutex);
1766 cam->vdev = mcam_v4l_template;
1767 cam->vdev.debug = 0;
1768 cam->vdev.v4l2_dev = &cam->v4l2_dev;
1769 ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
1770 if (ret)
1771 goto out;
1772 video_set_drvdata(&cam->vdev, cam);
1773
1774 /*
1775 * If so requested, try to get our DMA buffers now.
1776 */
1777 if (cam->buffer_mode == B_vmalloc && !alloc_bufs_at_read) {
1778 if (mcam_alloc_dma_bufs(cam, 1))
1779 cam_warn(cam, "Unable to alloc DMA buffers at load"
1780 " will try again later.");
1781 }
1782
1783out:
1784 mutex_unlock(&cam->s_mutex);
1785 return ret;
1786out_unregister:
1787 v4l2_device_unregister(&cam->v4l2_dev);
1788 return ret;
1789}
1790
1791
/*
 * Tear down a controller previously set up by mccic_register().
 * Expected to be called with no open file handles remaining.
 */
1792void mccic_shutdown(struct mcam_camera *cam)
1793{
1794 /*
1795 * If we have no users (and we really, really should have no
1796 * users) the device will already be powered down. Trying to
1797 * take it down again will wedge the machine, which is frowned
1798 * upon.
1799 */
1800 if (cam->users > 0) {
1801 cam_warn(cam, "Removing a device with users!\n");
1802 mcam_ctlr_power_down(cam);
1803 }
1804 vb2_queue_release(&cam->vb_queue);
1805 if (cam->buffer_mode == B_vmalloc)
1806 mcam_free_dma_bufs(cam);
1807 video_unregister_device(&cam->vdev);
1808 v4l2_device_unregister(&cam->v4l2_dev);
1809}
1810
1811/*
1812 * Power management
1813 */
1814#ifdef CONFIG_PM
1815
/*
 * Suspend: stop DMA and power the controller down. The pre-suspend
 * state is saved and restored so mccic_resume() can still see
 * S_STREAMING and restart capture (stopping DMA presumably alters
 * cam->state — confirm against mcam_ctlr_stop_dma()).
 *
 * NOTE(review): unlike mccic_resume(), this runs without s_mutex.
 */
1816void mccic_suspend(struct mcam_camera *cam)
1817{
1818 enum mcam_state cstate = cam->state;
1819
1820 mcam_ctlr_stop_dma(cam);
1821 mcam_ctlr_power_down(cam);
1822 cam->state = cstate;
1823}
1824
/*
 * Resume: power the controller back up if it is in use, flag that a
 * full reconfiguration is needed, and restart streaming if we were
 * streaming when suspended. Returns 0 or a negative errno from
 * mcam_read_setup().
 */
1825int mccic_resume(struct mcam_camera *cam)
1826{
1827 int ret = 0;
1828
1829 mutex_lock(&cam->s_mutex);
1830 if (cam->users > 0) {
1831 mcam_ctlr_power_up(cam);
1832 __mcam_cam_reset(cam);
1833 } else {
1834 mcam_ctlr_power_down(cam);
1835 }
1836 mutex_unlock(&cam->s_mutex);
1837
1838 set_bit(CF_CONFIG_NEEDED, &cam->flags);
1839 if (cam->state == S_STREAMING)
1840 ret = mcam_read_setup(cam);
1841 return ret;
1842}
1843#endif /* CONFIG_PM */
diff --git a/drivers/media/video/marvell-ccic/mcam-core.h b/drivers/media/video/marvell-ccic/mcam-core.h
new file mode 100644
index 000000000000..917200e63255
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/mcam-core.h
@@ -0,0 +1,323 @@
1/*
2 * Marvell camera core structures.
3 *
4 * Copyright 2011 Jonathan Corbet corbet@lwn.net
5 */
6#ifndef _MCAM_CORE_H
7#define _MCAM_CORE_H
8
9#include <linux/list.h>
10#include <media/v4l2-common.h>
11#include <media/v4l2-dev.h>
12#include <media/videobuf2-core.h>
13
14/*
15 * Create our own symbols for the supported buffer modes, but, for now,
16 * base them entirely on which videobuf2 options have been selected.
17 */
18#if defined(CONFIG_VIDEOBUF2_VMALLOC) || defined(CONFIG_VIDEOBUF2_VMALLOC_MODULE)
19#define MCAM_MODE_VMALLOC 1
20#endif
21
22#if defined(CONFIG_VIDEOBUF2_DMA_CONTIG) || defined(CONFIG_VIDEOBUF2_DMA_CONTIG_MODULE)
23#define MCAM_MODE_DMA_CONTIG 1
24#endif
25
26#if defined(CONFIG_VIDEOBUF2_DMA_SG) || defined(CONFIG_VIDEOBUF2_DMA_SG_MODULE)
27#define MCAM_MODE_DMA_SG 1
28#endif
29
30#if !defined(MCAM_MODE_VMALLOC) && !defined(MCAM_MODE_DMA_CONTIG) && \
31 !defined(MCAM_MODE_DMA_SG)
32#error One of the videobuf buffer modes must be selected in the config
33#endif
34
35
/* Device lifecycle state; see the locking notes on struct mcam_camera. */
36enum mcam_state {
37 S_NOTREADY, /* Not yet initialized */
38 S_IDLE, /* Just hanging around */
39 S_FLAKED, /* Some sort of problem */
40 S_STREAMING, /* Streaming data */
41 S_BUFWAIT /* streaming requested but no buffers yet */
42};
43#define MAX_DMA_BUFS 3
44
45/*
46 * Different platforms work best with different buffer modes, so we
47 * let the platform pick.
48 */
49enum mcam_buffer_mode {
50 B_vmalloc = 0, /* videobuf2-vmalloc backed */
51 B_DMA_contig = 1, /* videobuf2-dma-contig backed */
52 B_DMA_sg = 2 /* videobuf2-dma-sg backed */
53};
54
55/*
56 * Is a given buffer mode supported by the current kernel configuration?
57 */
/*
 * Is a given buffer mode supported by the current kernel configuration?
 *
 * Each compiled-in mode contributes a case label that falls through to
 * "return 1"; modes compiled out simply have no label and land in the
 * default. With no labels present at all, every mode hits default.
 */
58static inline int mcam_buffer_mode_supported(enum mcam_buffer_mode mode)
59{
60 switch (mode) {
61#ifdef MCAM_MODE_VMALLOC
62 case B_vmalloc:
63#endif
64#ifdef MCAM_MODE_DMA_CONTIG
65 case B_DMA_contig:
66#endif
67#ifdef MCAM_MODE_DMA_SG
68 case B_DMA_sg:
69#endif
70 return 1;
71 default:
72 return 0;
73 }
74}
75
76
77/*
78 * A description of one of our devices.
79 * Locking: controlled by s_mutex. Certain fields, however, require
80 * the dev_lock spinlock; they are marked as such by comments.
81 * dev_lock is also required for access to device registers.
82 */
83struct mcam_camera {
84 /*
85 * These fields should be set by the platform code prior to
86 * calling mcam_register().
87 */
88 struct i2c_adapter *i2c_adapter;
89 unsigned char __iomem *regs;
90 spinlock_t dev_lock;
91 struct device *dev; /* For messages, dma alloc */
92 unsigned int chip_id; /* V4L2_IDENT_* (Cafe or Armada 610) */
93 short int clock_speed; /* Sensor clock speed, default 30 */
94 short int use_smbus; /* SMBUS or straight I2c? */
95 enum mcam_buffer_mode buffer_mode;
96 /*
97 * Callbacks from the core to the platform code.
98 */
99 void (*plat_power_up) (struct mcam_camera *cam);
100 void (*plat_power_down) (struct mcam_camera *cam);
101
102 /*
103 * Everything below here is private to the mcam core and
104 * should not be touched by the platform code.
105 */
106 struct v4l2_device v4l2_dev;
107 enum mcam_state state;
108 unsigned long flags; /* Buffer status, mainly (dev_lock) */
109 int users; /* How many open FDs */
110 struct file *owner; /* Who has data access (v4l2) */
111
112 /*
113 * Subsystem structures.
114 */
115 struct video_device vdev;
116 struct v4l2_subdev *sensor;
117 unsigned short sensor_addr;
118
119 /* Videobuf2 stuff */
120 struct vb2_queue vb_queue;
121 struct list_head buffers; /* Available frames */
122
123 unsigned int nbufs; /* How many are alloc'd */
124 int next_buf; /* Next to consume (dev_lock) */
125
126 /* DMA buffers - vmalloc mode */
127#ifdef MCAM_MODE_VMALLOC
128 unsigned int dma_buf_size; /* allocated size */
129 void *dma_bufs[MAX_DMA_BUFS]; /* Internal buffer addresses */
130 dma_addr_t dma_handles[MAX_DMA_BUFS]; /* Buffer bus addresses */
131 struct tasklet_struct s_tasklet;
132#endif
133 unsigned int sequence; /* Frame sequence number */
134 unsigned int buf_seq[MAX_DMA_BUFS]; /* Sequence for individual bufs */
135
136 /* DMA buffers - DMA modes */
137 struct mcam_vb_buffer *vb_bufs[MAX_DMA_BUFS];
138 struct vb2_alloc_ctx *vb_alloc_ctx; /* presumably dma-contig context — confirm */
139
140 /* Mode-specific ops, set at open time */
141 void (*dma_setup)(struct mcam_camera *cam);
142 void (*frame_complete)(struct mcam_camera *cam, int frame);
143
144 /* Current operating parameters */
145 u32 sensor_type; /* Currently ov7670 only */
146 struct v4l2_pix_format pix_format;
147 enum v4l2_mbus_pixelcode mbus_code;
148
149 /* Locks */
150 struct mutex s_mutex; /* Access to this structure */
151};
152
153
154/*
155 * Register I/O functions. These are here because the platform code
156 * may legitimately need to mess with the register space.
157 */
158/*
159 * Device register I/O
160 */
/* 32-bit MMIO write to the controller register at byte offset @reg. */
161static inline void mcam_reg_write(struct mcam_camera *cam, unsigned int reg,
162 unsigned int val)
163{
164 iowrite32(val, cam->regs + reg);
165}
166
/* 32-bit MMIO read of the controller register at byte offset @reg. */
167static inline unsigned int mcam_reg_read(struct mcam_camera *cam,
168 unsigned int reg)
169{
170 return ioread32(cam->regs + reg);
171}
172
173
/*
 * Read-modify-write: update only the bits selected by @mask in
 * register @reg; all other bits keep their current values.
 */
static inline void mcam_reg_write_mask(struct mcam_camera *cam, unsigned int reg,
		unsigned int val, unsigned int mask)
{
	unsigned int cur = mcam_reg_read(cam, reg);

	cur &= ~mask;
	cur |= val & mask;
	mcam_reg_write(cam, reg, cur);
}
182
/* Clear the bits given in @val in register @reg. */
183static inline void mcam_reg_clear_bit(struct mcam_camera *cam,
184 unsigned int reg, unsigned int val)
185{
186 mcam_reg_write_mask(cam, reg, 0, val);
187}
188
/* Set the bits given in @val in register @reg. */
189static inline void mcam_reg_set_bit(struct mcam_camera *cam,
190 unsigned int reg, unsigned int val)
191{
192 mcam_reg_write_mask(cam, reg, val, val);
193}
194
195/*
196 * Functions for use by platform code.
197 */
198int mccic_register(struct mcam_camera *cam);
199int mccic_irq(struct mcam_camera *cam, unsigned int irqs);
200void mccic_shutdown(struct mcam_camera *cam);
201#ifdef CONFIG_PM
202void mccic_suspend(struct mcam_camera *cam);
203int mccic_resume(struct mcam_camera *cam);
204#endif
205
206/*
207 * Register definitions for the m88alp01 camera interface. Offsets in bytes
208 * as given in the spec.
209 */
210#define REG_Y0BAR 0x00
211#define REG_Y1BAR 0x04
212#define REG_Y2BAR 0x08
213/* ... */
214
215#define REG_IMGPITCH 0x24 /* Image pitch register */
216#define IMGP_YP_SHFT 2 /* Y pitch params */
217#define IMGP_YP_MASK 0x00003ffc /* Y pitch field */
218#define IMGP_UVP_SHFT 18 /* UV pitch (planar) */
219#define IMGP_UVP_MASK 0x3ffc0000
220#define REG_IRQSTATRAW 0x28 /* RAW IRQ Status */
221#define IRQ_EOF0 0x00000001 /* End of frame 0 */
222#define IRQ_EOF1 0x00000002 /* End of frame 1 */
223#define IRQ_EOF2 0x00000004 /* End of frame 2 */
224#define IRQ_SOF0 0x00000008 /* Start of frame 0 */
225#define IRQ_SOF1 0x00000010 /* Start of frame 1 */
226#define IRQ_SOF2 0x00000020 /* Start of frame 2 */
227#define IRQ_OVERFLOW 0x00000040 /* FIFO overflow */
228#define IRQ_TWSIW 0x00010000 /* TWSI (smbus) write */
229#define IRQ_TWSIR 0x00020000 /* TWSI read */
230#define IRQ_TWSIE 0x00040000 /* TWSI error */
231#define TWSIIRQS (IRQ_TWSIW|IRQ_TWSIR|IRQ_TWSIE)
232#define FRAMEIRQS (IRQ_EOF0|IRQ_EOF1|IRQ_EOF2|IRQ_SOF0|IRQ_SOF1|IRQ_SOF2)
233#define ALLIRQS (TWSIIRQS|FRAMEIRQS|IRQ_OVERFLOW)
234#define REG_IRQMASK 0x2c /* IRQ mask - same bits as IRQSTAT */
235#define REG_IRQSTAT 0x30 /* IRQ status / clear */
236
237#define REG_IMGSIZE 0x34 /* Image size */
238#define IMGSZ_V_MASK 0x1fff0000
239#define IMGSZ_V_SHIFT 16
240#define IMGSZ_H_MASK 0x00003fff
241#define REG_IMGOFFSET 0x38 /* IMage offset */
242
243#define REG_CTRL0 0x3c /* Control 0 */
244#define C0_ENABLE 0x00000001 /* Makes the whole thing go */
245
246/* Mask for all the format bits */
247#define C0_DF_MASK 0x00fffffc /* Bits 2-23 */
248
249/* RGB ordering */
250#define C0_RGB4_RGBX 0x00000000
251#define C0_RGB4_XRGB 0x00000004
252#define C0_RGB4_BGRX 0x00000008
253#define C0_RGB4_XBGR 0x0000000c
254#define C0_RGB5_RGGB 0x00000000
255#define C0_RGB5_GRBG 0x00000004
256#define C0_RGB5_GBRG 0x00000008
257#define C0_RGB5_BGGR 0x0000000c
258
259/* Spec has two fields for DIN and DOUT, but they must match, so
260 combine them here. */
261#define C0_DF_YUV 0x00000000 /* Data is YUV */
262#define C0_DF_RGB 0x000000a0 /* ... RGB */
263#define C0_DF_BAYER 0x00000140 /* ... Bayer */
264/* 8-8-8 must be missing from the below - ask */
265#define C0_RGBF_565 0x00000000
266#define C0_RGBF_444 0x00000800
267#define C0_RGB_BGR 0x00001000 /* Blue comes first */
268#define C0_YUV_PLANAR 0x00000000 /* YUV 422 planar format */
269#define C0_YUV_PACKED 0x00008000 /* YUV 422 packed */
270#define C0_YUV_420PL 0x0000a000 /* YUV 420 planar */
271/* Think that 420 packed must be 111 - ask */
272#define C0_YUVE_YUYV 0x00000000 /* Y1CbY0Cr */
273#define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
274#define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
275#define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
276#define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */
277#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */
278#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */
279#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */
280/* Bayer bits 18,19 if needed */
281#define C0_HPOL_LOW 0x01000000 /* HSYNC polarity active low */
282#define C0_VPOL_LOW 0x02000000 /* VSYNC polarity active low */
283#define C0_VCLK_LOW 0x04000000 /* VCLK on falling edge */
284#define C0_DOWNSCALE 0x08000000 /* Enable downscaler */
285#define C0_SIFM_MASK 0xc0000000 /* SIF mode bits */
286#define C0_SIF_HVSYNC 0x00000000 /* Use H/VSYNC */
287#define CO_SOF_NOSYNC 0x40000000 /* Use inband active signaling */
288
289/* Bits below C1_444ALPHA are not present in Cafe */
290#define REG_CTRL1 0x40 /* Control 1 */
291#define C1_CLKGATE 0x00000001 /* Sensor clock gate */
292#define C1_DESC_ENA 0x00000100 /* DMA descriptor enable */
293#define C1_DESC_3WORD 0x00000200 /* Three-word descriptors used */
294#define C1_444ALPHA 0x00f00000 /* Alpha field in RGB444 */
295#define C1_ALPHA_SHFT 20
296#define C1_DMAB32 0x00000000 /* 32-byte DMA burst */
297#define C1_DMAB16 0x02000000 /* 16-byte DMA burst */
298#define C1_DMAB64 0x04000000 /* 64-byte DMA burst */
299#define C1_DMAB_MASK 0x06000000
300#define C1_TWOBUFS 0x08000000 /* Use only two DMA buffers */
301#define C1_PWRDWN 0x10000000 /* Power down */
302
303#define REG_CLKCTRL 0x88 /* Clock control */
304#define CLK_DIV_MASK 0x0000ffff /* Upper bits RW "reserved" */
305
306/* This appears to be a Cafe-only register */
307#define REG_UBAR 0xc4 /* Upper base address register */
308
309/* Armada 610 DMA descriptor registers */
310#define REG_DMA_DESC_Y 0x200
311#define REG_DMA_DESC_U 0x204
312#define REG_DMA_DESC_V 0x208
313#define REG_DESC_LEN_Y 0x20c /* Lengths are in bytes */
314#define REG_DESC_LEN_U 0x210
315#define REG_DESC_LEN_V 0x214
316
317/*
318 * Useful stuff that probably belongs somewhere global.
319 */
320#define VGA_WIDTH 640
321#define VGA_HEIGHT 480
322
323#endif /* _MCAM_CORE_H */
diff --git a/drivers/media/video/marvell-ccic/mmp-driver.c b/drivers/media/video/marvell-ccic/mmp-driver.c
new file mode 100644
index 000000000000..d6b764541375
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/mmp-driver.c
@@ -0,0 +1,340 @@
1/*
2 * Support for the camera device found on Marvell MMP processors; known
3 * to work with the Armada 610 as used in the OLPC 1.75 system.
4 *
5 * Copyright 2011 Jonathan Corbet <corbet@lwn.net>
6 *
7 * This file may be distributed under the terms of the GNU General
8 * Public License, version 2.
9 */
10
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/i2c.h>
15#include <linux/i2c-gpio.h>
16#include <linux/interrupt.h>
17#include <linux/spinlock.h>
18#include <linux/slab.h>
19#include <linux/videodev2.h>
20#include <media/v4l2-device.h>
21#include <media/v4l2-chip-ident.h>
22#include <media/mmp-camera.h>
23#include <linux/device.h>
24#include <linux/platform_device.h>
25#include <linux/gpio.h>
26#include <linux/io.h>
27#include <linux/delay.h>
28#include <linux/list.h>
29
30#include "mcam-core.h"
31
32MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
33MODULE_LICENSE("GPL");
34
35struct mmp_camera {
36 void *power_regs;
37 struct platform_device *pdev;
38 struct mcam_camera mcam;
39 struct list_head devlist;
40 int irq;
41};
42
/* Map an embedded mcam_camera back to its containing mmp_camera. */
43static inline struct mmp_camera *mcam_to_cam(struct mcam_camera *mcam)
44{
45 return container_of(mcam, struct mmp_camera, mcam);
46}
47
48/*
49 * A silly little infrastructure so we can keep track of our devices.
50 * Chances are that we will never have more than one of them, but
51 * the Armada 610 *does* have two controllers...
52 */
53
54static LIST_HEAD(mmpcam_devices);
55static struct mutex mmpcam_devices_lock;
56
/* Add a camera to the global device list, under mmpcam_devices_lock. */
57static void mmpcam_add_device(struct mmp_camera *cam)
58{
59 mutex_lock(&mmpcam_devices_lock);
60 list_add(&cam->devlist, &mmpcam_devices);
61 mutex_unlock(&mmpcam_devices_lock);
62}
63
/* Remove a camera from the global device list, under mmpcam_devices_lock. */
64static void mmpcam_remove_device(struct mmp_camera *cam)
65{
66 mutex_lock(&mmpcam_devices_lock);
67 list_del(&cam->devlist);
68 mutex_unlock(&mmpcam_devices_lock);
69}
70
71/*
72 * Platform dev remove passes us a platform_device, and there's
73 * no handy unused drvdata to stash a backpointer in. So just
74 * dig it out of our list.
75 */
76static struct mmp_camera *mmpcam_find_device(struct platform_device *pdev)
77{
78 struct mmp_camera *cam;
79
80 mutex_lock(&mmpcam_devices_lock);
81 list_for_each_entry(cam, &mmpcam_devices, devlist) {
82 if (cam->pdev == pdev) {
83 mutex_unlock(&mmpcam_devices_lock);
84 return cam;
85 }
86 }
87 mutex_unlock(&mmpcam_devices_lock);
88 return NULL;
89}
90
91
92
93
94/*
95 * Power-related registers; this almost certainly belongs
96 * somewhere else.
97 *
98 * ARMADA 610 register manual, sec 7.2.1, p1842.
99 */
100#define CPU_SUBSYS_PMU_BASE 0xd4282800
101#define REG_CCIC_DCGCR 0x28 /* CCIC dyn clock gate ctrl reg */
102#define REG_CCIC_CRCR 0x50 /* CCIC clk reset ctrl reg */
103
104/*
105 * Power control.
106 */
107static void mmpcam_power_up(struct mcam_camera *mcam)
108{
109 struct mmp_camera *cam = mcam_to_cam(mcam);
110 struct mmp_camera_platform_data *pdata;
111/*
112 * Turn on power and clocks to the controller.
113 */
114 iowrite32(0x3f, cam->power_regs + REG_CCIC_DCGCR);
115 iowrite32(0x3805b, cam->power_regs + REG_CCIC_CRCR);
116 mdelay(1);
117/*
118 * Provide power to the sensor.
119 */
120 mcam_reg_write(mcam, REG_CLKCTRL, 0x60000002);
121 pdata = cam->pdev->dev.platform_data;
122 gpio_set_value(pdata->sensor_power_gpio, 1);
123 mdelay(5);
124 mcam_reg_clear_bit(mcam, REG_CTRL1, 0x10000000);
125 gpio_set_value(pdata->sensor_reset_gpio, 0); /* reset is active low */
126 mdelay(5);
127 gpio_set_value(pdata->sensor_reset_gpio, 1); /* reset is active low */
128 mdelay(5);
129}
130
/*
 * Power the sensor and controller down; mirror image of
 * mmpcam_power_up() above.
 */
131static void mmpcam_power_down(struct mcam_camera *mcam)
132{
133 struct mmp_camera *cam = mcam_to_cam(mcam);
134 struct mmp_camera_platform_data *pdata;
135/*
136 * Turn off clocks and set reset lines
137 */
138 iowrite32(0, cam->power_regs + REG_CCIC_DCGCR);
139 iowrite32(0, cam->power_regs + REG_CCIC_CRCR);
140/*
141 * Shut down the sensor.
142 */
143 pdata = cam->pdev->dev.platform_data;
144 gpio_set_value(pdata->sensor_power_gpio, 0);
145 gpio_set_value(pdata->sensor_reset_gpio, 0);
146}
147
148
/*
 * Interrupt handler: read the IRQ status and hand it to the core with
 * dev_lock held, as mccic_irq() requires. The line is requested
 * IRQF_SHARED, so IRQ_RETVAL(handled) correctly reports IRQ_NONE
 * when the bits weren't ours.
 */
149static irqreturn_t mmpcam_irq(int irq, void *data)
150{
151 struct mcam_camera *mcam = data;
152 unsigned int irqs, handled;
153
154 spin_lock(&mcam->dev_lock);
155 irqs = mcam_reg_read(mcam, REG_IRQSTAT);
156 handled = mccic_irq(mcam, irqs);
157 spin_unlock(&mcam->dev_lock);
158 return IRQ_RETVAL(handled);
159}
160
161
/*
 * Probe one MMP camera controller: map the controller and power/clock
 * register windows, find the i2c adapter and sensor GPIOs, power up,
 * register with the mcam core, and finally hook up the interrupt.
 * Unwinds in strict reverse order on failure.
 *
 * NOTE(review): mcam->platform / MHP_Armada610 do not appear in the
 * version of mcam-core.h shown alongside this file — confirm the
 * header actually declares them.
 */
162static int mmpcam_probe(struct platform_device *pdev)
163{
164 struct mmp_camera *cam;
165 struct mcam_camera *mcam;
166 struct resource *res;
167 struct mmp_camera_platform_data *pdata;
168 int ret;
169
170 cam = kzalloc(sizeof(*cam), GFP_KERNEL);
171 if (cam == NULL)
172 return -ENOMEM;
173 cam->pdev = pdev;
174 INIT_LIST_HEAD(&cam->devlist);
175
176 mcam = &cam->mcam;
177 mcam->platform = MHP_Armada610;
178 mcam->plat_power_up = mmpcam_power_up;
179 mcam->plat_power_down = mmpcam_power_down;
180 mcam->dev = &pdev->dev;
181 mcam->use_smbus = 0;
182 mcam->chip_id = V4L2_IDENT_ARMADA610;
183 mcam->buffer_mode = B_DMA_sg;
184 spin_lock_init(&mcam->dev_lock);
185 /*
186 * Get our I/O memory.
187 */
188 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
189 if (res == NULL) {
190 dev_err(&pdev->dev, "no iomem resource!\n");
191 ret = -ENODEV;
192 goto out_free;
193 }
194 mcam->regs = ioremap(res->start, resource_size(res));
195 if (mcam->regs == NULL) {
196 dev_err(&pdev->dev, "MMIO ioremap fail\n");
197 ret = -ENODEV;
198 goto out_free;
199 }
200 /*
201 * Power/clock memory is elsewhere; get it too. Perhaps this
202 * should really be managed outside of this driver?
203 */
204 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
205 if (res == NULL) {
206 dev_err(&pdev->dev, "no power resource!\n");
207 ret = -ENODEV;
208 goto out_unmap1;
209 }
210 cam->power_regs = ioremap(res->start, resource_size(res));
211 if (cam->power_regs == NULL) {
212 dev_err(&pdev->dev, "power MMIO ioremap fail\n");
213 ret = -ENODEV;
214 goto out_unmap1;
215 }
216 /*
217 * Find the i2c adapter. This assumes, of course, that the
218 * i2c bus is already up and functioning.
219 */
220 pdata = pdev->dev.platform_data;
221 mcam->i2c_adapter = platform_get_drvdata(pdata->i2c_device);
222 if (mcam->i2c_adapter == NULL) {
223 ret = -ENODEV;
224 dev_err(&pdev->dev, "No i2c adapter\n");
225 goto out_unmap2;
226 }
227 /*
228 * Sensor GPIO pins.
229 */
230 ret = gpio_request(pdata->sensor_power_gpio, "cam-power");
231 if (ret) {
232 dev_err(&pdev->dev, "Can't get sensor power gpio %d",
233 pdata->sensor_power_gpio);
234 goto out_unmap2;
235 }
236 gpio_direction_output(pdata->sensor_power_gpio, 0);
237 ret = gpio_request(pdata->sensor_reset_gpio, "cam-reset");
238 if (ret) {
239 dev_err(&pdev->dev, "Can't get sensor reset gpio %d",
240 pdata->sensor_reset_gpio);
241 goto out_gpio;
242 }
243 gpio_direction_output(pdata->sensor_reset_gpio, 0);
244 /*
245 * Power the device up and hand it off to the core.
246 */
247 mmpcam_power_up(mcam);
248 ret = mccic_register(mcam);
249 if (ret)
250 goto out_gpio2;
251 /*
252 * Finally, set up our IRQ now that the core is ready to
253 * deal with it.
254 */
255 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
256 if (res == NULL) {
257 ret = -ENODEV;
258 goto out_unregister;
259 }
260 cam->irq = res->start;
261 ret = request_irq(cam->irq, mmpcam_irq, IRQF_SHARED,
262 "mmp-camera", mcam);
263 if (ret == 0) {
	/* Success: only now does the device become findable by remove() */
264 mmpcam_add_device(cam);
265 return 0;
266 }
267
	/* Error unwind: each label undoes everything set up after its goto */
268out_unregister:
269 mccic_shutdown(mcam);
270out_gpio2:
271 mmpcam_power_down(mcam);
272 gpio_free(pdata->sensor_reset_gpio);
273out_gpio:
274 gpio_free(pdata->sensor_power_gpio);
275out_unmap2:
276 iounmap(cam->power_regs);
277out_unmap1:
278 iounmap(mcam->regs);
279out_free:
280 kfree(cam);
281 return ret;
282}
283
284
/*
 * Tear down one camera: reverse of the successful mmpcam_probe()
 * path, starting by removing it from the device list so it can no
 * longer be found.
 */
285static int mmpcam_remove(struct mmp_camera *cam)
286{
287 struct mcam_camera *mcam = &cam->mcam;
288 struct mmp_camera_platform_data *pdata;
289
290 mmpcam_remove_device(cam);
291 free_irq(cam->irq, mcam);
292 mccic_shutdown(mcam);
293 mmpcam_power_down(mcam);
294 pdata = cam->pdev->dev.platform_data;
295 gpio_free(pdata->sensor_reset_gpio);
296 gpio_free(pdata->sensor_power_gpio);
297 iounmap(cam->power_regs);
298 iounmap(mcam->regs);
299 kfree(cam);
300 return 0;
301}
302
/*
 * Platform remove callback: look the device up in our list (no
 * drvdata backpointer available) and tear it down.
 */
303static int mmpcam_platform_remove(struct platform_device *pdev)
304{
305 struct mmp_camera *cam = mmpcam_find_device(pdev);
306
307 if (cam == NULL)
308 return -ENODEV;
309 return mmpcam_remove(cam);
310}
311
312
/* Platform driver glue; bound by name to the "mmp-camera" device. */
313static struct platform_driver mmpcam_driver = {
314 .probe = mmpcam_probe,
315 .remove = mmpcam_platform_remove,
316 .driver = {
317 .name = "mmp-camera",
318 .owner = THIS_MODULE
319 }
320};
321
322
/*
 * Module init. NOTE(review): mmpcam_devices_lock could be statically
 * initialized with DEFINE_MUTEX() instead of mutex_init() here.
 */
323static int __init mmpcam_init_module(void)
324{
325 mutex_init(&mmpcam_devices_lock);
326 return platform_driver_register(&mmpcam_driver);
327}
328
/* Module exit: unregistering the driver removes every bound device. */
329static void __exit mmpcam_exit_module(void)
330{
331 platform_driver_unregister(&mmpcam_driver);
332 /*
333 * platform_driver_unregister() should have emptied the list
334 */
335 if (!list_empty(&mmpcam_devices))
336 printk(KERN_ERR "mmp_camera leaving devices behind\n");
337}
338
339module_init(mmpcam_init_module);
340module_exit(mmpcam_exit_module);
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index b03d74e09a3c..166bf9349c10 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -19,7 +19,6 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/fs.h> 21#include <linux/fs.h>
22#include <linux/version.h>
23#include <linux/timer.h> 22#include <linux/timer.h>
24#include <linux/sched.h> 23#include <linux/sched.h>
25#include <linux/slab.h> 24#include <linux/slab.h>
@@ -35,7 +34,7 @@
35MODULE_DESCRIPTION("Virtual device for mem2mem framework testing"); 34MODULE_DESCRIPTION("Virtual device for mem2mem framework testing");
36MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>"); 35MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
37MODULE_LICENSE("GPL"); 36MODULE_LICENSE("GPL");
38 37MODULE_VERSION("0.1.1");
39 38
40#define MIN_W 32 39#define MIN_W 32
41#define MIN_H 32 40#define MIN_H 32
@@ -380,7 +379,6 @@ static int vidioc_querycap(struct file *file, void *priv,
380 strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1); 379 strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
381 strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1); 380 strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
382 cap->bus_info[0] = 0; 381 cap->bus_info[0] = 0;
383 cap->version = KERNEL_VERSION(0, 1, 0);
384 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT 382 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
385 | V4L2_CAP_STREAMING; 383 | V4L2_CAP_STREAMING;
386 384
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index e2bbd8c35c98..4da9cca939c1 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -603,13 +603,9 @@ static int mt9m001_video_probe(struct soc_camera_device *icd,
603 unsigned long flags; 603 unsigned long flags;
604 int ret; 604 int ret;
605 605
606 /* 606 /* We must have a parent by now. And it cannot be a wrong one. */
607 * We must have a parent by now. And it cannot be a wrong one. 607 BUG_ON(!icd->parent ||
608 * So this entire test is completely redundant. 608 to_soc_camera_host(icd->parent)->nr != icd->iface);
609 */
610 if (!icd->dev.parent ||
611 to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
612 return -ENODEV;
613 609
614 /* Enable the chip */ 610 /* Enable the chip */
615 data = reg_write(client, MT9M001_CHIP_ENABLE, 1); 611 data = reg_write(client, MT9M001_CHIP_ENABLE, 1);
@@ -675,8 +671,8 @@ static void mt9m001_video_remove(struct soc_camera_device *icd)
675{ 671{
676 struct soc_camera_link *icl = to_soc_camera_link(icd); 672 struct soc_camera_link *icl = to_soc_camera_link(icd);
677 673
678 dev_dbg(&icd->dev, "Video removed: %p, %p\n", 674 dev_dbg(icd->pdev, "Video removed: %p, %p\n",
679 icd->dev.parent, icd->vdev); 675 icd->parent, icd->vdev);
680 if (icl->free_bus) 676 if (icl->free_bus)
681 icl->free_bus(icl); 677 icl->free_bus(icl);
682} 678}
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index ebebed929627..a357aa889fc6 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -63,6 +63,12 @@
63#define MT9M111_RESET_RESTART_FRAME (1 << 1) 63#define MT9M111_RESET_RESTART_FRAME (1 << 1)
64#define MT9M111_RESET_RESET_MODE (1 << 0) 64#define MT9M111_RESET_RESET_MODE (1 << 0)
65 65
66#define MT9M111_RM_FULL_POWER_RD (0 << 10)
67#define MT9M111_RM_LOW_POWER_RD (1 << 10)
68#define MT9M111_RM_COL_SKIP_4X (1 << 5)
69#define MT9M111_RM_ROW_SKIP_4X (1 << 4)
70#define MT9M111_RM_COL_SKIP_2X (1 << 3)
71#define MT9M111_RM_ROW_SKIP_2X (1 << 2)
66#define MT9M111_RMB_MIRROR_COLS (1 << 1) 72#define MT9M111_RMB_MIRROR_COLS (1 << 1)
67#define MT9M111_RMB_MIRROR_ROWS (1 << 0) 73#define MT9M111_RMB_MIRROR_ROWS (1 << 0)
68#define MT9M111_CTXT_CTRL_RESTART (1 << 15) 74#define MT9M111_CTXT_CTRL_RESTART (1 << 15)
@@ -95,7 +101,8 @@
95 101
96#define MT9M111_OPMODE_AUTOEXPO_EN (1 << 14) 102#define MT9M111_OPMODE_AUTOEXPO_EN (1 << 14)
97#define MT9M111_OPMODE_AUTOWHITEBAL_EN (1 << 1) 103#define MT9M111_OPMODE_AUTOWHITEBAL_EN (1 << 1)
98 104#define MT9M111_OUTFMT_FLIP_BAYER_COL (1 << 9)
105#define MT9M111_OUTFMT_FLIP_BAYER_ROW (1 << 8)
99#define MT9M111_OUTFMT_PROCESSED_BAYER (1 << 14) 106#define MT9M111_OUTFMT_PROCESSED_BAYER (1 << 14)
100#define MT9M111_OUTFMT_BYPASS_IFP (1 << 10) 107#define MT9M111_OUTFMT_BYPASS_IFP (1 << 10)
101#define MT9M111_OUTFMT_INV_PIX_CLOCK (1 << 9) 108#define MT9M111_OUTFMT_INV_PIX_CLOCK (1 << 9)
@@ -110,9 +117,8 @@
110#define MT9M111_OUTFMT_TST_RAMP_FRAME (3 << 4) 117#define MT9M111_OUTFMT_TST_RAMP_FRAME (3 << 4)
111#define MT9M111_OUTFMT_SHIFT_3_UP (1 << 3) 118#define MT9M111_OUTFMT_SHIFT_3_UP (1 << 3)
112#define MT9M111_OUTFMT_AVG_CHROMA (1 << 2) 119#define MT9M111_OUTFMT_AVG_CHROMA (1 << 2)
113#define MT9M111_OUTFMT_SWAP_YCbCr_C_Y (1 << 1) 120#define MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN (1 << 1)
114#define MT9M111_OUTFMT_SWAP_RGB_EVEN (1 << 1) 121#define MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B (1 << 0)
115#define MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr (1 << 0)
116 122
117/* 123/*
118 * Camera control register addresses (0x200..0x2ff not implemented) 124 * Camera control register addresses (0x200..0x2ff not implemented)
@@ -122,6 +128,8 @@
122#define reg_write(reg, val) mt9m111_reg_write(client, MT9M111_##reg, (val)) 128#define reg_write(reg, val) mt9m111_reg_write(client, MT9M111_##reg, (val))
123#define reg_set(reg, val) mt9m111_reg_set(client, MT9M111_##reg, (val)) 129#define reg_set(reg, val) mt9m111_reg_set(client, MT9M111_##reg, (val))
124#define reg_clear(reg, val) mt9m111_reg_clear(client, MT9M111_##reg, (val)) 130#define reg_clear(reg, val) mt9m111_reg_clear(client, MT9M111_##reg, (val))
131#define reg_mask(reg, val, mask) mt9m111_reg_mask(client, MT9M111_##reg, \
132 (val), (mask))
125 133
126#define MT9M111_MIN_DARK_ROWS 8 134#define MT9M111_MIN_DARK_ROWS 8
127#define MT9M111_MIN_DARK_COLS 26 135#define MT9M111_MIN_DARK_COLS 26
@@ -153,7 +161,11 @@ static const struct mt9m111_datafmt mt9m111_colour_fmts[] = {
153 {V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG}, 161 {V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
154 {V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG}, 162 {V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG},
155 {V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB}, 163 {V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
164 {V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
156 {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB}, 165 {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
166 {V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB},
167 {V4L2_MBUS_FMT_BGR565_2X8_LE, V4L2_COLORSPACE_SRGB},
168 {V4L2_MBUS_FMT_BGR565_2X8_BE, V4L2_COLORSPACE_SRGB},
157 {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB}, 169 {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
158 {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB}, 170 {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
159}; 171};
@@ -169,6 +181,8 @@ struct mt9m111 {
169 * from v4l2-chip-ident.h */ 181 * from v4l2-chip-ident.h */
170 enum mt9m111_context context; 182 enum mt9m111_context context;
171 struct v4l2_rect rect; 183 struct v4l2_rect rect;
184 struct mutex power_lock; /* lock to protect power_count */
185 int power_count;
172 const struct mt9m111_datafmt *fmt; 186 const struct mt9m111_datafmt *fmt;
173 unsigned int gain; 187 unsigned int gain;
174 unsigned char autoexposure; 188 unsigned char autoexposure;
@@ -176,10 +190,6 @@ struct mt9m111 {
176 unsigned int powered:1; 190 unsigned int powered:1;
177 unsigned int hflip:1; 191 unsigned int hflip:1;
178 unsigned int vflip:1; 192 unsigned int vflip:1;
179 unsigned int swap_rgb_even_odd:1;
180 unsigned int swap_rgb_red_blue:1;
181 unsigned int swap_yuv_y_chromas:1;
182 unsigned int swap_yuv_cb_cr:1;
183 unsigned int autowhitebalance:1; 193 unsigned int autowhitebalance:1;
184}; 194};
185 195
@@ -248,12 +258,26 @@ static int mt9m111_reg_clear(struct i2c_client *client, const u16 reg,
248 int ret; 258 int ret;
249 259
250 ret = mt9m111_reg_read(client, reg); 260 ret = mt9m111_reg_read(client, reg);
251 return mt9m111_reg_write(client, reg, ret & ~data); 261 if (ret >= 0)
262 ret = mt9m111_reg_write(client, reg, ret & ~data);
263 return ret;
252} 264}
253 265
254static int mt9m111_set_context(struct i2c_client *client, 266static int mt9m111_reg_mask(struct i2c_client *client, const u16 reg,
267 const u16 data, const u16 mask)
268{
269 int ret;
270
271 ret = mt9m111_reg_read(client, reg);
272 if (ret >= 0)
273 ret = mt9m111_reg_write(client, reg, (ret & ~mask) | data);
274 return ret;
275}
276
277static int mt9m111_set_context(struct mt9m111 *mt9m111,
255 enum mt9m111_context ctxt) 278 enum mt9m111_context ctxt)
256{ 279{
280 struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
257 int valB = MT9M111_CTXT_CTRL_RESTART | MT9M111_CTXT_CTRL_DEFECTCOR_B 281 int valB = MT9M111_CTXT_CTRL_RESTART | MT9M111_CTXT_CTRL_DEFECTCOR_B
258 | MT9M111_CTXT_CTRL_RESIZE_B | MT9M111_CTXT_CTRL_CTRL2_B 282 | MT9M111_CTXT_CTRL_RESIZE_B | MT9M111_CTXT_CTRL_CTRL2_B
259 | MT9M111_CTXT_CTRL_GAMMA_B | MT9M111_CTXT_CTRL_READ_MODE_B 283 | MT9M111_CTXT_CTRL_GAMMA_B | MT9M111_CTXT_CTRL_READ_MODE_B
@@ -267,10 +291,10 @@ static int mt9m111_set_context(struct i2c_client *client,
267 return reg_write(CONTEXT_CONTROL, valA); 291 return reg_write(CONTEXT_CONTROL, valA);
268} 292}
269 293
270static int mt9m111_setup_rect(struct i2c_client *client, 294static int mt9m111_setup_rect(struct mt9m111 *mt9m111,
271 struct v4l2_rect *rect) 295 struct v4l2_rect *rect)
272{ 296{
273 struct mt9m111 *mt9m111 = to_mt9m111(client); 297 struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
274 int ret, is_raw_format; 298 int ret, is_raw_format;
275 int width = rect->width; 299 int width = rect->width;
276 int height = rect->height; 300 int height = rect->height;
@@ -312,81 +336,9 @@ static int mt9m111_setup_rect(struct i2c_client *client,
312 return ret; 336 return ret;
313} 337}
314 338
315static int mt9m111_setup_pixfmt(struct i2c_client *client, u16 outfmt) 339static int mt9m111_enable(struct mt9m111 *mt9m111)
316{ 340{
317 int ret; 341 struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
318 u16 mask = MT9M111_OUTFMT_PROCESSED_BAYER | MT9M111_OUTFMT_RGB |
319 MT9M111_OUTFMT_BYPASS_IFP | MT9M111_OUTFMT_SWAP_RGB_EVEN |
320 MT9M111_OUTFMT_RGB565 | MT9M111_OUTFMT_RGB555 |
321 MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr |
322 MT9M111_OUTFMT_SWAP_YCbCr_C_Y;
323
324 ret = reg_read(OUTPUT_FORMAT_CTRL2_A);
325 if (ret >= 0)
326 ret = reg_write(OUTPUT_FORMAT_CTRL2_A, (ret & ~mask) | outfmt);
327 if (!ret)
328 ret = reg_read(OUTPUT_FORMAT_CTRL2_B);
329 if (ret >= 0)
330 ret = reg_write(OUTPUT_FORMAT_CTRL2_B, (ret & ~mask) | outfmt);
331
332 return ret;
333}
334
335static int mt9m111_setfmt_bayer8(struct i2c_client *client)
336{
337 return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_PROCESSED_BAYER |
338 MT9M111_OUTFMT_RGB);
339}
340
341static int mt9m111_setfmt_bayer10(struct i2c_client *client)
342{
343 return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_BYPASS_IFP);
344}
345
346static int mt9m111_setfmt_rgb565(struct i2c_client *client)
347{
348 struct mt9m111 *mt9m111 = to_mt9m111(client);
349 int val = 0;
350
351 if (mt9m111->swap_rgb_red_blue)
352 val |= MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr;
353 if (mt9m111->swap_rgb_even_odd)
354 val |= MT9M111_OUTFMT_SWAP_RGB_EVEN;
355 val |= MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565;
356
357 return mt9m111_setup_pixfmt(client, val);
358}
359
360static int mt9m111_setfmt_rgb555(struct i2c_client *client)
361{
362 struct mt9m111 *mt9m111 = to_mt9m111(client);
363 int val = 0;
364
365 if (mt9m111->swap_rgb_red_blue)
366 val |= MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr;
367 if (mt9m111->swap_rgb_even_odd)
368 val |= MT9M111_OUTFMT_SWAP_RGB_EVEN;
369 val |= MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555;
370
371 return mt9m111_setup_pixfmt(client, val);
372}
373
374static int mt9m111_setfmt_yuv(struct i2c_client *client)
375{
376 struct mt9m111 *mt9m111 = to_mt9m111(client);
377 int val = 0;
378
379 if (mt9m111->swap_yuv_cb_cr)
380 val |= MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr;
381 if (mt9m111->swap_yuv_y_chromas)
382 val |= MT9M111_OUTFMT_SWAP_YCbCr_C_Y;
383
384 return mt9m111_setup_pixfmt(client, val);
385}
386
387static int mt9m111_enable(struct i2c_client *client)
388{
389 struct mt9m111 *mt9m111 = to_mt9m111(client);
390 int ret; 342 int ret;
391 343
392 ret = reg_set(RESET, MT9M111_RESET_CHIP_ENABLE); 344 ret = reg_set(RESET, MT9M111_RESET_CHIP_ENABLE);
@@ -395,8 +347,9 @@ static int mt9m111_enable(struct i2c_client *client)
395 return ret; 347 return ret;
396} 348}
397 349
398static int mt9m111_reset(struct i2c_client *client) 350static int mt9m111_reset(struct mt9m111 *mt9m111)
399{ 351{
352 struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
400 int ret; 353 int ret;
401 354
402 ret = reg_set(RESET, MT9M111_RESET_RESET_MODE); 355 ret = reg_set(RESET, MT9M111_RESET_RESET_MODE);
@@ -424,11 +377,9 @@ static int mt9m111_set_bus_param(struct soc_camera_device *icd, unsigned long f)
424 return 0; 377 return 0;
425} 378}
426 379
427static int mt9m111_make_rect(struct i2c_client *client, 380static int mt9m111_make_rect(struct mt9m111 *mt9m111,
428 struct v4l2_rect *rect) 381 struct v4l2_rect *rect)
429{ 382{
430 struct mt9m111 *mt9m111 = to_mt9m111(client);
431
432 if (mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 || 383 if (mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
433 mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) { 384 mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) {
434 /* Bayer format - even size lengths */ 385 /* Bayer format - even size lengths */
@@ -444,14 +395,14 @@ static int mt9m111_make_rect(struct i2c_client *client,
444 soc_camera_limit_side(&rect->top, &rect->height, 395 soc_camera_limit_side(&rect->top, &rect->height,
445 MT9M111_MIN_DARK_ROWS, 2, MT9M111_MAX_HEIGHT); 396 MT9M111_MIN_DARK_ROWS, 2, MT9M111_MAX_HEIGHT);
446 397
447 return mt9m111_setup_rect(client, rect); 398 return mt9m111_setup_rect(mt9m111, rect);
448} 399}
449 400
450static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) 401static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
451{ 402{
452 struct v4l2_rect rect = a->c; 403 struct v4l2_rect rect = a->c;
453 struct i2c_client *client = v4l2_get_subdevdata(sd); 404 struct i2c_client *client = v4l2_get_subdevdata(sd);
454 struct mt9m111 *mt9m111 = to_mt9m111(client); 405 struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
455 int ret; 406 int ret;
456 407
457 dev_dbg(&client->dev, "%s left=%d, top=%d, width=%d, height=%d\n", 408 dev_dbg(&client->dev, "%s left=%d, top=%d, width=%d, height=%d\n",
@@ -460,7 +411,7 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
460 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 411 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
461 return -EINVAL; 412 return -EINVAL;
462 413
463 ret = mt9m111_make_rect(client, &rect); 414 ret = mt9m111_make_rect(mt9m111, &rect);
464 if (!ret) 415 if (!ret)
465 mt9m111->rect = rect; 416 mt9m111->rect = rect;
466 return ret; 417 return ret;
@@ -468,8 +419,7 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
468 419
469static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) 420static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
470{ 421{
471 struct i2c_client *client = v4l2_get_subdevdata(sd); 422 struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
472 struct mt9m111 *mt9m111 = to_mt9m111(client);
473 423
474 a->c = mt9m111->rect; 424 a->c = mt9m111->rect;
475 a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 425 a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -496,8 +446,7 @@ static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
496static int mt9m111_g_fmt(struct v4l2_subdev *sd, 446static int mt9m111_g_fmt(struct v4l2_subdev *sd,
497 struct v4l2_mbus_framefmt *mf) 447 struct v4l2_mbus_framefmt *mf)
498{ 448{
499 struct i2c_client *client = v4l2_get_subdevdata(sd); 449 struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
500 struct mt9m111 *mt9m111 = to_mt9m111(client);
501 450
502 mf->width = mt9m111->rect.width; 451 mf->width = mt9m111->rect.width;
503 mf->height = mt9m111->rect.height; 452 mf->height = mt9m111->rect.height;
@@ -508,51 +457,73 @@ static int mt9m111_g_fmt(struct v4l2_subdev *sd,
508 return 0; 457 return 0;
509} 458}
510 459
511static int mt9m111_set_pixfmt(struct i2c_client *client, 460static int mt9m111_set_pixfmt(struct mt9m111 *mt9m111,
512 enum v4l2_mbus_pixelcode code) 461 enum v4l2_mbus_pixelcode code)
513{ 462{
514 struct mt9m111 *mt9m111 = to_mt9m111(client); 463 struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
464 u16 data_outfmt2, mask_outfmt2 = MT9M111_OUTFMT_PROCESSED_BAYER |
465 MT9M111_OUTFMT_BYPASS_IFP | MT9M111_OUTFMT_RGB |
466 MT9M111_OUTFMT_RGB565 | MT9M111_OUTFMT_RGB555 |
467 MT9M111_OUTFMT_RGB444x | MT9M111_OUTFMT_RGBx444 |
468 MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN |
469 MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
515 int ret; 470 int ret;
516 471
517 switch (code) { 472 switch (code) {
518 case V4L2_MBUS_FMT_SBGGR8_1X8: 473 case V4L2_MBUS_FMT_SBGGR8_1X8:
519 ret = mt9m111_setfmt_bayer8(client); 474 data_outfmt2 = MT9M111_OUTFMT_PROCESSED_BAYER |
475 MT9M111_OUTFMT_RGB;
520 break; 476 break;
521 case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE: 477 case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE:
522 ret = mt9m111_setfmt_bayer10(client); 478 data_outfmt2 = MT9M111_OUTFMT_BYPASS_IFP | MT9M111_OUTFMT_RGB;
523 break; 479 break;
524 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: 480 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
525 ret = mt9m111_setfmt_rgb555(client); 481 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555 |
482 MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN;
483 break;
484 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE:
485 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555;
526 break; 486 break;
527 case V4L2_MBUS_FMT_RGB565_2X8_LE: 487 case V4L2_MBUS_FMT_RGB565_2X8_LE:
528 ret = mt9m111_setfmt_rgb565(client); 488 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 |
489 MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN;
490 break;
491 case V4L2_MBUS_FMT_RGB565_2X8_BE:
492 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565;
493 break;
494 case V4L2_MBUS_FMT_BGR565_2X8_BE:
495 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 |
496 MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
497 break;
498 case V4L2_MBUS_FMT_BGR565_2X8_LE:
499 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 |
500 MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN |
501 MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
529 break; 502 break;
530 case V4L2_MBUS_FMT_UYVY8_2X8: 503 case V4L2_MBUS_FMT_UYVY8_2X8:
531 mt9m111->swap_yuv_y_chromas = 0; 504 data_outfmt2 = 0;
532 mt9m111->swap_yuv_cb_cr = 0;
533 ret = mt9m111_setfmt_yuv(client);
534 break; 505 break;
535 case V4L2_MBUS_FMT_VYUY8_2X8: 506 case V4L2_MBUS_FMT_VYUY8_2X8:
536 mt9m111->swap_yuv_y_chromas = 0; 507 data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
537 mt9m111->swap_yuv_cb_cr = 1;
538 ret = mt9m111_setfmt_yuv(client);
539 break; 508 break;
540 case V4L2_MBUS_FMT_YUYV8_2X8: 509 case V4L2_MBUS_FMT_YUYV8_2X8:
541 mt9m111->swap_yuv_y_chromas = 1; 510 data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN;
542 mt9m111->swap_yuv_cb_cr = 0;
543 ret = mt9m111_setfmt_yuv(client);
544 break; 511 break;
545 case V4L2_MBUS_FMT_YVYU8_2X8: 512 case V4L2_MBUS_FMT_YVYU8_2X8:
546 mt9m111->swap_yuv_y_chromas = 1; 513 data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN |
547 mt9m111->swap_yuv_cb_cr = 1; 514 MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
548 ret = mt9m111_setfmt_yuv(client);
549 break; 515 break;
550 default: 516 default:
551 dev_err(&client->dev, "Pixel format not handled : %x\n", 517 dev_err(&client->dev, "Pixel format not handled: %x\n", code);
552 code); 518 return -EINVAL;
553 ret = -EINVAL;
554 } 519 }
555 520
521 ret = reg_mask(OUTPUT_FORMAT_CTRL2_A, data_outfmt2,
522 mask_outfmt2);
523 if (!ret)
524 ret = reg_mask(OUTPUT_FORMAT_CTRL2_B, data_outfmt2,
525 mask_outfmt2);
526
556 return ret; 527 return ret;
557} 528}
558 529
@@ -561,7 +532,7 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd,
561{ 532{
562 struct i2c_client *client = v4l2_get_subdevdata(sd); 533 struct i2c_client *client = v4l2_get_subdevdata(sd);
563 const struct mt9m111_datafmt *fmt; 534 const struct mt9m111_datafmt *fmt;
564 struct mt9m111 *mt9m111 = to_mt9m111(client); 535 struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
565 struct v4l2_rect rect = { 536 struct v4l2_rect rect = {
566 .left = mt9m111->rect.left, 537 .left = mt9m111->rect.left,
567 .top = mt9m111->rect.top, 538 .top = mt9m111->rect.top,
@@ -579,9 +550,9 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd,
579 "%s code=%x left=%d, top=%d, width=%d, height=%d\n", __func__, 550 "%s code=%x left=%d, top=%d, width=%d, height=%d\n", __func__,
580 mf->code, rect.left, rect.top, rect.width, rect.height); 551 mf->code, rect.left, rect.top, rect.width, rect.height);
581 552
582 ret = mt9m111_make_rect(client, &rect); 553 ret = mt9m111_make_rect(mt9m111, &rect);
583 if (!ret) 554 if (!ret)
584 ret = mt9m111_set_pixfmt(client, mf->code); 555 ret = mt9m111_set_pixfmt(mt9m111, mf->code);
585 if (!ret) { 556 if (!ret) {
586 mt9m111->rect = rect; 557 mt9m111->rect = rect;
587 mt9m111->fmt = fmt; 558 mt9m111->fmt = fmt;
@@ -594,8 +565,7 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd,
594static int mt9m111_try_fmt(struct v4l2_subdev *sd, 565static int mt9m111_try_fmt(struct v4l2_subdev *sd,
595 struct v4l2_mbus_framefmt *mf) 566 struct v4l2_mbus_framefmt *mf)
596{ 567{
597 struct i2c_client *client = v4l2_get_subdevdata(sd); 568 struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
598 struct mt9m111 *mt9m111 = to_mt9m111(client);
599 const struct mt9m111_datafmt *fmt; 569 const struct mt9m111_datafmt *fmt;
600 bool bayer = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 || 570 bool bayer = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
601 mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE; 571 mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE;
@@ -635,7 +605,7 @@ static int mt9m111_g_chip_ident(struct v4l2_subdev *sd,
635 struct v4l2_dbg_chip_ident *id) 605 struct v4l2_dbg_chip_ident *id)
636{ 606{
637 struct i2c_client *client = v4l2_get_subdevdata(sd); 607 struct i2c_client *client = v4l2_get_subdevdata(sd);
638 struct mt9m111 *mt9m111 = to_mt9m111(client); 608 struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
639 609
640 if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR) 610 if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
641 return -EINVAL; 611 return -EINVAL;
@@ -726,21 +696,16 @@ static const struct v4l2_queryctrl mt9m111_controls[] = {
726 } 696 }
727}; 697};
728 698
729static int mt9m111_resume(struct soc_camera_device *icd);
730static int mt9m111_suspend(struct soc_camera_device *icd, pm_message_t state);
731
732static struct soc_camera_ops mt9m111_ops = { 699static struct soc_camera_ops mt9m111_ops = {
733 .suspend = mt9m111_suspend,
734 .resume = mt9m111_resume,
735 .query_bus_param = mt9m111_query_bus_param, 700 .query_bus_param = mt9m111_query_bus_param,
736 .set_bus_param = mt9m111_set_bus_param, 701 .set_bus_param = mt9m111_set_bus_param,
737 .controls = mt9m111_controls, 702 .controls = mt9m111_controls,
738 .num_controls = ARRAY_SIZE(mt9m111_controls), 703 .num_controls = ARRAY_SIZE(mt9m111_controls),
739}; 704};
740 705
741static int mt9m111_set_flip(struct i2c_client *client, int flip, int mask) 706static int mt9m111_set_flip(struct mt9m111 *mt9m111, int flip, int mask)
742{ 707{
743 struct mt9m111 *mt9m111 = to_mt9m111(client); 708 struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
744 int ret; 709 int ret;
745 710
746 if (mt9m111->context == HIGHPOWER) { 711 if (mt9m111->context == HIGHPOWER) {
@@ -758,8 +723,9 @@ static int mt9m111_set_flip(struct i2c_client *client, int flip, int mask)
758 return ret; 723 return ret;
759} 724}
760 725
761static int mt9m111_get_global_gain(struct i2c_client *client) 726static int mt9m111_get_global_gain(struct mt9m111 *mt9m111)
762{ 727{
728 struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
763 int data; 729 int data;
764 730
765 data = reg_read(GLOBAL_GAIN); 731 data = reg_read(GLOBAL_GAIN);
@@ -769,9 +735,9 @@ static int mt9m111_get_global_gain(struct i2c_client *client)
769 return data; 735 return data;
770} 736}
771 737
772static int mt9m111_set_global_gain(struct i2c_client *client, int gain) 738static int mt9m111_set_global_gain(struct mt9m111 *mt9m111, int gain)
773{ 739{
774 struct mt9m111 *mt9m111 = to_mt9m111(client); 740 struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
775 u16 val; 741 u16 val;
776 742
777 if (gain > 63 * 2 * 2) 743 if (gain > 63 * 2 * 2)
@@ -788,9 +754,9 @@ static int mt9m111_set_global_gain(struct i2c_client *client, int gain)
788 return reg_write(GLOBAL_GAIN, val); 754 return reg_write(GLOBAL_GAIN, val);
789} 755}
790 756
791static int mt9m111_set_autoexposure(struct i2c_client *client, int on) 757static int mt9m111_set_autoexposure(struct mt9m111 *mt9m111, int on)
792{ 758{
793 struct mt9m111 *mt9m111 = to_mt9m111(client); 759 struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
794 int ret; 760 int ret;
795 761
796 if (on) 762 if (on)
@@ -804,9 +770,9 @@ static int mt9m111_set_autoexposure(struct i2c_client *client, int on)
804 return ret; 770 return ret;
805} 771}
806 772
807static int mt9m111_set_autowhitebalance(struct i2c_client *client, int on) 773static int mt9m111_set_autowhitebalance(struct mt9m111 *mt9m111, int on)
808{ 774{
809 struct mt9m111 *mt9m111 = to_mt9m111(client); 775 struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
810 int ret; 776 int ret;
811 777
812 if (on) 778 if (on)
@@ -823,7 +789,7 @@ static int mt9m111_set_autowhitebalance(struct i2c_client *client, int on)
823static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) 789static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
824{ 790{
825 struct i2c_client *client = v4l2_get_subdevdata(sd); 791 struct i2c_client *client = v4l2_get_subdevdata(sd);
826 struct mt9m111 *mt9m111 = to_mt9m111(client); 792 struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
827 int data; 793 int data;
828 794
829 switch (ctrl->id) { 795 switch (ctrl->id) {
@@ -848,7 +814,7 @@ static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
848 ctrl->value = !!(data & MT9M111_RMB_MIRROR_COLS); 814 ctrl->value = !!(data & MT9M111_RMB_MIRROR_COLS);
849 break; 815 break;
850 case V4L2_CID_GAIN: 816 case V4L2_CID_GAIN:
851 data = mt9m111_get_global_gain(client); 817 data = mt9m111_get_global_gain(mt9m111);
852 if (data < 0) 818 if (data < 0)
853 return data; 819 return data;
854 ctrl->value = data; 820 ctrl->value = data;
@@ -865,8 +831,7 @@ static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
865 831
866static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) 832static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
867{ 833{
868 struct i2c_client *client = v4l2_get_subdevdata(sd); 834 struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
869 struct mt9m111 *mt9m111 = to_mt9m111(client);
870 const struct v4l2_queryctrl *qctrl; 835 const struct v4l2_queryctrl *qctrl;
871 int ret; 836 int ret;
872 837
@@ -877,22 +842,22 @@ static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
877 switch (ctrl->id) { 842 switch (ctrl->id) {
878 case V4L2_CID_VFLIP: 843 case V4L2_CID_VFLIP:
879 mt9m111->vflip = ctrl->value; 844 mt9m111->vflip = ctrl->value;
880 ret = mt9m111_set_flip(client, ctrl->value, 845 ret = mt9m111_set_flip(mt9m111, ctrl->value,
881 MT9M111_RMB_MIRROR_ROWS); 846 MT9M111_RMB_MIRROR_ROWS);
882 break; 847 break;
883 case V4L2_CID_HFLIP: 848 case V4L2_CID_HFLIP:
884 mt9m111->hflip = ctrl->value; 849 mt9m111->hflip = ctrl->value;
885 ret = mt9m111_set_flip(client, ctrl->value, 850 ret = mt9m111_set_flip(mt9m111, ctrl->value,
886 MT9M111_RMB_MIRROR_COLS); 851 MT9M111_RMB_MIRROR_COLS);
887 break; 852 break;
888 case V4L2_CID_GAIN: 853 case V4L2_CID_GAIN:
889 ret = mt9m111_set_global_gain(client, ctrl->value); 854 ret = mt9m111_set_global_gain(mt9m111, ctrl->value);
890 break; 855 break;
891 case V4L2_CID_EXPOSURE_AUTO: 856 case V4L2_CID_EXPOSURE_AUTO:
892 ret = mt9m111_set_autoexposure(client, ctrl->value); 857 ret = mt9m111_set_autoexposure(mt9m111, ctrl->value);
893 break; 858 break;
894 case V4L2_CID_AUTO_WHITE_BALANCE: 859 case V4L2_CID_AUTO_WHITE_BALANCE:
895 ret = mt9m111_set_autowhitebalance(client, ctrl->value); 860 ret = mt9m111_set_autowhitebalance(mt9m111, ctrl->value);
896 break; 861 break;
897 default: 862 default:
898 ret = -EINVAL; 863 ret = -EINVAL;
@@ -901,60 +866,52 @@ static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
901 return ret; 866 return ret;
902} 867}
903 868
904static int mt9m111_suspend(struct soc_camera_device *icd, pm_message_t state) 869static int mt9m111_suspend(struct mt9m111 *mt9m111)
905{ 870{
906 struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); 871 mt9m111->gain = mt9m111_get_global_gain(mt9m111);
907 struct mt9m111 *mt9m111 = to_mt9m111(client);
908
909 mt9m111->gain = mt9m111_get_global_gain(client);
910 872
911 return 0; 873 return 0;
912} 874}
913 875
914static int mt9m111_restore_state(struct i2c_client *client) 876static void mt9m111_restore_state(struct mt9m111 *mt9m111)
915{ 877{
916 struct mt9m111 *mt9m111 = to_mt9m111(client); 878 mt9m111_set_context(mt9m111, mt9m111->context);
917 879 mt9m111_set_pixfmt(mt9m111, mt9m111->fmt->code);
918 mt9m111_set_context(client, mt9m111->context); 880 mt9m111_setup_rect(mt9m111, &mt9m111->rect);
919 mt9m111_set_pixfmt(client, mt9m111->fmt->code); 881 mt9m111_set_flip(mt9m111, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS);
920 mt9m111_setup_rect(client, &mt9m111->rect); 882 mt9m111_set_flip(mt9m111, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS);
921 mt9m111_set_flip(client, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS); 883 mt9m111_set_global_gain(mt9m111, mt9m111->gain);
922 mt9m111_set_flip(client, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS); 884 mt9m111_set_autoexposure(mt9m111, mt9m111->autoexposure);
923 mt9m111_set_global_gain(client, mt9m111->gain); 885 mt9m111_set_autowhitebalance(mt9m111, mt9m111->autowhitebalance);
924 mt9m111_set_autoexposure(client, mt9m111->autoexposure);
925 mt9m111_set_autowhitebalance(client, mt9m111->autowhitebalance);
926 return 0;
927} 886}
928 887
929static int mt9m111_resume(struct soc_camera_device *icd) 888static int mt9m111_resume(struct mt9m111 *mt9m111)
930{ 889{
931 struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
932 struct mt9m111 *mt9m111 = to_mt9m111(client);
933 int ret = 0; 890 int ret = 0;
934 891
935 if (mt9m111->powered) { 892 if (mt9m111->powered) {
936 ret = mt9m111_enable(client); 893 ret = mt9m111_enable(mt9m111);
937 if (!ret) 894 if (!ret)
938 ret = mt9m111_reset(client); 895 ret = mt9m111_reset(mt9m111);
939 if (!ret) 896 if (!ret)
940 ret = mt9m111_restore_state(client); 897 mt9m111_restore_state(mt9m111);
941 } 898 }
942 return ret; 899 return ret;
943} 900}
944 901
945static int mt9m111_init(struct i2c_client *client) 902static int mt9m111_init(struct mt9m111 *mt9m111)
946{ 903{
947 struct mt9m111 *mt9m111 = to_mt9m111(client); 904 struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
948 int ret; 905 int ret;
949 906
950 mt9m111->context = HIGHPOWER; 907 mt9m111->context = HIGHPOWER;
951 ret = mt9m111_enable(client); 908 ret = mt9m111_enable(mt9m111);
952 if (!ret) 909 if (!ret)
953 ret = mt9m111_reset(client); 910 ret = mt9m111_reset(mt9m111);
954 if (!ret) 911 if (!ret)
955 ret = mt9m111_set_context(client, mt9m111->context); 912 ret = mt9m111_set_context(mt9m111, mt9m111->context);
956 if (!ret) 913 if (!ret)
957 ret = mt9m111_set_autoexposure(client, mt9m111->autoexposure); 914 ret = mt9m111_set_autoexposure(mt9m111, mt9m111->autoexposure);
958 if (ret) 915 if (ret)
959 dev_err(&client->dev, "mt9m111 init failed: %d\n", ret); 916 dev_err(&client->dev, "mt9m111 init failed: %d\n", ret);
960 return ret; 917 return ret;
@@ -971,20 +928,13 @@ static int mt9m111_video_probe(struct soc_camera_device *icd,
971 s32 data; 928 s32 data;
972 int ret; 929 int ret;
973 930
974 /* 931 /* We must have a parent by now. And it cannot be a wrong one. */
975 * We must have a parent by now. And it cannot be a wrong one. 932 BUG_ON(!icd->parent ||
976 * So this entire test is completely redundant. 933 to_soc_camera_host(icd->parent)->nr != icd->iface);
977 */
978 if (!icd->dev.parent ||
979 to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
980 return -ENODEV;
981 934
982 mt9m111->autoexposure = 1; 935 mt9m111->autoexposure = 1;
983 mt9m111->autowhitebalance = 1; 936 mt9m111->autowhitebalance = 1;
984 937
985 mt9m111->swap_rgb_even_odd = 1;
986 mt9m111->swap_rgb_red_blue = 1;
987
988 data = reg_read(CHIP_VERSION); 938 data = reg_read(CHIP_VERSION);
989 939
990 switch (data) { 940 switch (data) {
@@ -1005,16 +955,51 @@ static int mt9m111_video_probe(struct soc_camera_device *icd,
1005 goto ei2c; 955 goto ei2c;
1006 } 956 }
1007 957
1008 ret = mt9m111_init(client); 958 ret = mt9m111_init(mt9m111);
1009 959
1010ei2c: 960ei2c:
1011 return ret; 961 return ret;
1012} 962}
1013 963
964static int mt9m111_s_power(struct v4l2_subdev *sd, int on)
965{
966 struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
967 struct i2c_client *client = v4l2_get_subdevdata(sd);
968 int ret = 0;
969
970 mutex_lock(&mt9m111->power_lock);
971
972 /*
973 * If the power count is modified from 0 to != 0 or from != 0 to 0,
974 * update the power state.
975 */
976 if (mt9m111->power_count == !on) {
977 if (on) {
978 ret = mt9m111_resume(mt9m111);
979 if (ret) {
980 dev_err(&client->dev,
981 "Failed to resume the sensor: %d\n", ret);
982 goto out;
983 }
984 } else {
985 mt9m111_suspend(mt9m111);
986 }
987 }
988
989 /* Update the power count. */
990 mt9m111->power_count += on ? 1 : -1;
991 WARN_ON(mt9m111->power_count < 0);
992
993out:
994 mutex_unlock(&mt9m111->power_lock);
995 return ret;
996}
997
1014static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = { 998static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = {
1015 .g_ctrl = mt9m111_g_ctrl, 999 .g_ctrl = mt9m111_g_ctrl,
1016 .s_ctrl = mt9m111_s_ctrl, 1000 .s_ctrl = mt9m111_s_ctrl,
1017 .g_chip_ident = mt9m111_g_chip_ident, 1001 .g_chip_ident = mt9m111_g_chip_ident,
1002 .s_power = mt9m111_s_power,
1018#ifdef CONFIG_VIDEO_ADV_DEBUG 1003#ifdef CONFIG_VIDEO_ADV_DEBUG
1019 .g_register = mt9m111_g_register, 1004 .g_register = mt9m111_g_register,
1020 .s_register = mt9m111_s_register, 1005 .s_register = mt9m111_s_register,
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index 7ce279c3751d..30547cc3f89b 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -700,8 +700,7 @@ static int mt9t031_runtime_suspend(struct device *dev)
700static int mt9t031_runtime_resume(struct device *dev) 700static int mt9t031_runtime_resume(struct device *dev)
701{ 701{
702 struct video_device *vdev = to_video_device(dev); 702 struct video_device *vdev = to_video_device(dev);
703 struct soc_camera_device *icd = container_of(vdev->parent, 703 struct soc_camera_device *icd = dev_get_drvdata(vdev->parent);
704 struct soc_camera_device, dev);
705 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 704 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
706 struct i2c_client *client = v4l2_get_subdevdata(sd); 705 struct i2c_client *client = v4l2_get_subdevdata(sd);
707 struct mt9t031 *mt9t031 = to_mt9t031(client); 706 struct mt9t031 *mt9t031 = to_mt9t031(client);
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index bffa9ee10968..d2e0a50063a2 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -1057,13 +1057,9 @@ static int mt9t112_camera_probe(struct soc_camera_device *icd,
1057 const char *devname; 1057 const char *devname;
1058 int chipid; 1058 int chipid;
1059 1059
1060 /* 1060 /* We must have a parent by now. And it cannot be a wrong one. */
1061 * We must have a parent by now. And it cannot be a wrong one. 1061 BUG_ON(!icd->parent ||
1062 * So this entire test is completely redundant. 1062 to_soc_camera_host(icd->parent)->nr != icd->iface);
1063 */
1064 if (!icd->dev.parent ||
1065 to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
1066 return -ENODEV;
1067 1063
1068 /* 1064 /*
1069 * check and show chip ID 1065 * check and show chip ID
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c
index 4904d25f689f..893a8b8f5141 100644
--- a/drivers/media/video/mt9v011.c
+++ b/drivers/media/video/mt9v011.c
@@ -54,11 +54,20 @@ static struct v4l2_queryctrl mt9v011_qctrl[] = {
54 .type = V4L2_CTRL_TYPE_INTEGER, 54 .type = V4L2_CTRL_TYPE_INTEGER,
55 .name = "Gain", 55 .name = "Gain",
56 .minimum = 0, 56 .minimum = 0,
57 .maximum = (1 << 10) - 1, 57 .maximum = (1 << 12) - 1 - 0x0020,
58 .step = 1, 58 .step = 1,
59 .default_value = 0x0020, 59 .default_value = 0x0020,
60 .flags = 0, 60 .flags = 0,
61 }, { 61 }, {
62 .id = V4L2_CID_EXPOSURE,
63 .type = V4L2_CTRL_TYPE_INTEGER,
64 .name = "Exposure",
65 .minimum = 0,
66 .maximum = 2047,
67 .step = 1,
68 .default_value = 0x01fc,
69 .flags = 0,
70 }, {
62 .id = V4L2_CID_RED_BALANCE, 71 .id = V4L2_CID_RED_BALANCE,
63 .type = V4L2_CTRL_TYPE_INTEGER, 72 .type = V4L2_CTRL_TYPE_INTEGER,
64 .name = "Red Balance", 73 .name = "Red Balance",
@@ -105,7 +114,8 @@ struct mt9v011 {
105 unsigned hflip:1; 114 unsigned hflip:1;
106 unsigned vflip:1; 115 unsigned vflip:1;
107 116
108 u16 global_gain, red_bal, blue_bal; 117 u16 global_gain, exposure;
118 s16 red_bal, blue_bal;
109}; 119};
110 120
111static inline struct mt9v011 *to_mt9v011(struct v4l2_subdev *sd) 121static inline struct mt9v011 *to_mt9v011(struct v4l2_subdev *sd)
@@ -180,24 +190,68 @@ static const struct i2c_reg_value mt9v011_init_default[] = {
180 { R07_MT9V011_OUT_CTRL, 0x0002 }, /* chip enable */ 190 { R07_MT9V011_OUT_CTRL, 0x0002 }, /* chip enable */
181}; 191};
182 192
193
194static u16 calc_mt9v011_gain(s16 lineargain)
195{
196
197 u16 digitalgain = 0;
198 u16 analogmult = 0;
199 u16 analoginit = 0;
200
201 if (lineargain < 0)
202 lineargain = 0;
203
204 /* recommended minimum */
205 lineargain += 0x0020;
206
207 if (lineargain > 2047)
208 lineargain = 2047;
209
210 if (lineargain > 1023) {
211 digitalgain = 3;
212 analogmult = 3;
213 analoginit = lineargain / 16;
214 } else if (lineargain > 511) {
215 digitalgain = 1;
216 analogmult = 3;
217 analoginit = lineargain / 8;
218 } else if (lineargain > 255) {
219 analogmult = 3;
220 analoginit = lineargain / 4;
221 } else if (lineargain > 127) {
222 analogmult = 1;
223 analoginit = lineargain / 2;
224 } else
225 analoginit = lineargain;
226
227 return analoginit + (analogmult << 7) + (digitalgain << 9);
228
229}
230
183static void set_balance(struct v4l2_subdev *sd) 231static void set_balance(struct v4l2_subdev *sd)
184{ 232{
185 struct mt9v011 *core = to_mt9v011(sd); 233 struct mt9v011 *core = to_mt9v011(sd);
186 u16 green1_gain, green2_gain, blue_gain, red_gain; 234 u16 green_gain, blue_gain, red_gain;
235 u16 exposure;
236 s16 bal;
187 237
188 green1_gain = core->global_gain; 238 exposure = core->exposure;
189 green2_gain = core->global_gain;
190 239
191 blue_gain = core->global_gain + 240 green_gain = calc_mt9v011_gain(core->global_gain);
192 core->global_gain * core->blue_bal / (1 << 9);
193 241
194 red_gain = core->global_gain + 242 bal = core->global_gain;
195 core->global_gain * core->blue_bal / (1 << 9); 243 bal += (core->blue_bal * core->global_gain / (1 << 7));
244 blue_gain = calc_mt9v011_gain(bal);
196 245
197 mt9v011_write(sd, R2B_MT9V011_GREEN_1_GAIN, green1_gain); 246 bal = core->global_gain;
198 mt9v011_write(sd, R2E_MT9V011_GREEN_2_GAIN, green1_gain); 247 bal += (core->red_bal * core->global_gain / (1 << 7));
248 red_gain = calc_mt9v011_gain(bal);
249
250 mt9v011_write(sd, R2B_MT9V011_GREEN_1_GAIN, green_gain);
251 mt9v011_write(sd, R2E_MT9V011_GREEN_2_GAIN, green_gain);
199 mt9v011_write(sd, R2C_MT9V011_BLUE_GAIN, blue_gain); 252 mt9v011_write(sd, R2C_MT9V011_BLUE_GAIN, blue_gain);
200 mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain); 253 mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain);
254 mt9v011_write(sd, R09_MT9V011_SHUTTER_WIDTH, exposure);
201} 255}
202 256
203static void calc_fps(struct v4l2_subdev *sd, u32 *numerator, u32 *denominator) 257static void calc_fps(struct v4l2_subdev *sd, u32 *numerator, u32 *denominator)
@@ -286,7 +340,7 @@ static void set_res(struct v4l2_subdev *sd)
286 * be missing. 340 * be missing.
287 */ 341 */
288 342
289 hstart = 14 + (640 - core->width) / 2; 343 hstart = 20 + (640 - core->width) / 2;
290 mt9v011_write(sd, R02_MT9V011_COLSTART, hstart); 344 mt9v011_write(sd, R02_MT9V011_COLSTART, hstart);
291 mt9v011_write(sd, R04_MT9V011_WIDTH, core->width); 345 mt9v011_write(sd, R04_MT9V011_WIDTH, core->width);
292 mt9v011_write(sd, R05_MT9V011_HBLANK, 771 - core->width); 346 mt9v011_write(sd, R05_MT9V011_HBLANK, 771 - core->width);
@@ -338,6 +392,9 @@ static int mt9v011_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
338 case V4L2_CID_GAIN: 392 case V4L2_CID_GAIN:
339 ctrl->value = core->global_gain; 393 ctrl->value = core->global_gain;
340 return 0; 394 return 0;
395 case V4L2_CID_EXPOSURE:
396 ctrl->value = core->exposure;
397 return 0;
341 case V4L2_CID_RED_BALANCE: 398 case V4L2_CID_RED_BALANCE:
342 ctrl->value = core->red_bal; 399 ctrl->value = core->red_bal;
343 return 0; 400 return 0;
@@ -392,6 +449,9 @@ static int mt9v011_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
392 case V4L2_CID_GAIN: 449 case V4L2_CID_GAIN:
393 core->global_gain = ctrl->value; 450 core->global_gain = ctrl->value;
394 break; 451 break;
452 case V4L2_CID_EXPOSURE:
453 core->exposure = ctrl->value;
454 break;
395 case V4L2_CID_RED_BALANCE: 455 case V4L2_CID_RED_BALANCE:
396 core->red_bal = ctrl->value; 456 core->red_bal = ctrl->value;
397 break; 457 break;
@@ -598,6 +658,7 @@ static int mt9v011_probe(struct i2c_client *c,
598 } 658 }
599 659
600 core->global_gain = 0x0024; 660 core->global_gain = 0x0024;
661 core->exposure = 0x01fc;
601 core->width = 640; 662 core->width = 640;
602 core->height = 480; 663 core->height = 480;
603 core->xtal = 27000000; /* Hz */ 664 core->xtal = 27000000; /* Hz */
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index fc76ed1c08e5..51b0fccbfe70 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -728,9 +728,9 @@ static int mt9v022_video_probe(struct soc_camera_device *icd,
728 int ret; 728 int ret;
729 unsigned long flags; 729 unsigned long flags;
730 730
731 if (!icd->dev.parent || 731 /* We must have a parent by now. And it cannot be a wrong one. */
732 to_soc_camera_host(icd->dev.parent)->nr != icd->iface) 732 BUG_ON(!icd->parent ||
733 return -ENODEV; 733 to_soc_camera_host(icd->parent)->nr != icd->iface);
734 734
735 /* Read out the chip version register */ 735 /* Read out the chip version register */
736 data = reg_read(client, MT9V022_CHIP_VERSION); 736 data = reg_read(client, MT9V022_CHIP_VERSION);
@@ -809,8 +809,8 @@ static void mt9v022_video_remove(struct soc_camera_device *icd)
809{ 809{
810 struct soc_camera_link *icl = to_soc_camera_link(icd); 810 struct soc_camera_link *icl = to_soc_camera_link(icd);
811 811
812 dev_dbg(&icd->dev, "Video removed: %p, %p\n", 812 dev_dbg(icd->pdev, "Video removed: %p, %p\n",
813 icd->dev.parent, icd->vdev); 813 icd->parent, icd->vdev);
814 if (icl->free_bus) 814 if (icl->free_bus)
815 icl->free_bus(icl); 815 icl->free_bus(icl);
816} 816}
diff --git a/drivers/media/video/mt9v032.c b/drivers/media/video/mt9v032.c
index 1319c2c48aff..c64e1dc4cb4e 100644
--- a/drivers/media/video/mt9v032.c
+++ b/drivers/media/video/mt9v032.c
@@ -31,14 +31,14 @@
31#define MT9V032_CHIP_VERSION 0x00 31#define MT9V032_CHIP_VERSION 0x00
32#define MT9V032_CHIP_ID_REV1 0x1311 32#define MT9V032_CHIP_ID_REV1 0x1311
33#define MT9V032_CHIP_ID_REV3 0x1313 33#define MT9V032_CHIP_ID_REV3 0x1313
34#define MT9V032_ROW_START 0x01 34#define MT9V032_COLUMN_START 0x01
35#define MT9V032_ROW_START_MIN 4
36#define MT9V032_ROW_START_DEF 10
37#define MT9V032_ROW_START_MAX 482
38#define MT9V032_COLUMN_START 0x02
39#define MT9V032_COLUMN_START_MIN 1 35#define MT9V032_COLUMN_START_MIN 1
40#define MT9V032_COLUMN_START_DEF 2 36#define MT9V032_COLUMN_START_DEF 1
41#define MT9V032_COLUMN_START_MAX 752 37#define MT9V032_COLUMN_START_MAX 752
38#define MT9V032_ROW_START 0x02
39#define MT9V032_ROW_START_MIN 4
40#define MT9V032_ROW_START_DEF 5
41#define MT9V032_ROW_START_MAX 482
42#define MT9V032_WINDOW_HEIGHT 0x03 42#define MT9V032_WINDOW_HEIGHT 0x03
43#define MT9V032_WINDOW_HEIGHT_MIN 1 43#define MT9V032_WINDOW_HEIGHT_MIN 1
44#define MT9V032_WINDOW_HEIGHT_DEF 480 44#define MT9V032_WINDOW_HEIGHT_DEF 480
@@ -420,13 +420,13 @@ static int mt9v032_set_crop(struct v4l2_subdev *subdev,
420 struct v4l2_rect *__crop; 420 struct v4l2_rect *__crop;
421 struct v4l2_rect rect; 421 struct v4l2_rect rect;
422 422
423 /* Clamp the crop rectangle boundaries and align them to a multiple of 2 423 /* Clamp the crop rectangle boundaries and align them to a non multiple
424 * pixels. 424 * of 2 pixels to ensure a GRBG Bayer pattern.
425 */ 425 */
426 rect.left = clamp(ALIGN(crop->rect.left, 2), 426 rect.left = clamp(ALIGN(crop->rect.left + 1, 2) - 1,
427 MT9V032_COLUMN_START_MIN, 427 MT9V032_COLUMN_START_MIN,
428 MT9V032_COLUMN_START_MAX); 428 MT9V032_COLUMN_START_MAX);
429 rect.top = clamp(ALIGN(crop->rect.top, 2), 429 rect.top = clamp(ALIGN(crop->rect.top + 1, 2) - 1,
430 MT9V032_ROW_START_MIN, 430 MT9V032_ROW_START_MIN,
431 MT9V032_ROW_START_MAX); 431 MT9V032_ROW_START_MAX);
432 rect.width = clamp(ALIGN(crop->rect.width, 2), 432 rect.width = clamp(ALIGN(crop->rect.width, 2),
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 63f8a0cc33d8..087db12a3a67 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -31,7 +31,6 @@
31#include <linux/sched.h> 31#include <linux/sched.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/time.h> 33#include <linux/time.h>
34#include <linux/version.h>
35#include <linux/videodev2.h> 34#include <linux/videodev2.h>
36 35
37#include <media/soc_camera.h> 36#include <media/soc_camera.h>
@@ -73,7 +72,7 @@
73#define CSISR_SOF_INT (1 << 16) 72#define CSISR_SOF_INT (1 << 16)
74#define CSISR_DRDY (1 << 0) 73#define CSISR_DRDY (1 << 0)
75 74
76#define VERSION_CODE KERNEL_VERSION(0, 0, 1) 75#define DRIVER_VERSION "0.0.2"
77#define DRIVER_NAME "mx1-camera" 76#define DRIVER_NAME "mx1-camera"
78 77
79#define CSI_IRQ_MASK (CSISR_SFF_OR_INT | CSISR_RFF_OR_INT | \ 78#define CSI_IRQ_MASK (CSISR_SFF_OR_INT | CSISR_RFF_OR_INT | \
@@ -142,7 +141,7 @@ static int mx1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
142 if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024) 141 if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
143 *count = (MAX_VIDEO_MEM * 1024 * 1024) / *size; 142 *count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;
144 143
145 dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size); 144 dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
146 145
147 return 0; 146 return 0;
148} 147}
@@ -154,7 +153,7 @@ static void free_buffer(struct videobuf_queue *vq, struct mx1_buffer *buf)
154 153
155 BUG_ON(in_interrupt()); 154 BUG_ON(in_interrupt());
156 155
157 dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 156 dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
158 vb, vb->baddr, vb->bsize); 157 vb, vb->baddr, vb->bsize);
159 158
160 /* 159 /*
@@ -179,7 +178,7 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq,
179 if (bytes_per_line < 0) 178 if (bytes_per_line < 0)
180 return bytes_per_line; 179 return bytes_per_line;
181 180
182 dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 181 dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
183 vb, vb->baddr, vb->bsize); 182 vb, vb->baddr, vb->bsize);
184 183
185 /* Added list head initialization on alloc */ 184 /* Added list head initialization on alloc */
@@ -232,7 +231,7 @@ out:
232static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev) 231static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev)
233{ 232{
234 struct videobuf_buffer *vbuf = &pcdev->active->vb; 233 struct videobuf_buffer *vbuf = &pcdev->active->vb;
235 struct device *dev = pcdev->icd->dev.parent; 234 struct device *dev = pcdev->icd->parent;
236 int ret; 235 int ret;
237 236
238 if (unlikely(!pcdev->active)) { 237 if (unlikely(!pcdev->active)) {
@@ -256,11 +255,11 @@ static void mx1_videobuf_queue(struct videobuf_queue *vq,
256 struct videobuf_buffer *vb) 255 struct videobuf_buffer *vb)
257{ 256{
258 struct soc_camera_device *icd = vq->priv_data; 257 struct soc_camera_device *icd = vq->priv_data;
259 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 258 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
260 struct mx1_camera_dev *pcdev = ici->priv; 259 struct mx1_camera_dev *pcdev = ici->priv;
261 struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb); 260 struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
262 261
263 dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 262 dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
264 vb, vb->baddr, vb->bsize); 263 vb, vb->baddr, vb->bsize);
265 264
266 list_add_tail(&vb->queue, &pcdev->capture); 265 list_add_tail(&vb->queue, &pcdev->capture);
@@ -287,7 +286,7 @@ static void mx1_videobuf_release(struct videobuf_queue *vq,
287 struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb); 286 struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
288#ifdef DEBUG 287#ifdef DEBUG
289 struct soc_camera_device *icd = vq->priv_data; 288 struct soc_camera_device *icd = vq->priv_data;
290 struct device *dev = icd->dev.parent; 289 struct device *dev = icd->parent;
291 290
292 dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 291 dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
293 vb, vb->baddr, vb->bsize); 292 vb, vb->baddr, vb->bsize);
@@ -343,7 +342,7 @@ static void mx1_camera_wakeup(struct mx1_camera_dev *pcdev,
343static void mx1_camera_dma_irq(int channel, void *data) 342static void mx1_camera_dma_irq(int channel, void *data)
344{ 343{
345 struct mx1_camera_dev *pcdev = data; 344 struct mx1_camera_dev *pcdev = data;
346 struct device *dev = pcdev->icd->dev.parent; 345 struct device *dev = pcdev->icd->parent;
347 struct mx1_buffer *buf; 346 struct mx1_buffer *buf;
348 struct videobuf_buffer *vb; 347 struct videobuf_buffer *vb;
349 unsigned long flags; 348 unsigned long flags;
@@ -378,10 +377,10 @@ static struct videobuf_queue_ops mx1_videobuf_ops = {
378static void mx1_camera_init_videobuf(struct videobuf_queue *q, 377static void mx1_camera_init_videobuf(struct videobuf_queue *q,
379 struct soc_camera_device *icd) 378 struct soc_camera_device *icd)
380{ 379{
381 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 380 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
382 struct mx1_camera_dev *pcdev = ici->priv; 381 struct mx1_camera_dev *pcdev = ici->priv;
383 382
384 videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, icd->dev.parent, 383 videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, icd->parent,
385 &pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, 384 &pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
386 V4L2_FIELD_NONE, 385 V4L2_FIELD_NONE,
387 sizeof(struct mx1_buffer), icd, &icd->video_lock); 386 sizeof(struct mx1_buffer), icd, &icd->video_lock);
@@ -401,7 +400,7 @@ static int mclk_get_divisor(struct mx1_camera_dev *pcdev)
401 */ 400 */
402 div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1; 401 div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;
403 402
404 dev_dbg(pcdev->icd->dev.parent, 403 dev_dbg(pcdev->icd->parent,
405 "System clock %lukHz, target freq %dkHz, divisor %lu\n", 404 "System clock %lukHz, target freq %dkHz, divisor %lu\n",
406 lcdclk / 1000, mclk / 1000, div); 405 lcdclk / 1000, mclk / 1000, div);
407 406
@@ -412,7 +411,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev)
412{ 411{
413 unsigned int csicr1 = CSICR1_EN; 412 unsigned int csicr1 = CSICR1_EN;
414 413
415 dev_dbg(pcdev->icd->dev.parent, "Activate device\n"); 414 dev_dbg(pcdev->icd->parent, "Activate device\n");
416 415
417 clk_enable(pcdev->clk); 416 clk_enable(pcdev->clk);
418 417
@@ -428,7 +427,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev)
428 427
429static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev) 428static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev)
430{ 429{
431 dev_dbg(pcdev->icd->dev.parent, "Deactivate device\n"); 430 dev_dbg(pcdev->icd->parent, "Deactivate device\n");
432 431
433 /* Disable all CSI interface */ 432 /* Disable all CSI interface */
434 __raw_writel(0x00, pcdev->base + CSICR1); 433 __raw_writel(0x00, pcdev->base + CSICR1);
@@ -442,13 +441,13 @@ static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev)
442 */ 441 */
443static int mx1_camera_add_device(struct soc_camera_device *icd) 442static int mx1_camera_add_device(struct soc_camera_device *icd)
444{ 443{
445 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 444 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
446 struct mx1_camera_dev *pcdev = ici->priv; 445 struct mx1_camera_dev *pcdev = ici->priv;
447 446
448 if (pcdev->icd) 447 if (pcdev->icd)
449 return -EBUSY; 448 return -EBUSY;
450 449
451 dev_info(icd->dev.parent, "MX1 Camera driver attached to camera %d\n", 450 dev_info(icd->parent, "MX1 Camera driver attached to camera %d\n",
452 icd->devnum); 451 icd->devnum);
453 452
454 mx1_camera_activate(pcdev); 453 mx1_camera_activate(pcdev);
@@ -460,7 +459,7 @@ static int mx1_camera_add_device(struct soc_camera_device *icd)
460 459
461static void mx1_camera_remove_device(struct soc_camera_device *icd) 460static void mx1_camera_remove_device(struct soc_camera_device *icd)
462{ 461{
463 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 462 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
464 struct mx1_camera_dev *pcdev = ici->priv; 463 struct mx1_camera_dev *pcdev = ici->priv;
465 unsigned int csicr1; 464 unsigned int csicr1;
466 465
@@ -473,7 +472,7 @@ static void mx1_camera_remove_device(struct soc_camera_device *icd)
473 /* Stop DMA engine */ 472 /* Stop DMA engine */
474 imx_dma_disable(pcdev->dma_chan); 473 imx_dma_disable(pcdev->dma_chan);
475 474
476 dev_info(icd->dev.parent, "MX1 Camera driver detached from camera %d\n", 475 dev_info(icd->parent, "MX1 Camera driver detached from camera %d\n",
477 icd->devnum); 476 icd->devnum);
478 477
479 mx1_camera_deactivate(pcdev); 478 mx1_camera_deactivate(pcdev);
@@ -491,7 +490,7 @@ static int mx1_camera_set_crop(struct soc_camera_device *icd,
491 490
492static int mx1_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) 491static int mx1_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
493{ 492{
494 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 493 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
495 struct mx1_camera_dev *pcdev = ici->priv; 494 struct mx1_camera_dev *pcdev = ici->priv;
496 unsigned long camera_flags, common_flags; 495 unsigned long camera_flags, common_flags;
497 unsigned int csicr1; 496 unsigned int csicr1;
@@ -562,14 +561,14 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd,
562 561
563 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); 562 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
564 if (!xlate) { 563 if (!xlate) {
565 dev_warn(icd->dev.parent, "Format %x not found\n", 564 dev_warn(icd->parent, "Format %x not found\n",
566 pix->pixelformat); 565 pix->pixelformat);
567 return -EINVAL; 566 return -EINVAL;
568 } 567 }
569 568
570 buswidth = xlate->host_fmt->bits_per_sample; 569 buswidth = xlate->host_fmt->bits_per_sample;
571 if (buswidth > 8) { 570 if (buswidth > 8) {
572 dev_warn(icd->dev.parent, 571 dev_warn(icd->parent,
573 "bits-per-sample %d for format %x unsupported\n", 572 "bits-per-sample %d for format %x unsupported\n",
574 buswidth, pix->pixelformat); 573 buswidth, pix->pixelformat);
575 return -EINVAL; 574 return -EINVAL;
@@ -609,7 +608,7 @@ static int mx1_camera_try_fmt(struct soc_camera_device *icd,
609 608
610 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); 609 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
611 if (!xlate) { 610 if (!xlate) {
612 dev_warn(icd->dev.parent, "Format %x not found\n", 611 dev_warn(icd->parent, "Format %x not found\n",
613 pix->pixelformat); 612 pix->pixelformat);
614 return -EINVAL; 613 return -EINVAL;
615 } 614 }
@@ -676,7 +675,6 @@ static int mx1_camera_querycap(struct soc_camera_host *ici,
676{ 675{
677 /* cap->name is set by the friendly caller:-> */ 676 /* cap->name is set by the friendly caller:-> */
678 strlcpy(cap->card, "i.MX1/i.MXL Camera", sizeof(cap->card)); 677 strlcpy(cap->card, "i.MX1/i.MXL Camera", sizeof(cap->card));
679 cap->version = VERSION_CODE;
680 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; 678 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
681 679
682 return 0; 680 return 0;
@@ -883,4 +881,5 @@ module_exit(mx1_camera_exit);
883MODULE_DESCRIPTION("i.MX1/i.MXL SoC Camera Host driver"); 881MODULE_DESCRIPTION("i.MX1/i.MXL SoC Camera Host driver");
884MODULE_AUTHOR("Paulius Zaleckas <paulius.zaleckas@teltonika.lt>"); 882MODULE_AUTHOR("Paulius Zaleckas <paulius.zaleckas@teltonika.lt>");
885MODULE_LICENSE("GPL v2"); 883MODULE_LICENSE("GPL v2");
884MODULE_VERSION(DRIVER_VERSION);
886MODULE_ALIAS("platform:" DRIVER_NAME); 885MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index 4eab1c620318..ec2410c0c806 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -23,7 +23,6 @@
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/moduleparam.h> 24#include <linux/moduleparam.h>
25#include <linux/time.h> 25#include <linux/time.h>
26#include <linux/version.h>
27#include <linux/device.h> 26#include <linux/device.h>
28#include <linux/platform_device.h> 27#include <linux/platform_device.h>
29#include <linux/mutex.h> 28#include <linux/mutex.h>
@@ -47,7 +46,7 @@
47#include <asm/dma.h> 46#include <asm/dma.h>
48 47
49#define MX2_CAM_DRV_NAME "mx2-camera" 48#define MX2_CAM_DRV_NAME "mx2-camera"
50#define MX2_CAM_VERSION_CODE KERNEL_VERSION(0, 0, 5) 49#define MX2_CAM_VERSION "0.0.6"
51#define MX2_CAM_DRIVER_DESCRIPTION "i.MX2x_Camera" 50#define MX2_CAM_DRIVER_DESCRIPTION "i.MX2x_Camera"
52 51
53/* reset values */ 52/* reset values */
@@ -278,7 +277,7 @@ static void mx2_camera_deactivate(struct mx2_camera_dev *pcdev)
278 */ 277 */
279static int mx2_camera_add_device(struct soc_camera_device *icd) 278static int mx2_camera_add_device(struct soc_camera_device *icd)
280{ 279{
281 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 280 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
282 struct mx2_camera_dev *pcdev = ici->priv; 281 struct mx2_camera_dev *pcdev = ici->priv;
283 int ret; 282 int ret;
284 u32 csicr1; 283 u32 csicr1;
@@ -303,7 +302,7 @@ static int mx2_camera_add_device(struct soc_camera_device *icd)
303 302
304 pcdev->icd = icd; 303 pcdev->icd = icd;
305 304
306 dev_info(icd->dev.parent, "Camera driver attached to camera %d\n", 305 dev_info(icd->parent, "Camera driver attached to camera %d\n",
307 icd->devnum); 306 icd->devnum);
308 307
309 return 0; 308 return 0;
@@ -311,12 +310,12 @@ static int mx2_camera_add_device(struct soc_camera_device *icd)
311 310
312static void mx2_camera_remove_device(struct soc_camera_device *icd) 311static void mx2_camera_remove_device(struct soc_camera_device *icd)
313{ 312{
314 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 313 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
315 struct mx2_camera_dev *pcdev = ici->priv; 314 struct mx2_camera_dev *pcdev = ici->priv;
316 315
317 BUG_ON(icd != pcdev->icd); 316 BUG_ON(icd != pcdev->icd);
318 317
319 dev_info(icd->dev.parent, "Camera driver detached from camera %d\n", 318 dev_info(icd->parent, "Camera driver detached from camera %d\n",
320 icd->devnum); 319 icd->devnum);
321 320
322 mx2_camera_deactivate(pcdev); 321 mx2_camera_deactivate(pcdev);
@@ -437,7 +436,7 @@ static int mx2_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
437 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, 436 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
438 icd->current_fmt->host_fmt); 437 icd->current_fmt->host_fmt);
439 438
440 dev_dbg(&icd->dev, "count=%d, size=%d\n", *count, *size); 439 dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
441 440
442 if (bytes_per_line < 0) 441 if (bytes_per_line < 0)
443 return bytes_per_line; 442 return bytes_per_line;
@@ -457,7 +456,7 @@ static void free_buffer(struct videobuf_queue *vq, struct mx2_buffer *buf)
457 struct soc_camera_device *icd = vq->priv_data; 456 struct soc_camera_device *icd = vq->priv_data;
458 struct videobuf_buffer *vb = &buf->vb; 457 struct videobuf_buffer *vb = &buf->vb;
459 458
460 dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 459 dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
461 vb, vb->baddr, vb->bsize); 460 vb, vb->baddr, vb->bsize);
462 461
463 /* 462 /*
@@ -467,7 +466,7 @@ static void free_buffer(struct videobuf_queue *vq, struct mx2_buffer *buf)
467 videobuf_waiton(vq, vb, 0, 0); 466 videobuf_waiton(vq, vb, 0, 0);
468 467
469 videobuf_dma_contig_free(vq, vb); 468 videobuf_dma_contig_free(vq, vb);
470 dev_dbg(&icd->dev, "%s freed\n", __func__); 469 dev_dbg(icd->parent, "%s freed\n", __func__);
471 470
472 vb->state = VIDEOBUF_NEEDS_INIT; 471 vb->state = VIDEOBUF_NEEDS_INIT;
473} 472}
@@ -481,7 +480,7 @@ static int mx2_videobuf_prepare(struct videobuf_queue *vq,
481 icd->current_fmt->host_fmt); 480 icd->current_fmt->host_fmt);
482 int ret = 0; 481 int ret = 0;
483 482
484 dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 483 dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
485 vb, vb->baddr, vb->bsize); 484 vb, vb->baddr, vb->bsize);
486 485
487 if (bytes_per_line < 0) 486 if (bytes_per_line < 0)
@@ -533,12 +532,12 @@ static void mx2_videobuf_queue(struct videobuf_queue *vq,
533{ 532{
534 struct soc_camera_device *icd = vq->priv_data; 533 struct soc_camera_device *icd = vq->priv_data;
535 struct soc_camera_host *ici = 534 struct soc_camera_host *ici =
536 to_soc_camera_host(icd->dev.parent); 535 to_soc_camera_host(icd->parent);
537 struct mx2_camera_dev *pcdev = ici->priv; 536 struct mx2_camera_dev *pcdev = ici->priv;
538 struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb); 537 struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb);
539 unsigned long flags; 538 unsigned long flags;
540 539
541 dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 540 dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
542 vb, vb->baddr, vb->bsize); 541 vb, vb->baddr, vb->bsize);
543 542
544 spin_lock_irqsave(&pcdev->lock, flags); 543 spin_lock_irqsave(&pcdev->lock, flags);
@@ -611,27 +610,27 @@ static void mx2_videobuf_release(struct videobuf_queue *vq,
611 struct videobuf_buffer *vb) 610 struct videobuf_buffer *vb)
612{ 611{
613 struct soc_camera_device *icd = vq->priv_data; 612 struct soc_camera_device *icd = vq->priv_data;
614 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 613 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
615 struct mx2_camera_dev *pcdev = ici->priv; 614 struct mx2_camera_dev *pcdev = ici->priv;
616 struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb); 615 struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb);
617 unsigned long flags; 616 unsigned long flags;
618 617
619#ifdef DEBUG 618#ifdef DEBUG
620 dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 619 dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
621 vb, vb->baddr, vb->bsize); 620 vb, vb->baddr, vb->bsize);
622 621
623 switch (vb->state) { 622 switch (vb->state) {
624 case VIDEOBUF_ACTIVE: 623 case VIDEOBUF_ACTIVE:
625 dev_info(&icd->dev, "%s (active)\n", __func__); 624 dev_info(icd->parent, "%s (active)\n", __func__);
626 break; 625 break;
627 case VIDEOBUF_QUEUED: 626 case VIDEOBUF_QUEUED:
628 dev_info(&icd->dev, "%s (queued)\n", __func__); 627 dev_info(icd->parent, "%s (queued)\n", __func__);
629 break; 628 break;
630 case VIDEOBUF_PREPARED: 629 case VIDEOBUF_PREPARED:
631 dev_info(&icd->dev, "%s (prepared)\n", __func__); 630 dev_info(icd->parent, "%s (prepared)\n", __func__);
632 break; 631 break;
633 default: 632 default:
634 dev_info(&icd->dev, "%s (unknown) %d\n", __func__, 633 dev_info(icd->parent, "%s (unknown) %d\n", __func__,
635 vb->state); 634 vb->state);
636 break; 635 break;
637 } 636 }
@@ -678,7 +677,7 @@ static struct videobuf_queue_ops mx2_videobuf_ops = {
678static void mx2_camera_init_videobuf(struct videobuf_queue *q, 677static void mx2_camera_init_videobuf(struct videobuf_queue *q,
679 struct soc_camera_device *icd) 678 struct soc_camera_device *icd)
680{ 679{
681 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 680 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
682 struct mx2_camera_dev *pcdev = ici->priv; 681 struct mx2_camera_dev *pcdev = ici->priv;
683 682
684 videobuf_queue_dma_contig_init(q, &mx2_videobuf_ops, pcdev->dev, 683 videobuf_queue_dma_contig_init(q, &mx2_videobuf_ops, pcdev->dev,
@@ -719,7 +718,7 @@ static void mx27_camera_emma_buf_init(struct soc_camera_device *icd,
719 int bytesperline) 718 int bytesperline)
720{ 719{
721 struct soc_camera_host *ici = 720 struct soc_camera_host *ici =
722 to_soc_camera_host(icd->dev.parent); 721 to_soc_camera_host(icd->parent);
723 struct mx2_camera_dev *pcdev = ici->priv; 722 struct mx2_camera_dev *pcdev = ici->priv;
724 723
725 writel(pcdev->discard_buffer_dma, 724 writel(pcdev->discard_buffer_dma,
@@ -772,7 +771,7 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd,
772 __u32 pixfmt) 771 __u32 pixfmt)
773{ 772{
774 struct soc_camera_host *ici = 773 struct soc_camera_host *ici =
775 to_soc_camera_host(icd->dev.parent); 774 to_soc_camera_host(icd->parent);
776 struct mx2_camera_dev *pcdev = ici->priv; 775 struct mx2_camera_dev *pcdev = ici->priv;
777 unsigned long camera_flags, common_flags; 776 unsigned long camera_flags, common_flags;
778 int ret = 0; 777 int ret = 0;
@@ -891,7 +890,7 @@ static int mx2_camera_set_crop(struct soc_camera_device *icd,
891 if (ret < 0) 890 if (ret < 0)
892 return ret; 891 return ret;
893 892
894 dev_dbg(icd->dev.parent, "Sensor cropped %dx%d\n", 893 dev_dbg(icd->parent, "Sensor cropped %dx%d\n",
895 mf.width, mf.height); 894 mf.width, mf.height);
896 895
897 icd->user_width = mf.width; 896 icd->user_width = mf.width;
@@ -911,7 +910,7 @@ static int mx2_camera_set_fmt(struct soc_camera_device *icd,
911 910
912 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); 911 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
913 if (!xlate) { 912 if (!xlate) {
914 dev_warn(icd->dev.parent, "Format %x not found\n", 913 dev_warn(icd->parent, "Format %x not found\n",
915 pix->pixelformat); 914 pix->pixelformat);
916 return -EINVAL; 915 return -EINVAL;
917 } 916 }
@@ -951,7 +950,7 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
951 950
952 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); 951 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
953 if (pixfmt && !xlate) { 952 if (pixfmt && !xlate) {
954 dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt); 953 dev_warn(icd->parent, "Format %x not found\n", pixfmt);
955 return -EINVAL; 954 return -EINVAL;
956 } 955 }
957 956
@@ -974,11 +973,16 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
974 if (pix->bytesperline < 0) 973 if (pix->bytesperline < 0)
975 return pix->bytesperline; 974 return pix->bytesperline;
976 pix->sizeimage = pix->height * pix->bytesperline; 975 pix->sizeimage = pix->height * pix->bytesperline;
977 if (pix->sizeimage > (4 * 0x3ffff)) { /* CSIRXCNT limit */ 976 /* Check against the CSIRXCNT limit */
978 dev_warn(icd->dev.parent, 977 if (pix->sizeimage > 4 * 0x3ffff) {
979 "Image size (%u) above limit\n", 978 /* Adjust geometry, preserve aspect ratio */
980 pix->sizeimage); 979 unsigned int new_height = int_sqrt(4 * 0x3ffff *
981 return -EINVAL; 980 pix->height / pix->bytesperline);
981 pix->width = new_height * pix->width / pix->height;
982 pix->height = new_height;
983 pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
984 xlate->host_fmt);
985 BUG_ON(pix->bytesperline < 0);
982 } 986 }
983 } 987 }
984 988
@@ -996,7 +1000,7 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
996 if (mf.field == V4L2_FIELD_ANY) 1000 if (mf.field == V4L2_FIELD_ANY)
997 mf.field = V4L2_FIELD_NONE; 1001 mf.field = V4L2_FIELD_NONE;
998 if (mf.field != V4L2_FIELD_NONE) { 1002 if (mf.field != V4L2_FIELD_NONE) {
999 dev_err(icd->dev.parent, "Field type %d unsupported.\n", 1003 dev_err(icd->parent, "Field type %d unsupported.\n",
1000 mf.field); 1004 mf.field);
1001 return -EINVAL; 1005 return -EINVAL;
1002 } 1006 }
@@ -1014,7 +1018,6 @@ static int mx2_camera_querycap(struct soc_camera_host *ici,
1014{ 1018{
1015 /* cap->name is set by the friendly caller:-> */ 1019 /* cap->name is set by the friendly caller:-> */
1016 strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card)); 1020 strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card));
1017 cap->version = MX2_CAM_VERSION_CODE;
1018 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; 1021 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
1019 1022
1020 return 0; 1023 return 0;
@@ -1523,3 +1526,4 @@ module_exit(mx2_camera_exit);
1523MODULE_DESCRIPTION("i.MX27/i.MX25 SoC Camera Host driver"); 1526MODULE_DESCRIPTION("i.MX27/i.MX25 SoC Camera Host driver");
1524MODULE_AUTHOR("Sascha Hauer <sha@pengutronix.de>"); 1527MODULE_AUTHOR("Sascha Hauer <sha@pengutronix.de>");
1525MODULE_LICENSE("GPL"); 1528MODULE_LICENSE("GPL");
1529MODULE_VERSION(MX2_CAM_VERSION);
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index c7680eb83664..c045b47803ad 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -11,7 +11,6 @@
11 11
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/version.h>
15#include <linux/videodev2.h> 14#include <linux/videodev2.h>
16#include <linux/platform_device.h> 15#include <linux/platform_device.h>
17#include <linux/clk.h> 16#include <linux/clk.h>
@@ -195,7 +194,7 @@ static int mx3_videobuf_setup(struct vb2_queue *vq,
195 unsigned long sizes[], void *alloc_ctxs[]) 194 unsigned long sizes[], void *alloc_ctxs[])
196{ 195{
197 struct soc_camera_device *icd = soc_camera_from_vb2q(vq); 196 struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
198 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 197 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
199 struct mx3_camera_dev *mx3_cam = ici->priv; 198 struct mx3_camera_dev *mx3_cam = ici->priv;
200 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, 199 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
201 icd->current_fmt->host_fmt); 200 icd->current_fmt->host_fmt);
@@ -224,7 +223,7 @@ static int mx3_videobuf_setup(struct vb2_queue *vq,
224static int mx3_videobuf_prepare(struct vb2_buffer *vb) 223static int mx3_videobuf_prepare(struct vb2_buffer *vb)
225{ 224{
226 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); 225 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
227 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 226 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
228 struct mx3_camera_dev *mx3_cam = ici->priv; 227 struct mx3_camera_dev *mx3_cam = ici->priv;
229 struct idmac_channel *ichan = mx3_cam->idmac_channel[0]; 228 struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
230 struct scatterlist *sg; 229 struct scatterlist *sg;
@@ -242,7 +241,7 @@ static int mx3_videobuf_prepare(struct vb2_buffer *vb)
242 new_size = bytes_per_line * icd->user_height; 241 new_size = bytes_per_line * icd->user_height;
243 242
244 if (vb2_plane_size(vb, 0) < new_size) { 243 if (vb2_plane_size(vb, 0) < new_size) {
245 dev_err(icd->dev.parent, "Buffer too small (%lu < %zu)\n", 244 dev_err(icd->parent, "Buffer too small (%lu < %zu)\n",
246 vb2_plane_size(vb, 0), new_size); 245 vb2_plane_size(vb, 0), new_size);
247 return -ENOBUFS; 246 return -ENOBUFS;
248 } 247 }
@@ -284,7 +283,7 @@ static enum pixel_fmt fourcc_to_ipu_pix(__u32 fourcc)
284static void mx3_videobuf_queue(struct vb2_buffer *vb) 283static void mx3_videobuf_queue(struct vb2_buffer *vb)
285{ 284{
286 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); 285 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
287 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 286 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
288 struct mx3_camera_dev *mx3_cam = ici->priv; 287 struct mx3_camera_dev *mx3_cam = ici->priv;
289 struct mx3_camera_buffer *buf = to_mx3_vb(vb); 288 struct mx3_camera_buffer *buf = to_mx3_vb(vb);
290 struct dma_async_tx_descriptor *txd = buf->txd; 289 struct dma_async_tx_descriptor *txd = buf->txd;
@@ -337,7 +336,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
337 spin_unlock_irq(&mx3_cam->lock); 336 spin_unlock_irq(&mx3_cam->lock);
338 337
339 cookie = txd->tx_submit(txd); 338 cookie = txd->tx_submit(txd);
340 dev_dbg(icd->dev.parent, "Submitted cookie %d DMA 0x%08x\n", 339 dev_dbg(icd->parent, "Submitted cookie %d DMA 0x%08x\n",
341 cookie, sg_dma_address(&buf->sg)); 340 cookie, sg_dma_address(&buf->sg));
342 341
343 if (cookie >= 0) 342 if (cookie >= 0)
@@ -358,13 +357,13 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
358static void mx3_videobuf_release(struct vb2_buffer *vb) 357static void mx3_videobuf_release(struct vb2_buffer *vb)
359{ 358{
360 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); 359 struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
361 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 360 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
362 struct mx3_camera_dev *mx3_cam = ici->priv; 361 struct mx3_camera_dev *mx3_cam = ici->priv;
363 struct mx3_camera_buffer *buf = to_mx3_vb(vb); 362 struct mx3_camera_buffer *buf = to_mx3_vb(vb);
364 struct dma_async_tx_descriptor *txd = buf->txd; 363 struct dma_async_tx_descriptor *txd = buf->txd;
365 unsigned long flags; 364 unsigned long flags;
366 365
367 dev_dbg(icd->dev.parent, 366 dev_dbg(icd->parent,
368 "Release%s DMA 0x%08x, queue %sempty\n", 367 "Release%s DMA 0x%08x, queue %sempty\n",
369 mx3_cam->active == buf ? " active" : "", sg_dma_address(&buf->sg), 368 mx3_cam->active == buf ? " active" : "", sg_dma_address(&buf->sg),
370 list_empty(&buf->queue) ? "" : "not "); 369 list_empty(&buf->queue) ? "" : "not ");
@@ -403,7 +402,7 @@ static int mx3_videobuf_init(struct vb2_buffer *vb)
403static int mx3_stop_streaming(struct vb2_queue *q) 402static int mx3_stop_streaming(struct vb2_queue *q)
404{ 403{
405 struct soc_camera_device *icd = soc_camera_from_vb2q(q); 404 struct soc_camera_device *icd = soc_camera_from_vb2q(q);
406 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 405 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
407 struct mx3_camera_dev *mx3_cam = ici->priv; 406 struct mx3_camera_dev *mx3_cam = ici->priv;
408 struct idmac_channel *ichan = mx3_cam->idmac_channel[0]; 407 struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
409 struct dma_chan *chan; 408 struct dma_chan *chan;
@@ -499,7 +498,7 @@ static void mx3_camera_activate(struct mx3_camera_dev *mx3_cam,
499 498
500 clk_enable(mx3_cam->clk); 499 clk_enable(mx3_cam->clk);
501 rate = clk_round_rate(mx3_cam->clk, mx3_cam->mclk); 500 rate = clk_round_rate(mx3_cam->clk, mx3_cam->mclk);
502 dev_dbg(icd->dev.parent, "Set SENS_CONF to %x, rate %ld\n", conf, rate); 501 dev_dbg(icd->parent, "Set SENS_CONF to %x, rate %ld\n", conf, rate);
503 if (rate) 502 if (rate)
504 clk_set_rate(mx3_cam->clk, rate); 503 clk_set_rate(mx3_cam->clk, rate);
505} 504}
@@ -507,7 +506,7 @@ static void mx3_camera_activate(struct mx3_camera_dev *mx3_cam,
507/* Called with .video_lock held */ 506/* Called with .video_lock held */
508static int mx3_camera_add_device(struct soc_camera_device *icd) 507static int mx3_camera_add_device(struct soc_camera_device *icd)
509{ 508{
510 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 509 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
511 struct mx3_camera_dev *mx3_cam = ici->priv; 510 struct mx3_camera_dev *mx3_cam = ici->priv;
512 511
513 if (mx3_cam->icd) 512 if (mx3_cam->icd)
@@ -517,7 +516,7 @@ static int mx3_camera_add_device(struct soc_camera_device *icd)
517 516
518 mx3_cam->icd = icd; 517 mx3_cam->icd = icd;
519 518
520 dev_info(icd->dev.parent, "MX3 Camera driver attached to camera %d\n", 519 dev_info(icd->parent, "MX3 Camera driver attached to camera %d\n",
521 icd->devnum); 520 icd->devnum);
522 521
523 return 0; 522 return 0;
@@ -526,7 +525,7 @@ static int mx3_camera_add_device(struct soc_camera_device *icd)
526/* Called with .video_lock held */ 525/* Called with .video_lock held */
527static void mx3_camera_remove_device(struct soc_camera_device *icd) 526static void mx3_camera_remove_device(struct soc_camera_device *icd)
528{ 527{
529 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 528 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
530 struct mx3_camera_dev *mx3_cam = ici->priv; 529 struct mx3_camera_dev *mx3_cam = ici->priv;
531 struct idmac_channel **ichan = &mx3_cam->idmac_channel[0]; 530 struct idmac_channel **ichan = &mx3_cam->idmac_channel[0];
532 531
@@ -541,7 +540,7 @@ static void mx3_camera_remove_device(struct soc_camera_device *icd)
541 540
542 mx3_cam->icd = NULL; 541 mx3_cam->icd = NULL;
543 542
544 dev_info(icd->dev.parent, "MX3 Camera driver detached from camera %d\n", 543 dev_info(icd->parent, "MX3 Camera driver detached from camera %d\n",
545 icd->devnum); 544 icd->devnum);
546} 545}
547 546
@@ -608,12 +607,12 @@ static int test_platform_param(struct mx3_camera_dev *mx3_cam,
608static int mx3_camera_try_bus_param(struct soc_camera_device *icd, 607static int mx3_camera_try_bus_param(struct soc_camera_device *icd,
609 const unsigned int depth) 608 const unsigned int depth)
610{ 609{
611 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 610 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
612 struct mx3_camera_dev *mx3_cam = ici->priv; 611 struct mx3_camera_dev *mx3_cam = ici->priv;
613 unsigned long bus_flags, camera_flags; 612 unsigned long bus_flags, camera_flags;
614 int ret = test_platform_param(mx3_cam, depth, &bus_flags); 613 int ret = test_platform_param(mx3_cam, depth, &bus_flags);
615 614
616 dev_dbg(icd->dev.parent, "request bus width %d bit: %d\n", depth, ret); 615 dev_dbg(icd->parent, "request bus width %d bit: %d\n", depth, ret);
617 616
618 if (ret < 0) 617 if (ret < 0)
619 return ret; 618 return ret;
@@ -622,7 +621,7 @@ static int mx3_camera_try_bus_param(struct soc_camera_device *icd,
622 621
623 ret = soc_camera_bus_param_compatible(camera_flags, bus_flags); 622 ret = soc_camera_bus_param_compatible(camera_flags, bus_flags);
624 if (ret < 0) 623 if (ret < 0)
625 dev_warn(icd->dev.parent, 624 dev_warn(icd->parent,
626 "Flags incompatible: camera %lx, host %lx\n", 625 "Flags incompatible: camera %lx, host %lx\n",
627 camera_flags, bus_flags); 626 camera_flags, bus_flags);
628 627
@@ -676,7 +675,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int id
676 struct soc_camera_format_xlate *xlate) 675 struct soc_camera_format_xlate *xlate)
677{ 676{
678 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 677 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
679 struct device *dev = icd->dev.parent; 678 struct device *dev = icd->parent;
680 int formats = 0, ret; 679 int formats = 0, ret;
681 enum v4l2_mbus_pixelcode code; 680 enum v4l2_mbus_pixelcode code;
682 const struct soc_mbus_pixelfmt *fmt; 681 const struct soc_mbus_pixelfmt *fmt;
@@ -688,7 +687,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int id
688 687
689 fmt = soc_mbus_get_fmtdesc(code); 688 fmt = soc_mbus_get_fmtdesc(code);
690 if (!fmt) { 689 if (!fmt) {
691 dev_warn(icd->dev.parent, 690 dev_warn(icd->parent,
692 "Unsupported format code #%u: %d\n", idx, code); 691 "Unsupported format code #%u: %d\n", idx, code);
693 return 0; 692 return 0;
694 } 693 }
@@ -816,7 +815,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
816 struct v4l2_crop *a) 815 struct v4l2_crop *a)
817{ 816{
818 struct v4l2_rect *rect = &a->c; 817 struct v4l2_rect *rect = &a->c;
819 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 818 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
820 struct mx3_camera_dev *mx3_cam = ici->priv; 819 struct mx3_camera_dev *mx3_cam = ici->priv;
821 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 820 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
822 struct v4l2_mbus_framefmt mf; 821 struct v4l2_mbus_framefmt mf;
@@ -849,7 +848,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
849 configure_geometry(mx3_cam, mf.width, mf.height, 848 configure_geometry(mx3_cam, mf.width, mf.height,
850 icd->current_fmt->host_fmt); 849 icd->current_fmt->host_fmt);
851 850
852 dev_dbg(icd->dev.parent, "Sensor cropped %dx%d\n", 851 dev_dbg(icd->parent, "Sensor cropped %dx%d\n",
853 mf.width, mf.height); 852 mf.width, mf.height);
854 853
855 icd->user_width = mf.width; 854 icd->user_width = mf.width;
@@ -861,7 +860,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
861static int mx3_camera_set_fmt(struct soc_camera_device *icd, 860static int mx3_camera_set_fmt(struct soc_camera_device *icd,
862 struct v4l2_format *f) 861 struct v4l2_format *f)
863{ 862{
864 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 863 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
865 struct mx3_camera_dev *mx3_cam = ici->priv; 864 struct mx3_camera_dev *mx3_cam = ici->priv;
866 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 865 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
867 const struct soc_camera_format_xlate *xlate; 866 const struct soc_camera_format_xlate *xlate;
@@ -871,13 +870,13 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd,
871 870
872 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); 871 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
873 if (!xlate) { 872 if (!xlate) {
874 dev_warn(icd->dev.parent, "Format %x not found\n", 873 dev_warn(icd->parent, "Format %x not found\n",
875 pix->pixelformat); 874 pix->pixelformat);
876 return -EINVAL; 875 return -EINVAL;
877 } 876 }
878 877
879 stride_align(&pix->width); 878 stride_align(&pix->width);
880 dev_dbg(icd->dev.parent, "Set format %dx%d\n", pix->width, pix->height); 879 dev_dbg(icd->parent, "Set format %dx%d\n", pix->width, pix->height);
881 880
882 /* 881 /*
883 * Might have to perform a complete interface initialisation like in 882 * Might have to perform a complete interface initialisation like in
@@ -913,13 +912,7 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd,
913 pix->colorspace = mf.colorspace; 912 pix->colorspace = mf.colorspace;
914 icd->current_fmt = xlate; 913 icd->current_fmt = xlate;
915 914
916 pix->bytesperline = soc_mbus_bytes_per_line(pix->width, 915 dev_dbg(icd->parent, "Sensor set %dx%d\n", pix->width, pix->height);
917 xlate->host_fmt);
918 if (pix->bytesperline < 0)
919 return pix->bytesperline;
920 pix->sizeimage = pix->height * pix->bytesperline;
921
922 dev_dbg(icd->dev.parent, "Sensor set %dx%d\n", pix->width, pix->height);
923 916
924 return ret; 917 return ret;
925} 918}
@@ -936,7 +929,7 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
936 929
937 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); 930 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
938 if (pixfmt && !xlate) { 931 if (pixfmt && !xlate) {
939 dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt); 932 dev_warn(icd->parent, "Format %x not found\n", pixfmt);
940 return -EINVAL; 933 return -EINVAL;
941 } 934 }
942 935
@@ -946,12 +939,6 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
946 if (pix->width > 4096) 939 if (pix->width > 4096)
947 pix->width = 4096; 940 pix->width = 4096;
948 941
949 pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
950 xlate->host_fmt);
951 if (pix->bytesperline < 0)
952 return pix->bytesperline;
953 pix->sizeimage = pix->height * pix->bytesperline;
954
955 /* limit to sensor capabilities */ 942 /* limit to sensor capabilities */
956 mf.width = pix->width; 943 mf.width = pix->width;
957 mf.height = pix->height; 944 mf.height = pix->height;
@@ -974,7 +961,7 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
974 case V4L2_FIELD_NONE: 961 case V4L2_FIELD_NONE:
975 break; 962 break;
976 default: 963 default:
977 dev_err(icd->dev.parent, "Field type %d unsupported.\n", 964 dev_err(icd->parent, "Field type %d unsupported.\n",
978 mf.field); 965 mf.field);
979 ret = -EINVAL; 966 ret = -EINVAL;
980 } 967 }
@@ -1000,7 +987,6 @@ static int mx3_camera_querycap(struct soc_camera_host *ici,
1000{ 987{
1001 /* cap->name is set by the firendly caller:-> */ 988 /* cap->name is set by the firendly caller:-> */
1002 strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card)); 989 strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card));
1003 cap->version = KERNEL_VERSION(0, 2, 2);
1004 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; 990 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
1005 991
1006 return 0; 992 return 0;
@@ -1008,7 +994,7 @@ static int mx3_camera_querycap(struct soc_camera_host *ici,
1008 994
1009static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) 995static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
1010{ 996{
1011 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 997 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
1012 struct mx3_camera_dev *mx3_cam = ici->priv; 998 struct mx3_camera_dev *mx3_cam = ici->priv;
1013 unsigned long bus_flags, camera_flags, common_flags; 999 unsigned long bus_flags, camera_flags, common_flags;
1014 u32 dw, sens_conf; 1000 u32 dw, sens_conf;
@@ -1016,7 +1002,7 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
1016 int buswidth; 1002 int buswidth;
1017 int ret; 1003 int ret;
1018 const struct soc_camera_format_xlate *xlate; 1004 const struct soc_camera_format_xlate *xlate;
1019 struct device *dev = icd->dev.parent; 1005 struct device *dev = icd->parent;
1020 1006
1021 fmt = soc_mbus_get_fmtdesc(icd->current_fmt->code); 1007 fmt = soc_mbus_get_fmtdesc(icd->current_fmt->code);
1022 if (!fmt) 1008 if (!fmt)
@@ -1325,4 +1311,5 @@ module_exit(mx3_camera_exit);
1325MODULE_DESCRIPTION("i.MX3x SoC Camera Host driver"); 1311MODULE_DESCRIPTION("i.MX3x SoC Camera Host driver");
1326MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>"); 1312MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
1327MODULE_LICENSE("GPL v2"); 1313MODULE_LICENSE("GPL v2");
1314MODULE_VERSION("0.2.3");
1328MODULE_ALIAS("platform:" MX3_CAM_DRV_NAME); 1315MODULE_ALIAS("platform:" MX3_CAM_DRV_NAME);
diff --git a/drivers/media/video/omap/Kconfig b/drivers/media/video/omap/Kconfig
index e63233fd2aaa..390ab094f9f2 100644
--- a/drivers/media/video/omap/Kconfig
+++ b/drivers/media/video/omap/Kconfig
@@ -1,11 +1,14 @@
1config VIDEO_OMAP2_VOUT_VRFB
2 bool
3
1config VIDEO_OMAP2_VOUT 4config VIDEO_OMAP2_VOUT
2 tristate "OMAP2/OMAP3 V4L2-Display driver" 5 tristate "OMAP2/OMAP3 V4L2-Display driver"
3 depends on ARCH_OMAP2 || ARCH_OMAP3 6 depends on ARCH_OMAP2 || ARCH_OMAP3
4 select VIDEOBUF_GEN 7 select VIDEOBUF_GEN
5 select VIDEOBUF_DMA_CONTIG 8 select VIDEOBUF_DMA_CONTIG
6 select OMAP2_DSS 9 select OMAP2_DSS
7 select OMAP2_VRAM 10 select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
8 select OMAP2_VRFB 11 select VIDEO_OMAP2_VOUT_VRFB if VIDEO_OMAP2_VOUT && OMAP2_VRFB
9 default n 12 default n
10 ---help--- 13 ---help---
11 V4L2 Display driver support for OMAP2/3 based boards. 14 V4L2 Display driver support for OMAP2/3 based boards.
diff --git a/drivers/media/video/omap/Makefile b/drivers/media/video/omap/Makefile
index b28788070ae1..fc410b438f7d 100644
--- a/drivers/media/video/omap/Makefile
+++ b/drivers/media/video/omap/Makefile
@@ -4,4 +4,5 @@
4 4
5# OMAP2/3 Display driver 5# OMAP2/3 Display driver
6omap-vout-y := omap_vout.o omap_voutlib.o 6omap-vout-y := omap_vout.o omap_voutlib.o
7omap-vout-$(CONFIG_VIDEO_OMAP2_VOUT_VRFB) += omap_vout_vrfb.o
7obj-$(CONFIG_VIDEO_OMAP2_VOUT) += omap-vout.o 8obj-$(CONFIG_VIDEO_OMAP2_VOUT) += omap-vout.o
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index a647894d3a71..b5ef36222440 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -35,28 +35,26 @@
35#include <linux/sched.h> 35#include <linux/sched.h>
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/platform_device.h> 37#include <linux/platform_device.h>
38#include <linux/dma-mapping.h>
39#include <linux/irq.h> 38#include <linux/irq.h>
40#include <linux/videodev2.h> 39#include <linux/videodev2.h>
41#include <linux/slab.h> 40#include <linux/dma-mapping.h>
42 41
43#include <media/videobuf-dma-contig.h> 42#include <media/videobuf-dma-contig.h>
44#include <media/v4l2-device.h> 43#include <media/v4l2-device.h>
45#include <media/v4l2-ioctl.h> 44#include <media/v4l2-ioctl.h>
46 45
47#include <plat/dma.h> 46#include <plat/dma.h>
48#include <plat/vram.h>
49#include <plat/vrfb.h> 47#include <plat/vrfb.h>
50#include <video/omapdss.h> 48#include <video/omapdss.h>
51 49
52#include "omap_voutlib.h" 50#include "omap_voutlib.h"
53#include "omap_voutdef.h" 51#include "omap_voutdef.h"
52#include "omap_vout_vrfb.h"
54 53
55MODULE_AUTHOR("Texas Instruments"); 54MODULE_AUTHOR("Texas Instruments");
56MODULE_DESCRIPTION("OMAP Video for Linux Video out driver"); 55MODULE_DESCRIPTION("OMAP Video for Linux Video out driver");
57MODULE_LICENSE("GPL"); 56MODULE_LICENSE("GPL");
58 57
59
60/* Driver Configuration macros */ 58/* Driver Configuration macros */
61#define VOUT_NAME "omap_vout" 59#define VOUT_NAME "omap_vout"
62 60
@@ -65,31 +63,6 @@ enum omap_vout_channels {
65 OMAP_VIDEO2, 63 OMAP_VIDEO2,
66}; 64};
67 65
68enum dma_channel_state {
69 DMA_CHAN_NOT_ALLOTED,
70 DMA_CHAN_ALLOTED,
71};
72
73#define QQVGA_WIDTH 160
74#define QQVGA_HEIGHT 120
75
76/* Max Resolution supported by the driver */
77#define VID_MAX_WIDTH 1280 /* Largest width */
78#define VID_MAX_HEIGHT 720 /* Largest height */
79
80/* Mimimum requirement is 2x2 for DSS */
81#define VID_MIN_WIDTH 2
82#define VID_MIN_HEIGHT 2
83
84/* 2048 x 2048 is max res supported by OMAP display controller */
85#define MAX_PIXELS_PER_LINE 2048
86
87#define VRFB_TX_TIMEOUT 1000
88#define VRFB_NUM_BUFS 4
89
90/* Max buffer size tobe allocated during init */
91#define OMAP_VOUT_MAX_BUF_SIZE (VID_MAX_WIDTH*VID_MAX_HEIGHT*4)
92
93static struct videobuf_queue_ops video_vbq_ops; 66static struct videobuf_queue_ops video_vbq_ops;
94/* Variables configurable through module params*/ 67/* Variables configurable through module params*/
95static u32 video1_numbuffers = 3; 68static u32 video1_numbuffers = 3;
@@ -172,84 +145,6 @@ static const struct v4l2_fmtdesc omap_formats[] = {
172#define NUM_OUTPUT_FORMATS (ARRAY_SIZE(omap_formats)) 145#define NUM_OUTPUT_FORMATS (ARRAY_SIZE(omap_formats))
173 146
174/* 147/*
175 * Allocate buffers
176 */
177static unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr)
178{
179 u32 order, size;
180 unsigned long virt_addr, addr;
181
182 size = PAGE_ALIGN(buf_size);
183 order = get_order(size);
184 virt_addr = __get_free_pages(GFP_KERNEL | GFP_DMA, order);
185 addr = virt_addr;
186
187 if (virt_addr) {
188 while (size > 0) {
189 SetPageReserved(virt_to_page(addr));
190 addr += PAGE_SIZE;
191 size -= PAGE_SIZE;
192 }
193 }
194 *phys_addr = (u32) virt_to_phys((void *) virt_addr);
195 return virt_addr;
196}
197
198/*
199 * Free buffers
200 */
201static void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
202{
203 u32 order, size;
204 unsigned long addr = virtaddr;
205
206 size = PAGE_ALIGN(buf_size);
207 order = get_order(size);
208
209 while (size > 0) {
210 ClearPageReserved(virt_to_page(addr));
211 addr += PAGE_SIZE;
212 size -= PAGE_SIZE;
213 }
214 free_pages((unsigned long) virtaddr, order);
215}
216
217/*
218 * Function for allocating video buffers
219 */
220static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
221 unsigned int *count, int startindex)
222{
223 int i, j;
224
225 for (i = 0; i < *count; i++) {
226 if (!vout->smsshado_virt_addr[i]) {
227 vout->smsshado_virt_addr[i] =
228 omap_vout_alloc_buffer(vout->smsshado_size,
229 &vout->smsshado_phy_addr[i]);
230 }
231 if (!vout->smsshado_virt_addr[i] && startindex != -1) {
232 if (V4L2_MEMORY_MMAP == vout->memory && i >= startindex)
233 break;
234 }
235 if (!vout->smsshado_virt_addr[i]) {
236 for (j = 0; j < i; j++) {
237 omap_vout_free_buffer(
238 vout->smsshado_virt_addr[j],
239 vout->smsshado_size);
240 vout->smsshado_virt_addr[j] = 0;
241 vout->smsshado_phy_addr[j] = 0;
242 }
243 *count = 0;
244 return -ENOMEM;
245 }
246 memset((void *) vout->smsshado_virt_addr[i], 0,
247 vout->smsshado_size);
248 }
249 return 0;
250}
251
252/*
253 * Try format 148 * Try format
254 */ 149 */
255static int omap_vout_try_format(struct v4l2_pix_format *pix) 150static int omap_vout_try_format(struct v4l2_pix_format *pix)
@@ -342,73 +237,9 @@ static u32 omap_vout_uservirt_to_phys(u32 virtp)
342} 237}
343 238
344/* 239/*
345 * Wakes up the application once the DMA transfer to VRFB space is completed.
346 */
347static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data)
348{
349 struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
350
351 t->tx_status = 1;
352 wake_up_interruptible(&t->wait);
353}
354
355/*
356 * Release the VRFB context once the module exits
357 */
358static void omap_vout_release_vrfb(struct omap_vout_device *vout)
359{
360 int i;
361
362 for (i = 0; i < VRFB_NUM_BUFS; i++)
363 omap_vrfb_release_ctx(&vout->vrfb_context[i]);
364
365 if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
366 vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
367 omap_free_dma(vout->vrfb_dma_tx.dma_ch);
368 }
369}
370
371/*
372 * Return true if rotation is 90 or 270
373 */
374static inline int rotate_90_or_270(const struct omap_vout_device *vout)
375{
376 return (vout->rotation == dss_rotation_90_degree ||
377 vout->rotation == dss_rotation_270_degree);
378}
379
380/*
381 * Return true if rotation is enabled
382 */
383static inline int rotation_enabled(const struct omap_vout_device *vout)
384{
385 return vout->rotation || vout->mirror;
386}
387
388/*
389 * Reverse the rotation degree if mirroring is enabled
390 */
391static inline int calc_rotation(const struct omap_vout_device *vout)
392{
393 if (!vout->mirror)
394 return vout->rotation;
395
396 switch (vout->rotation) {
397 case dss_rotation_90_degree:
398 return dss_rotation_270_degree;
399 case dss_rotation_270_degree:
400 return dss_rotation_90_degree;
401 case dss_rotation_180_degree:
402 return dss_rotation_0_degree;
403 default:
404 return dss_rotation_180_degree;
405 }
406}
407
408/*
409 * Free the V4L2 buffers 240 * Free the V4L2 buffers
410 */ 241 */
411static void omap_vout_free_buffers(struct omap_vout_device *vout) 242void omap_vout_free_buffers(struct omap_vout_device *vout)
412{ 243{
413 int i, numbuffers; 244 int i, numbuffers;
414 245
@@ -425,52 +256,6 @@ static void omap_vout_free_buffers(struct omap_vout_device *vout)
425} 256}
426 257
427/* 258/*
428 * Free VRFB buffers
429 */
430static void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
431{
432 int j;
433
434 for (j = 0; j < VRFB_NUM_BUFS; j++) {
435 omap_vout_free_buffer(vout->smsshado_virt_addr[j],
436 vout->smsshado_size);
437 vout->smsshado_virt_addr[j] = 0;
438 vout->smsshado_phy_addr[j] = 0;
439 }
440}
441
442/*
443 * Allocate the buffers for the VRFB space. Data is copied from V4L2
444 * buffers to the VRFB buffers using the DMA engine.
445 */
446static int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
447 unsigned int *count, unsigned int startindex)
448{
449 int i;
450 bool yuv_mode;
451
452 /* Allocate the VRFB buffers only if the buffers are not
453 * allocated during init time.
454 */
455 if ((rotation_enabled(vout)) && !vout->vrfb_static_allocation)
456 if (omap_vout_allocate_vrfb_buffers(vout, count, startindex))
457 return -ENOMEM;
458
459 if (vout->dss_mode == OMAP_DSS_COLOR_YUV2 ||
460 vout->dss_mode == OMAP_DSS_COLOR_UYVY)
461 yuv_mode = true;
462 else
463 yuv_mode = false;
464
465 for (i = 0; i < *count; i++)
466 omap_vrfb_setup(&vout->vrfb_context[i],
467 vout->smsshado_phy_addr[i], vout->pix.width,
468 vout->pix.height, vout->bpp, yuv_mode);
469
470 return 0;
471}
472
473/*
474 * Convert V4L2 rotation to DSS rotation 259 * Convert V4L2 rotation to DSS rotation
475 * V4L2 understand 0, 90, 180, 270. 260 * V4L2 understand 0, 90, 180, 270.
476 * Convert to 0, 1, 2 and 3 respectively for DSS 261 * Convert to 0, 1, 2 and 3 respectively for DSS
@@ -499,124 +284,38 @@ static int v4l2_rot_to_dss_rot(int v4l2_rotation,
499 return ret; 284 return ret;
500} 285}
501 286
502/*
503 * Calculate the buffer offsets from which the streaming should
504 * start. This offset calculation is mainly required because of
505 * the VRFB 32 pixels alignment with rotation.
506 */
507static int omap_vout_calculate_offset(struct omap_vout_device *vout) 287static int omap_vout_calculate_offset(struct omap_vout_device *vout)
508{ 288{
509 struct omap_overlay *ovl;
510 enum dss_rotation rotation;
511 struct omapvideo_info *ovid; 289 struct omapvideo_info *ovid;
512 bool mirroring = vout->mirror;
513 struct omap_dss_device *cur_display;
514 struct v4l2_rect *crop = &vout->crop; 290 struct v4l2_rect *crop = &vout->crop;
515 struct v4l2_pix_format *pix = &vout->pix; 291 struct v4l2_pix_format *pix = &vout->pix;
516 int *cropped_offset = &vout->cropped_offset; 292 int *cropped_offset = &vout->cropped_offset;
517 int vr_ps = 1, ps = 2, temp_ps = 2; 293 int ps = 2, line_length = 0;
518 int offset = 0, ctop = 0, cleft = 0, line_length = 0;
519 294
520 ovid = &vout->vid_info; 295 ovid = &vout->vid_info;
521 ovl = ovid->overlays[0];
522 /* get the display device attached to the overlay */
523 if (!ovl->manager || !ovl->manager->device)
524 return -1;
525 296
526 cur_display = ovl->manager->device; 297 if (ovid->rotation_type == VOUT_ROT_VRFB) {
527 rotation = calc_rotation(vout); 298 omap_vout_calculate_vrfb_offset(vout);
299 } else {
300 vout->line_length = line_length = pix->width;
528 301
529 if (V4L2_PIX_FMT_YUYV == pix->pixelformat || 302 if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
530 V4L2_PIX_FMT_UYVY == pix->pixelformat) { 303 V4L2_PIX_FMT_UYVY == pix->pixelformat)
531 if (rotation_enabled(vout)) { 304 ps = 2;
532 /* 305 else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat)
533 * ps - Actual pixel size for YUYV/UYVY for
534 * VRFB/Mirroring is 4 bytes
535 * vr_ps - Virtually pixel size for YUYV/UYVY is
536 * 2 bytes
537 */
538 ps = 4; 306 ps = 4;
539 vr_ps = 2; 307 else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat)
540 } else { 308 ps = 3;
541 ps = 2; /* otherwise the pixel size is 2 byte */
542 }
543 } else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) {
544 ps = 4;
545 } else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) {
546 ps = 3;
547 }
548 vout->ps = ps;
549 vout->vr_ps = vr_ps;
550
551 if (rotation_enabled(vout)) {
552 line_length = MAX_PIXELS_PER_LINE;
553 ctop = (pix->height - crop->height) - crop->top;
554 cleft = (pix->width - crop->width) - crop->left;
555 } else {
556 line_length = pix->width;
557 }
558 vout->line_length = line_length;
559 switch (rotation) {
560 case dss_rotation_90_degree:
561 offset = vout->vrfb_context[0].yoffset *
562 vout->vrfb_context[0].bytespp;
563 temp_ps = ps / vr_ps;
564 if (mirroring == 0) {
565 *cropped_offset = offset + line_length *
566 temp_ps * cleft + crop->top * temp_ps;
567 } else {
568 *cropped_offset = offset + line_length * temp_ps *
569 cleft + crop->top * temp_ps + (line_length *
570 ((crop->width / (vr_ps)) - 1) * ps);
571 }
572 break;
573 case dss_rotation_180_degree:
574 offset = ((MAX_PIXELS_PER_LINE * vout->vrfb_context[0].yoffset *
575 vout->vrfb_context[0].bytespp) +
576 (vout->vrfb_context[0].xoffset *
577 vout->vrfb_context[0].bytespp));
578 if (mirroring == 0) {
579 *cropped_offset = offset + (line_length * ps * ctop) +
580 (cleft / vr_ps) * ps;
581 309
582 } else { 310 vout->ps = ps;
583 *cropped_offset = offset + (line_length * ps * ctop) + 311
584 (cleft / vr_ps) * ps + (line_length * 312 *cropped_offset = (line_length * ps) *
585 (crop->height - 1) * ps); 313 crop->top + crop->left * ps;
586 }
587 break;
588 case dss_rotation_270_degree:
589 offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
590 vout->vrfb_context[0].bytespp;
591 temp_ps = ps / vr_ps;
592 if (mirroring == 0) {
593 *cropped_offset = offset + line_length *
594 temp_ps * crop->left + ctop * ps;
595 } else {
596 *cropped_offset = offset + line_length *
597 temp_ps * crop->left + ctop * ps +
598 (line_length * ((crop->width / vr_ps) - 1) *
599 ps);
600 }
601 break;
602 case dss_rotation_0_degree:
603 if (mirroring == 0) {
604 *cropped_offset = (line_length * ps) *
605 crop->top + (crop->left / vr_ps) * ps;
606 } else {
607 *cropped_offset = (line_length * ps) *
608 crop->top + (crop->left / vr_ps) * ps +
609 (line_length * (crop->height - 1) * ps);
610 }
611 break;
612 default:
613 *cropped_offset = (line_length * ps * crop->top) /
614 vr_ps + (crop->left * ps) / vr_ps +
615 ((crop->width / vr_ps) - 1) * ps;
616 break;
617 } 314 }
315
618 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "%s Offset:%x\n", 316 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "%s Offset:%x\n",
619 __func__, *cropped_offset); 317 __func__, vout->cropped_offset);
318
620 return 0; 319 return 0;
621} 320}
622 321
@@ -664,7 +363,7 @@ static int video_mode_to_dss_mode(struct omap_vout_device *vout)
664/* 363/*
665 * Setup the overlay 364 * Setup the overlay
666 */ 365 */
667int omapvid_setup_overlay(struct omap_vout_device *vout, 366static int omapvid_setup_overlay(struct omap_vout_device *vout,
668 struct omap_overlay *ovl, int posx, int posy, int outw, 367 struct omap_overlay *ovl, int posx, int posy, int outw,
669 int outh, u32 addr) 368 int outh, u32 addr)
670{ 369{
@@ -687,7 +386,7 @@ int omapvid_setup_overlay(struct omap_vout_device *vout,
687 /* Setup the input plane parameters according to 386 /* Setup the input plane parameters according to
688 * rotation value selected. 387 * rotation value selected.
689 */ 388 */
690 if (rotate_90_or_270(vout)) { 389 if (is_rotation_90_or_270(vout)) {
691 cropheight = vout->crop.width; 390 cropheight = vout->crop.width;
692 cropwidth = vout->crop.height; 391 cropwidth = vout->crop.height;
693 pixheight = vout->pix.width; 392 pixheight = vout->pix.width;
@@ -711,7 +410,7 @@ int omapvid_setup_overlay(struct omap_vout_device *vout,
711 info.out_width = outw; 410 info.out_width = outw;
712 info.out_height = outh; 411 info.out_height = outh;
713 info.global_alpha = vout->win.global_alpha; 412 info.global_alpha = vout->win.global_alpha;
714 if (!rotation_enabled(vout)) { 413 if (!is_rotation_enabled(vout)) {
715 info.rotation = 0; 414 info.rotation = 0;
716 info.rotation_type = OMAP_DSS_ROT_DMA; 415 info.rotation_type = OMAP_DSS_ROT_DMA;
717 info.screen_width = pixwidth; 416 info.screen_width = pixwidth;
@@ -744,7 +443,7 @@ setup_ovl_err:
744/* 443/*
745 * Initialize the overlay structure 444 * Initialize the overlay structure
746 */ 445 */
747int omapvid_init(struct omap_vout_device *vout, u32 addr) 446static int omapvid_init(struct omap_vout_device *vout, u32 addr)
748{ 447{
749 int ret = 0, i; 448 int ret = 0, i;
750 struct v4l2_window *win; 449 struct v4l2_window *win;
@@ -809,7 +508,7 @@ omapvid_init_err:
809/* 508/*
810 * Apply the changes set the go bit of DSS 509 * Apply the changes set the go bit of DSS
811 */ 510 */
812int omapvid_apply_changes(struct omap_vout_device *vout) 511static int omapvid_apply_changes(struct omap_vout_device *vout)
813{ 512{
814 int i; 513 int i;
815 struct omap_overlay *ovl; 514 struct omap_overlay *ovl;
@@ -825,7 +524,7 @@ int omapvid_apply_changes(struct omap_vout_device *vout)
825 return 0; 524 return 0;
826} 525}
827 526
828void omap_vout_isr(void *arg, unsigned int irqstatus) 527static void omap_vout_isr(void *arg, unsigned int irqstatus)
829{ 528{
830 int ret; 529 int ret;
831 u32 addr, fid; 530 u32 addr, fid;
@@ -848,10 +547,20 @@ void omap_vout_isr(void *arg, unsigned int irqstatus)
848 547
849 spin_lock(&vout->vbq_lock); 548 spin_lock(&vout->vbq_lock);
850 do_gettimeofday(&timevalue); 549 do_gettimeofday(&timevalue);
851 if (cur_display->type == OMAP_DISPLAY_TYPE_DPI) {
852 if (!(irqstatus & DISPC_IRQ_VSYNC))
853 goto vout_isr_err;
854 550
551 if (cur_display->type != OMAP_DISPLAY_TYPE_VENC) {
552 switch (cur_display->type) {
553 case OMAP_DISPLAY_TYPE_DPI:
554 if (!(irqstatus & (DISPC_IRQ_VSYNC | DISPC_IRQ_VSYNC2)))
555 goto vout_isr_err;
556 break;
557 case OMAP_DISPLAY_TYPE_HDMI:
558 if (!(irqstatus & DISPC_IRQ_EVSYNC_EVEN))
559 goto vout_isr_err;
560 break;
561 default:
562 goto vout_isr_err;
563 }
855 if (!vout->first_int && (vout->cur_frm != vout->next_frm)) { 564 if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
856 vout->cur_frm->ts = timevalue; 565 vout->cur_frm->ts = timevalue;
857 vout->cur_frm->state = VIDEOBUF_DONE; 566 vout->cur_frm->state = VIDEOBUF_DONE;
@@ -875,7 +584,7 @@ void omap_vout_isr(void *arg, unsigned int irqstatus)
875 ret = omapvid_init(vout, addr); 584 ret = omapvid_init(vout, addr);
876 if (ret) 585 if (ret)
877 printk(KERN_ERR VOUT_NAME 586 printk(KERN_ERR VOUT_NAME
878 "failed to set overlay info\n"); 587 "failed to set overlay info\n");
879 /* Enable the pipeline and set the Go bit */ 588 /* Enable the pipeline and set the Go bit */
880 ret = omapvid_apply_changes(vout); 589 ret = omapvid_apply_changes(vout);
881 if (ret) 590 if (ret)
@@ -954,6 +663,7 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
954 int startindex = 0, i, j; 663 int startindex = 0, i, j;
955 u32 phy_addr = 0, virt_addr = 0; 664 u32 phy_addr = 0, virt_addr = 0;
956 struct omap_vout_device *vout = q->priv_data; 665 struct omap_vout_device *vout = q->priv_data;
666 struct omapvideo_info *ovid = &vout->vid_info;
957 667
958 if (!vout) 668 if (!vout)
959 return -EINVAL; 669 return -EINVAL;
@@ -966,13 +676,10 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
966 if (V4L2_MEMORY_MMAP == vout->memory && *count < startindex) 676 if (V4L2_MEMORY_MMAP == vout->memory && *count < startindex)
967 *count = startindex; 677 *count = startindex;
968 678
969 if ((rotation_enabled(vout)) && *count > VRFB_NUM_BUFS) 679 if (ovid->rotation_type == VOUT_ROT_VRFB) {
970 *count = VRFB_NUM_BUFS;
971
972 /* If rotation is enabled, allocate memory for VRFB space also */
973 if (rotation_enabled(vout))
974 if (omap_vout_vrfb_buffer_setup(vout, count, startindex)) 680 if (omap_vout_vrfb_buffer_setup(vout, count, startindex))
975 return -ENOMEM; 681 return -ENOMEM;
682 }
976 683
977 if (V4L2_MEMORY_MMAP != vout->memory) 684 if (V4L2_MEMORY_MMAP != vout->memory)
978 return 0; 685 return 0;
@@ -996,8 +703,11 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
996 virt_addr = omap_vout_alloc_buffer(vout->buffer_size, 703 virt_addr = omap_vout_alloc_buffer(vout->buffer_size,
997 &phy_addr); 704 &phy_addr);
998 if (!virt_addr) { 705 if (!virt_addr) {
999 if (!rotation_enabled(vout)) 706 if (ovid->rotation_type == VOUT_ROT_NONE) {
1000 break; 707 break;
708 } else {
709 if (!is_rotation_enabled(vout))
710 break;
1001 /* Free the VRFB buffers if no space for V4L2 buffers */ 711 /* Free the VRFB buffers if no space for V4L2 buffers */
1002 for (j = i; j < *count; j++) { 712 for (j = i; j < *count; j++) {
1003 omap_vout_free_buffer( 713 omap_vout_free_buffer(
@@ -1005,6 +715,7 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
1005 vout->smsshado_size); 715 vout->smsshado_size);
1006 vout->smsshado_virt_addr[j] = 0; 716 vout->smsshado_virt_addr[j] = 0;
1007 vout->smsshado_phy_addr[j] = 0; 717 vout->smsshado_phy_addr[j] = 0;
718 }
1008 } 719 }
1009 } 720 }
1010 vout->buf_virt_addr[i] = virt_addr; 721 vout->buf_virt_addr[i] = virt_addr;
@@ -1017,9 +728,9 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
1017 728
1018/* 729/*
1019 * Free the V4L2 buffers additionally allocated than default 730 * Free the V4L2 buffers additionally allocated than default
1020 * number of buffers and free all the VRFB buffers 731 * number of buffers
1021 */ 732 */
1022static void omap_vout_free_allbuffers(struct omap_vout_device *vout) 733static void omap_vout_free_extra_buffers(struct omap_vout_device *vout)
1023{ 734{
1024 int num_buffers = 0, i; 735 int num_buffers = 0, i;
1025 736
@@ -1034,20 +745,6 @@ static void omap_vout_free_allbuffers(struct omap_vout_device *vout)
1034 vout->buf_virt_addr[i] = 0; 745 vout->buf_virt_addr[i] = 0;
1035 vout->buf_phy_addr[i] = 0; 746 vout->buf_phy_addr[i] = 0;
1036 } 747 }
1037 /* Free the VRFB buffers only if they are allocated
1038 * during reqbufs. Don't free if init time allocated
1039 */
1040 if (!vout->vrfb_static_allocation) {
1041 for (i = 0; i < VRFB_NUM_BUFS; i++) {
1042 if (vout->smsshado_virt_addr[i]) {
1043 omap_vout_free_buffer(
1044 vout->smsshado_virt_addr[i],
1045 vout->smsshado_size);
1046 vout->smsshado_virt_addr[i] = 0;
1047 vout->smsshado_phy_addr[i] = 0;
1048 }
1049 }
1050 }
1051 vout->buffer_allocated = num_buffers; 748 vout->buffer_allocated = num_buffers;
1052} 749}
1053 750
@@ -1059,16 +756,11 @@ static void omap_vout_free_allbuffers(struct omap_vout_device *vout)
1059 * buffer into VRFB memory space before giving it to the DSS. 756 * buffer into VRFB memory space before giving it to the DSS.
1060 */ 757 */
1061static int omap_vout_buffer_prepare(struct videobuf_queue *q, 758static int omap_vout_buffer_prepare(struct videobuf_queue *q,
1062 struct videobuf_buffer *vb, 759 struct videobuf_buffer *vb,
1063 enum v4l2_field field) 760 enum v4l2_field field)
1064{ 761{
1065 dma_addr_t dmabuf;
1066 struct vid_vrfb_dma *tx;
1067 enum dss_rotation rotation;
1068 struct omap_vout_device *vout = q->priv_data; 762 struct omap_vout_device *vout = q->priv_data;
1069 u32 dest_frame_index = 0, src_element_index = 0; 763 struct omapvideo_info *ovid = &vout->vid_info;
1070 u32 dest_element_index = 0, src_frame_index = 0;
1071 u32 elem_count = 0, frame_count = 0, pixsize = 2;
1072 764
1073 if (VIDEOBUF_NEEDS_INIT == vb->state) { 765 if (VIDEOBUF_NEEDS_INIT == vb->state) {
1074 vb->width = vout->pix.width; 766 vb->width = vout->pix.width;
@@ -1087,66 +779,24 @@ static int omap_vout_buffer_prepare(struct videobuf_queue *q,
1087 vout->queued_buf_addr[vb->i] = (u8 *) 779 vout->queued_buf_addr[vb->i] = (u8 *)
1088 omap_vout_uservirt_to_phys(vb->baddr); 780 omap_vout_uservirt_to_phys(vb->baddr);
1089 } else { 781 } else {
1090 vout->queued_buf_addr[vb->i] = (u8 *)vout->buf_phy_addr[vb->i]; 782 u32 addr, dma_addr;
1091 } 783 unsigned long size;
1092 784
1093 if (!rotation_enabled(vout)) 785 addr = (unsigned long) vout->buf_virt_addr[vb->i];
1094 return 0; 786 size = (unsigned long) vb->size;
1095 787
1096 dmabuf = vout->buf_phy_addr[vb->i]; 788 dma_addr = dma_map_single(vout->vid_dev->v4l2_dev.dev, (void *) addr,
1097 /* If rotation is enabled, copy input buffer into VRFB 789 size, DMA_TO_DEVICE);
1098 * memory space using DMA. We are copying input buffer 790 if (dma_mapping_error(vout->vid_dev->v4l2_dev.dev, dma_addr))
1099 * into VRFB memory space of desired angle and DSS will 791 v4l2_err(&vout->vid_dev->v4l2_dev, "dma_map_single failed\n");
1100 * read image VRFB memory for 0 degree angle
1101 */
1102 pixsize = vout->bpp * vout->vrfb_bpp;
1103 /*
1104 * DMA transfer in double index mode
1105 */
1106 792
1107 /* Frame index */ 793 vout->queued_buf_addr[vb->i] = (u8 *)vout->buf_phy_addr[vb->i];
1108 dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
1109 (vout->pix.width * vout->bpp)) + 1;
1110
1111 /* Source and destination parameters */
1112 src_element_index = 0;
1113 src_frame_index = 0;
1114 dest_element_index = 1;
1115 /* Number of elements per frame */
1116 elem_count = vout->pix.width * vout->bpp;
1117 frame_count = vout->pix.height;
1118 tx = &vout->vrfb_dma_tx;
1119 tx->tx_status = 0;
1120 omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
1121 (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
1122 tx->dev_id, 0x0);
1123 /* src_port required only for OMAP1 */
1124 omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
1125 dmabuf, src_element_index, src_frame_index);
1126 /*set dma source burst mode for VRFB */
1127 omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
1128 rotation = calc_rotation(vout);
1129
1130 /* dest_port required only for OMAP1 */
1131 omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
1132 vout->vrfb_context[vb->i].paddr[0], dest_element_index,
1133 dest_frame_index);
1134 /*set dma dest burst mode for VRFB */
1135 omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
1136 omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
1137
1138 omap_start_dma(tx->dma_ch);
1139 interruptible_sleep_on_timeout(&tx->wait, VRFB_TX_TIMEOUT);
1140
1141 if (tx->tx_status == 0) {
1142 omap_stop_dma(tx->dma_ch);
1143 return -EINVAL;
1144 } 794 }
1145 /* Store buffers physical address into an array. Addresses 795
1146 * from this array will be used to configure DSS */ 796 if (ovid->rotation_type == VOUT_ROT_VRFB)
1147 vout->queued_buf_addr[vb->i] = (u8 *) 797 return omap_vout_prepare_vrfb(vout, vb);
1148 vout->vrfb_context[vb->i].paddr[rotation]; 798 else
1149 return 0; 799 return 0;
1150} 800}
1151 801
1152/* 802/*
@@ -1298,7 +948,15 @@ static int omap_vout_release(struct file *file)
1298 "Unable to apply changes\n"); 948 "Unable to apply changes\n");
1299 949
1300 /* Free all buffers */ 950 /* Free all buffers */
1301 omap_vout_free_allbuffers(vout); 951 omap_vout_free_extra_buffers(vout);
952
953 /* Free the VRFB buffers only if they are allocated
954 * during reqbufs. Don't free if init time allocated
955 */
956 if (ovid->rotation_type == VOUT_ROT_VRFB) {
957 if (!vout->vrfb_static_allocation)
958 omap_vout_free_vrfb_buffers(vout);
959 }
1302 videobuf_mmap_free(q); 960 videobuf_mmap_free(q);
1303 961
1304 /* Even if apply changes fails we should continue 962 /* Even if apply changes fails we should continue
@@ -1307,7 +965,7 @@ static int omap_vout_release(struct file *file)
1307 u32 mask = 0; 965 u32 mask = 0;
1308 966
1309 mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | 967 mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
1310 DISPC_IRQ_EVSYNC_ODD; 968 DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_VSYNC2;
1311 omap_dispc_unregister_isr(omap_vout_isr, vout, mask); 969 omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
1312 vout->streaming = 0; 970 vout->streaming = 0;
1313 971
@@ -1383,10 +1041,7 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
1383 struct v4l2_fmtdesc *fmt) 1041 struct v4l2_fmtdesc *fmt)
1384{ 1042{
1385 int index = fmt->index; 1043 int index = fmt->index;
1386 enum v4l2_buf_type type = fmt->type;
1387 1044
1388 fmt->index = index;
1389 fmt->type = type;
1390 if (index >= NUM_OUTPUT_FORMATS) 1045 if (index >= NUM_OUTPUT_FORMATS)
1391 return -EINVAL; 1046 return -EINVAL;
1392 1047
@@ -1457,7 +1112,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
1457 1112
1458 /* We dont support RGB24-packed mode if vrfb rotation 1113 /* We dont support RGB24-packed mode if vrfb rotation
1459 * is enabled*/ 1114 * is enabled*/
1460 if ((rotation_enabled(vout)) && 1115 if ((is_rotation_enabled(vout)) &&
1461 f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) { 1116 f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
1462 ret = -EINVAL; 1117 ret = -EINVAL;
1463 goto s_fmt_vid_out_exit; 1118 goto s_fmt_vid_out_exit;
@@ -1465,7 +1120,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
1465 1120
1466 /* get the framebuffer parameters */ 1121 /* get the framebuffer parameters */
1467 1122
1468 if (rotate_90_or_270(vout)) { 1123 if (is_rotation_90_or_270(vout)) {
1469 vout->fbuf.fmt.height = timing->x_res; 1124 vout->fbuf.fmt.height = timing->x_res;
1470 vout->fbuf.fmt.width = timing->y_res; 1125 vout->fbuf.fmt.width = timing->y_res;
1471 } else { 1126 } else {
@@ -1555,10 +1210,7 @@ static int vidioc_enum_fmt_vid_overlay(struct file *file, void *fh,
1555 struct v4l2_fmtdesc *fmt) 1210 struct v4l2_fmtdesc *fmt)
1556{ 1211{
1557 int index = fmt->index; 1212 int index = fmt->index;
1558 enum v4l2_buf_type type = fmt->type;
1559 1213
1560 fmt->index = index;
1561 fmt->type = type;
1562 if (index >= NUM_OUTPUT_FORMATS) 1214 if (index >= NUM_OUTPUT_FORMATS)
1563 return -EINVAL; 1215 return -EINVAL;
1564 1216
@@ -1645,7 +1297,7 @@ static int vidioc_s_crop(struct file *file, void *fh, struct v4l2_crop *crop)
1645 /* get the display device attached to the overlay */ 1297 /* get the display device attached to the overlay */
1646 timing = &ovl->manager->device->panel.timings; 1298 timing = &ovl->manager->device->panel.timings;
1647 1299
1648 if (rotate_90_or_270(vout)) { 1300 if (is_rotation_90_or_270(vout)) {
1649 vout->fbuf.fmt.height = timing->x_res; 1301 vout->fbuf.fmt.height = timing->x_res;
1650 vout->fbuf.fmt.width = timing->y_res; 1302 vout->fbuf.fmt.width = timing->y_res;
1651 } else { 1303 } else {
@@ -1725,9 +1377,17 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
1725 switch (a->id) { 1377 switch (a->id) {
1726 case V4L2_CID_ROTATE: 1378 case V4L2_CID_ROTATE:
1727 { 1379 {
1380 struct omapvideo_info *ovid;
1728 int rotation = a->value; 1381 int rotation = a->value;
1729 1382
1383 ovid = &vout->vid_info;
1384
1730 mutex_lock(&vout->lock); 1385 mutex_lock(&vout->lock);
1386 if (rotation && ovid->rotation_type == VOUT_ROT_NONE) {
1387 mutex_unlock(&vout->lock);
1388 ret = -ERANGE;
1389 break;
1390 }
1731 1391
1732 if (rotation && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) { 1392 if (rotation && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
1733 mutex_unlock(&vout->lock); 1393 mutex_unlock(&vout->lock);
@@ -1783,6 +1443,11 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
1783 ovl = ovid->overlays[0]; 1443 ovl = ovid->overlays[0];
1784 1444
1785 mutex_lock(&vout->lock); 1445 mutex_lock(&vout->lock);
1446 if (mirror && ovid->rotation_type == VOUT_ROT_NONE) {
1447 mutex_unlock(&vout->lock);
1448 ret = -ERANGE;
1449 break;
1450 }
1786 1451
1787 if (mirror && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) { 1452 if (mirror && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
1788 mutex_unlock(&vout->lock); 1453 mutex_unlock(&vout->lock);
@@ -1893,7 +1558,7 @@ static int vidioc_qbuf(struct file *file, void *fh,
1893 } 1558 }
1894 } 1559 }
1895 1560
1896 if ((rotation_enabled(vout)) && 1561 if ((is_rotation_enabled(vout)) &&
1897 vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) { 1562 vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) {
1898 v4l2_warn(&vout->vid_dev->v4l2_dev, 1563 v4l2_warn(&vout->vid_dev->v4l2_dev,
1899 "DMA Channel not allocated for Rotation\n"); 1564 "DMA Channel not allocated for Rotation\n");
@@ -1908,15 +1573,28 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
1908 struct omap_vout_device *vout = fh; 1573 struct omap_vout_device *vout = fh;
1909 struct videobuf_queue *q = &vout->vbq; 1574 struct videobuf_queue *q = &vout->vbq;
1910 1575
1576 int ret;
1577 u32 addr;
1578 unsigned long size;
1579 struct videobuf_buffer *vb;
1580
1581 vb = q->bufs[b->index];
1582
1911 if (!vout->streaming) 1583 if (!vout->streaming)
1912 return -EINVAL; 1584 return -EINVAL;
1913 1585
1914 if (file->f_flags & O_NONBLOCK) 1586 if (file->f_flags & O_NONBLOCK)
1915 /* Call videobuf_dqbuf for non blocking mode */ 1587 /* Call videobuf_dqbuf for non blocking mode */
1916 return videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1); 1588 ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
1917 else 1589 else
1918 /* Call videobuf_dqbuf for blocking mode */ 1590 /* Call videobuf_dqbuf for blocking mode */
1919 return videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0); 1591 ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
1592
1593 addr = (unsigned long) vout->buf_phy_addr[vb->i];
1594 size = (unsigned long) vb->size;
1595 dma_unmap_single(vout->vid_dev->v4l2_dev.dev, addr,
1596 size, DMA_TO_DEVICE);
1597 return ret;
1920} 1598}
1921 1599
1922static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i) 1600static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
@@ -1965,7 +1643,8 @@ static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
1965 addr = (unsigned long) vout->queued_buf_addr[vout->cur_frm->i] 1643 addr = (unsigned long) vout->queued_buf_addr[vout->cur_frm->i]
1966 + vout->cropped_offset; 1644 + vout->cropped_offset;
1967 1645
1968 mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD; 1646 mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
1647 | DISPC_IRQ_VSYNC2;
1969 1648
1970 omap_dispc_register_isr(omap_vout_isr, vout, mask); 1649 omap_dispc_register_isr(omap_vout_isr, vout, mask);
1971 1650
@@ -2015,7 +1694,8 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
2015 return -EINVAL; 1694 return -EINVAL;
2016 1695
2017 vout->streaming = 0; 1696 vout->streaming = 0;
2018 mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD; 1697 mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
1698 | DISPC_IRQ_VSYNC2;
2019 1699
2020 omap_dispc_unregister_isr(omap_vout_isr, vout, mask); 1700 omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
2021 1701
@@ -2228,7 +1908,8 @@ static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
2228 vout->mirror = 0; 1908 vout->mirror = 0;
2229 vout->control[2].id = V4L2_CID_HFLIP; 1909 vout->control[2].id = V4L2_CID_HFLIP;
2230 vout->control[2].value = 0; 1910 vout->control[2].value = 0;
2231 vout->vrfb_bpp = 2; 1911 if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
1912 vout->vrfb_bpp = 2;
2232 1913
2233 control[1].id = V4L2_CID_BG_COLOR; 1914 control[1].id = V4L2_CID_BG_COLOR;
2234 control[1].value = 0; 1915 control[1].value = 0;
@@ -2260,17 +1941,15 @@ static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
2260 int vid_num) 1941 int vid_num)
2261{ 1942{
2262 u32 numbuffers; 1943 u32 numbuffers;
2263 int ret = 0, i, j; 1944 int ret = 0, i;
2264 int image_width, image_height; 1945 struct omapvideo_info *ovid;
2265 struct video_device *vfd;
2266 struct omap_vout_device *vout; 1946 struct omap_vout_device *vout;
2267 int static_vrfb_allocation = 0, vrfb_num_bufs = VRFB_NUM_BUFS;
2268 struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev); 1947 struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
2269 struct omap2video_device *vid_dev = 1948 struct omap2video_device *vid_dev =
2270 container_of(v4l2_dev, struct omap2video_device, v4l2_dev); 1949 container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
2271 1950
2272 vout = vid_dev->vouts[vid_num]; 1951 vout = vid_dev->vouts[vid_num];
2273 vfd = vout->vfd; 1952 ovid = &vout->vid_info;
2274 1953
2275 numbuffers = (vid_num == 0) ? video1_numbuffers : video2_numbuffers; 1954 numbuffers = (vid_num == 0) ? video1_numbuffers : video2_numbuffers;
2276 vout->buffer_size = (vid_num == 0) ? video1_bufsize : video2_bufsize; 1955 vout->buffer_size = (vid_num == 0) ? video1_bufsize : video2_bufsize;
@@ -2287,66 +1966,16 @@ static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
2287 } 1966 }
2288 } 1967 }
2289 1968
2290 for (i = 0; i < VRFB_NUM_BUFS; i++) {
2291 if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
2292 dev_info(&pdev->dev, ": VRFB allocation failed\n");
2293 for (j = 0; j < i; j++)
2294 omap_vrfb_release_ctx(&vout->vrfb_context[j]);
2295 ret = -ENOMEM;
2296 goto free_buffers;
2297 }
2298 }
2299 vout->cropped_offset = 0; 1969 vout->cropped_offset = 0;
2300 1970
2301 /* Calculate VRFB memory size */ 1971 if (ovid->rotation_type == VOUT_ROT_VRFB) {
2302 /* allocate for worst case size */ 1972 int static_vrfb_allocation = (vid_num == 0) ?
2303 image_width = VID_MAX_WIDTH / TILE_SIZE; 1973 vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
2304 if (VID_MAX_WIDTH % TILE_SIZE) 1974 ret = omap_vout_setup_vrfb_bufs(pdev, vid_num,
2305 image_width++; 1975 static_vrfb_allocation);
2306
2307 image_width = image_width * TILE_SIZE;
2308 image_height = VID_MAX_HEIGHT / TILE_SIZE;
2309
2310 if (VID_MAX_HEIGHT % TILE_SIZE)
2311 image_height++;
2312
2313 image_height = image_height * TILE_SIZE;
2314 vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);
2315
2316 /*
2317 * Request and Initialize DMA, for DMA based VRFB transfer
2318 */
2319 vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
2320 vout->vrfb_dma_tx.dma_ch = -1;
2321 vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
2322 ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
2323 omap_vout_vrfb_dma_tx_callback,
2324 (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
2325 if (ret < 0) {
2326 vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
2327 dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
2328 " video%d\n", vfd->minor);
2329 }
2330 init_waitqueue_head(&vout->vrfb_dma_tx.wait);
2331
2332 /* Allocate VRFB buffers if selected through bootargs */
2333 static_vrfb_allocation = (vid_num == 0) ?
2334 vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
2335
2336 /* statically allocated the VRFB buffer is done through
2337 commands line aruments */
2338 if (static_vrfb_allocation) {
2339 if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
2340 ret = -ENOMEM;
2341 goto release_vrfb_ctx;
2342 }
2343 vout->vrfb_static_allocation = 1;
2344 } 1976 }
2345 return 0;
2346 1977
2347release_vrfb_ctx: 1978 return ret;
2348 for (j = 0; j < VRFB_NUM_BUFS; j++)
2349 omap_vrfb_release_ctx(&vout->vrfb_context[j]);
2350 1979
2351free_buffers: 1980free_buffers:
2352 for (i = 0; i < numbuffers; i++) { 1981 for (i = 0; i < numbuffers; i++) {
@@ -2389,6 +2018,10 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
2389 vout->vid_info.num_overlays = 1; 2018 vout->vid_info.num_overlays = 1;
2390 vout->vid_info.id = k + 1; 2019 vout->vid_info.id = k + 1;
2391 2020
2021 /* Set VRFB as rotation_type for omap2 and omap3 */
2022 if (cpu_is_omap24xx() || cpu_is_omap34xx())
2023 vout->vid_info.rotation_type = VOUT_ROT_VRFB;
2024
2392 /* Setup the default configuration for the video devices 2025 /* Setup the default configuration for the video devices
2393 */ 2026 */
2394 if (omap_vout_setup_video_data(vout) != 0) { 2027 if (omap_vout_setup_video_data(vout) != 0) {
@@ -2422,7 +2055,8 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
2422 goto success; 2055 goto success;
2423 2056
2424error2: 2057error2:
2425 omap_vout_release_vrfb(vout); 2058 if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
2059 omap_vout_release_vrfb(vout);
2426 omap_vout_free_buffers(vout); 2060 omap_vout_free_buffers(vout);
2427error1: 2061error1:
2428 video_device_release(vfd); 2062 video_device_release(vfd);
@@ -2443,11 +2077,13 @@ success:
2443static void omap_vout_cleanup_device(struct omap_vout_device *vout) 2077static void omap_vout_cleanup_device(struct omap_vout_device *vout)
2444{ 2078{
2445 struct video_device *vfd; 2079 struct video_device *vfd;
2080 struct omapvideo_info *ovid;
2446 2081
2447 if (!vout) 2082 if (!vout)
2448 return; 2083 return;
2449 2084
2450 vfd = vout->vfd; 2085 vfd = vout->vfd;
2086 ovid = &vout->vid_info;
2451 if (vfd) { 2087 if (vfd) {
2452 if (!video_is_registered(vfd)) { 2088 if (!video_is_registered(vfd)) {
2453 /* 2089 /*
@@ -2463,14 +2099,15 @@ static void omap_vout_cleanup_device(struct omap_vout_device *vout)
2463 video_unregister_device(vfd); 2099 video_unregister_device(vfd);
2464 } 2100 }
2465 } 2101 }
2466 2102 if (ovid->rotation_type == VOUT_ROT_VRFB) {
2467 omap_vout_release_vrfb(vout); 2103 omap_vout_release_vrfb(vout);
2104 /* Free the VRFB buffer if allocated
2105 * init time
2106 */
2107 if (vout->vrfb_static_allocation)
2108 omap_vout_free_vrfb_buffers(vout);
2109 }
2468 omap_vout_free_buffers(vout); 2110 omap_vout_free_buffers(vout);
2469 /* Free the VRFB buffer if allocated
2470 * init time
2471 */
2472 if (vout->vrfb_static_allocation)
2473 omap_vout_free_vrfb_buffers(vout);
2474 2111
2475 kfree(vout); 2112 kfree(vout);
2476} 2113}
diff --git a/drivers/media/video/omap/omap_vout_vrfb.c b/drivers/media/video/omap/omap_vout_vrfb.c
new file mode 100644
index 000000000000..ebebcac49225
--- /dev/null
+++ b/drivers/media/video/omap/omap_vout_vrfb.c
@@ -0,0 +1,390 @@
1/*
2 * omap_vout_vrfb.c
3 *
4 * Copyright (C) 2010 Texas Instruments.
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 *
10 */
11
12#include <linux/sched.h>
13#include <linux/platform_device.h>
14#include <linux/videodev2.h>
15
16#include <media/videobuf-dma-contig.h>
17#include <media/v4l2-device.h>
18
19#include <plat/dma.h>
20#include <plat/vrfb.h>
21
22#include "omap_voutdef.h"
23#include "omap_voutlib.h"
24
25/*
26 * Function for allocating video buffers
27 */
28static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
29 unsigned int *count, int startindex)
30{
31 int i, j;
32
33 for (i = 0; i < *count; i++) {
34 if (!vout->smsshado_virt_addr[i]) {
35 vout->smsshado_virt_addr[i] =
36 omap_vout_alloc_buffer(vout->smsshado_size,
37 &vout->smsshado_phy_addr[i]);
38 }
39 if (!vout->smsshado_virt_addr[i] && startindex != -1) {
40 if (V4L2_MEMORY_MMAP == vout->memory && i >= startindex)
41 break;
42 }
43 if (!vout->smsshado_virt_addr[i]) {
44 for (j = 0; j < i; j++) {
45 omap_vout_free_buffer(
46 vout->smsshado_virt_addr[j],
47 vout->smsshado_size);
48 vout->smsshado_virt_addr[j] = 0;
49 vout->smsshado_phy_addr[j] = 0;
50 }
51 *count = 0;
52 return -ENOMEM;
53 }
54 memset((void *) vout->smsshado_virt_addr[i], 0,
55 vout->smsshado_size);
56 }
57 return 0;
58}
59
60/*
61 * Wakes up the application once the DMA transfer to VRFB space is completed.
62 */
63static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data)
64{
65 struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
66
67 t->tx_status = 1;
68 wake_up_interruptible(&t->wait);
69}
70
71/*
72 * Free VRFB buffers
73 */
74void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
75{
76 int j;
77
78 for (j = 0; j < VRFB_NUM_BUFS; j++) {
79 omap_vout_free_buffer(vout->smsshado_virt_addr[j],
80 vout->smsshado_size);
81 vout->smsshado_virt_addr[j] = 0;
82 vout->smsshado_phy_addr[j] = 0;
83 }
84}
85
86int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
87 u32 static_vrfb_allocation)
88{
89 int ret = 0, i, j;
90 struct omap_vout_device *vout;
91 struct video_device *vfd;
92 int image_width, image_height;
93 int vrfb_num_bufs = VRFB_NUM_BUFS;
94 struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
95 struct omap2video_device *vid_dev =
96 container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
97
98 vout = vid_dev->vouts[vid_num];
99 vfd = vout->vfd;
100
101 for (i = 0; i < VRFB_NUM_BUFS; i++) {
102 if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
103 dev_info(&pdev->dev, ": VRFB allocation failed\n");
104 for (j = 0; j < i; j++)
105 omap_vrfb_release_ctx(&vout->vrfb_context[j]);
106 ret = -ENOMEM;
107 goto free_buffers;
108 }
109 }
110
111 /* Calculate VRFB memory size */
112 /* allocate for worst case size */
113 image_width = VID_MAX_WIDTH / TILE_SIZE;
114 if (VID_MAX_WIDTH % TILE_SIZE)
115 image_width++;
116
117 image_width = image_width * TILE_SIZE;
118 image_height = VID_MAX_HEIGHT / TILE_SIZE;
119
120 if (VID_MAX_HEIGHT % TILE_SIZE)
121 image_height++;
122
123 image_height = image_height * TILE_SIZE;
124 vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);
125
126 /*
127 * Request and Initialize DMA, for DMA based VRFB transfer
128 */
129 vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
130 vout->vrfb_dma_tx.dma_ch = -1;
131 vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
132 ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
133 omap_vout_vrfb_dma_tx_callback,
134 (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
135 if (ret < 0) {
136 vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
137 dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
138 " video%d\n", vfd->minor);
139 }
140 init_waitqueue_head(&vout->vrfb_dma_tx.wait);
141
 142 /* Static allocation of the VRFB buffers is requested through
 143 command line arguments */
144 if (static_vrfb_allocation) {
145 if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
146 ret = -ENOMEM;
147 goto release_vrfb_ctx;
148 }
149 vout->vrfb_static_allocation = 1;
150 }
151 return 0;
152
153release_vrfb_ctx:
154 for (j = 0; j < VRFB_NUM_BUFS; j++)
155 omap_vrfb_release_ctx(&vout->vrfb_context[j]);
156free_buffers:
157 omap_vout_free_buffers(vout);
158
159 return ret;
160}
161
162/*
163 * Release the VRFB context once the module exits
164 */
165void omap_vout_release_vrfb(struct omap_vout_device *vout)
166{
167 int i;
168
169 for (i = 0; i < VRFB_NUM_BUFS; i++)
170 omap_vrfb_release_ctx(&vout->vrfb_context[i]);
171
172 if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
173 vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
174 omap_free_dma(vout->vrfb_dma_tx.dma_ch);
175 }
176}
177
178/*
179 * Allocate the buffers for the VRFB space. Data is copied from V4L2
180 * buffers to the VRFB buffers using the DMA engine.
181 */
182int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
183 unsigned int *count, unsigned int startindex)
184{
185 int i;
186 bool yuv_mode;
187
188 if (!is_rotation_enabled(vout))
189 return 0;
190
191 /* If rotation is enabled, allocate memory for VRFB space also */
192 *count = *count > VRFB_NUM_BUFS ? VRFB_NUM_BUFS : *count;
193
194 /* Allocate the VRFB buffers only if the buffers are not
195 * allocated during init time.
196 */
197 if (!vout->vrfb_static_allocation)
198 if (omap_vout_allocate_vrfb_buffers(vout, count, startindex))
199 return -ENOMEM;
200
201 if (vout->dss_mode == OMAP_DSS_COLOR_YUV2 ||
202 vout->dss_mode == OMAP_DSS_COLOR_UYVY)
203 yuv_mode = true;
204 else
205 yuv_mode = false;
206
207 for (i = 0; i < *count; i++)
208 omap_vrfb_setup(&vout->vrfb_context[i],
209 vout->smsshado_phy_addr[i], vout->pix.width,
210 vout->pix.height, vout->bpp, yuv_mode);
211
212 return 0;
213}
214
215int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
216 struct videobuf_buffer *vb)
217{
218 dma_addr_t dmabuf;
219 struct vid_vrfb_dma *tx;
220 enum dss_rotation rotation;
221 u32 dest_frame_index = 0, src_element_index = 0;
222 u32 dest_element_index = 0, src_frame_index = 0;
223 u32 elem_count = 0, frame_count = 0, pixsize = 2;
224
225 if (!is_rotation_enabled(vout))
226 return 0;
227
228 dmabuf = vout->buf_phy_addr[vb->i];
229 /* If rotation is enabled, copy input buffer into VRFB
230 * memory space using DMA. We are copying input buffer
231 * into VRFB memory space of desired angle and DSS will
232 * read image VRFB memory for 0 degree angle
233 */
234 pixsize = vout->bpp * vout->vrfb_bpp;
235 /*
236 * DMA transfer in double index mode
237 */
238
239 /* Frame index */
240 dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
241 (vout->pix.width * vout->bpp)) + 1;
242
243 /* Source and destination parameters */
244 src_element_index = 0;
245 src_frame_index = 0;
246 dest_element_index = 1;
247 /* Number of elements per frame */
248 elem_count = vout->pix.width * vout->bpp;
249 frame_count = vout->pix.height;
250 tx = &vout->vrfb_dma_tx;
251 tx->tx_status = 0;
252 omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
253 (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
254 tx->dev_id, 0x0);
255 /* src_port required only for OMAP1 */
256 omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
257 dmabuf, src_element_index, src_frame_index);
258 /*set dma source burst mode for VRFB */
259 omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
260 rotation = calc_rotation(vout);
261
262 /* dest_port required only for OMAP1 */
263 omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
264 vout->vrfb_context[vb->i].paddr[0], dest_element_index,
265 dest_frame_index);
266 /*set dma dest burst mode for VRFB */
267 omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
268 omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
269
270 omap_start_dma(tx->dma_ch);
271 interruptible_sleep_on_timeout(&tx->wait, VRFB_TX_TIMEOUT);
272
273 if (tx->tx_status == 0) {
274 omap_stop_dma(tx->dma_ch);
275 return -EINVAL;
276 }
277 /* Store buffers physical address into an array. Addresses
278 * from this array will be used to configure DSS */
279 vout->queued_buf_addr[vb->i] = (u8 *)
280 vout->vrfb_context[vb->i].paddr[rotation];
281 return 0;
282}
283
284/*
285 * Calculate the buffer offsets from which the streaming should
286 * start. This offset calculation is mainly required because of
287 * the VRFB 32 pixels alignment with rotation.
288 */
289void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout)
290{
291 enum dss_rotation rotation;
292 bool mirroring = vout->mirror;
293 struct v4l2_rect *crop = &vout->crop;
294 struct v4l2_pix_format *pix = &vout->pix;
295 int *cropped_offset = &vout->cropped_offset;
296 int vr_ps = 1, ps = 2, temp_ps = 2;
297 int offset = 0, ctop = 0, cleft = 0, line_length = 0;
298
299 rotation = calc_rotation(vout);
300
301 if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
302 V4L2_PIX_FMT_UYVY == pix->pixelformat) {
303 if (is_rotation_enabled(vout)) {
304 /*
305 * ps - Actual pixel size for YUYV/UYVY for
306 * VRFB/Mirroring is 4 bytes
307 * vr_ps - Virtually pixel size for YUYV/UYVY is
308 * 2 bytes
309 */
310 ps = 4;
311 vr_ps = 2;
312 } else {
313 ps = 2; /* otherwise the pixel size is 2 byte */
314 }
315 } else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) {
316 ps = 4;
317 } else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) {
318 ps = 3;
319 }
320 vout->ps = ps;
321 vout->vr_ps = vr_ps;
322
323 if (is_rotation_enabled(vout)) {
324 line_length = MAX_PIXELS_PER_LINE;
325 ctop = (pix->height - crop->height) - crop->top;
326 cleft = (pix->width - crop->width) - crop->left;
327 } else {
328 line_length = pix->width;
329 }
330 vout->line_length = line_length;
331 switch (rotation) {
332 case dss_rotation_90_degree:
333 offset = vout->vrfb_context[0].yoffset *
334 vout->vrfb_context[0].bytespp;
335 temp_ps = ps / vr_ps;
336 if (mirroring == 0) {
337 *cropped_offset = offset + line_length *
338 temp_ps * cleft + crop->top * temp_ps;
339 } else {
340 *cropped_offset = offset + line_length * temp_ps *
341 cleft + crop->top * temp_ps + (line_length *
342 ((crop->width / (vr_ps)) - 1) * ps);
343 }
344 break;
345 case dss_rotation_180_degree:
346 offset = ((MAX_PIXELS_PER_LINE * vout->vrfb_context[0].yoffset *
347 vout->vrfb_context[0].bytespp) +
348 (vout->vrfb_context[0].xoffset *
349 vout->vrfb_context[0].bytespp));
350 if (mirroring == 0) {
351 *cropped_offset = offset + (line_length * ps * ctop) +
352 (cleft / vr_ps) * ps;
353
354 } else {
355 *cropped_offset = offset + (line_length * ps * ctop) +
356 (cleft / vr_ps) * ps + (line_length *
357 (crop->height - 1) * ps);
358 }
359 break;
360 case dss_rotation_270_degree:
361 offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
362 vout->vrfb_context[0].bytespp;
363 temp_ps = ps / vr_ps;
364 if (mirroring == 0) {
365 *cropped_offset = offset + line_length *
366 temp_ps * crop->left + ctop * ps;
367 } else {
368 *cropped_offset = offset + line_length *
369 temp_ps * crop->left + ctop * ps +
370 (line_length * ((crop->width / vr_ps) - 1) *
371 ps);
372 }
373 break;
374 case dss_rotation_0_degree:
375 if (mirroring == 0) {
376 *cropped_offset = (line_length * ps) *
377 crop->top + (crop->left / vr_ps) * ps;
378 } else {
379 *cropped_offset = (line_length * ps) *
380 crop->top + (crop->left / vr_ps) * ps +
381 (line_length * (crop->height - 1) * ps);
382 }
383 break;
384 default:
385 *cropped_offset = (line_length * ps * crop->top) /
386 vr_ps + (crop->left * ps) / vr_ps +
387 ((crop->width / vr_ps) - 1) * ps;
388 break;
389 }
390}
diff --git a/drivers/media/video/omap/omap_vout_vrfb.h b/drivers/media/video/omap/omap_vout_vrfb.h
new file mode 100644
index 000000000000..ffde741e0590
--- /dev/null
+++ b/drivers/media/video/omap/omap_vout_vrfb.h
@@ -0,0 +1,40 @@
1/*
2 * omap_vout_vrfb.h
3 *
4 * Copyright (C) 2010 Texas Instruments.
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 *
10 */
11
12#ifndef OMAP_VOUT_VRFB_H
13#define OMAP_VOUT_VRFB_H
14
15#ifdef CONFIG_VIDEO_OMAP2_VOUT_VRFB
16void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout);
17int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
18 u32 static_vrfb_allocation);
19void omap_vout_release_vrfb(struct omap_vout_device *vout);
20int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
21 unsigned int *count, unsigned int startindex);
22int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
23 struct videobuf_buffer *vb);
24void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout);
25#else
26void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout) { }
27int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
28 u32 static_vrfb_allocation)
29 { return 0; }
30void omap_vout_release_vrfb(struct omap_vout_device *vout) { }
31int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
32 unsigned int *count, unsigned int startindex)
33 { return 0; }
34int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
35 struct videobuf_buffer *vb)
36 { return 0; }
37void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout) { }
38#endif
39
40#endif
diff --git a/drivers/media/video/omap/omap_voutdef.h b/drivers/media/video/omap/omap_voutdef.h
index 659497b84996..d793501cafcc 100644
--- a/drivers/media/video/omap/omap_voutdef.h
+++ b/drivers/media/video/omap/omap_voutdef.h
@@ -12,6 +12,7 @@
12#define OMAP_VOUTDEF_H 12#define OMAP_VOUTDEF_H
13 13
14#include <video/omapdss.h> 14#include <video/omapdss.h>
15#include <plat/vrfb.h>
15 16
16#define YUYV_BPP 2 17#define YUYV_BPP 2
17#define RGB565_BPP 2 18#define RGB565_BPP 2
@@ -27,6 +28,31 @@
27#define MAX_DISPLAYS 3 28#define MAX_DISPLAYS 3
28#define MAX_MANAGERS 3 29#define MAX_MANAGERS 3
29 30
31#define QQVGA_WIDTH 160
32#define QQVGA_HEIGHT 120
33
34/* Max Resolution supported by the driver */
35#define VID_MAX_WIDTH 1280 /* Largest width */
36#define VID_MAX_HEIGHT 720 /* Largest height */
37
 38/* Minimum requirement is 2x2 for DSS */
39#define VID_MIN_WIDTH 2
40#define VID_MIN_HEIGHT 2
41
42/* 2048 x 2048 is max res supported by OMAP display controller */
43#define MAX_PIXELS_PER_LINE 2048
44
45#define VRFB_TX_TIMEOUT 1000
46#define VRFB_NUM_BUFS 4
47
 48/* Max buffer size to be allocated during init */
49#define OMAP_VOUT_MAX_BUF_SIZE (VID_MAX_WIDTH*VID_MAX_HEIGHT*4)
50
51enum dma_channel_state {
52 DMA_CHAN_NOT_ALLOTED,
53 DMA_CHAN_ALLOTED,
54};
55
30/* Enum for Rotation 56/* Enum for Rotation
31 * DSS understands rotation in 0, 1, 2, 3 context 57 * DSS understands rotation in 0, 1, 2, 3 context
32 * while V4L2 driver understands it as 0, 90, 180, 270 58 * while V4L2 driver understands it as 0, 90, 180, 270
@@ -37,6 +63,18 @@ enum dss_rotation {
37 dss_rotation_180_degree = 2, 63 dss_rotation_180_degree = 2,
38 dss_rotation_270_degree = 3, 64 dss_rotation_270_degree = 3,
39}; 65};
66
67/* Enum for choosing rotation type for vout
68 * DSS2 doesn't understand no rotation as an
69 * option while V4L2 driver doesn't support
70 * rotation in the case where VRFB is not built in
71 * the kernel
72 */
73enum vout_rotaion_type {
74 VOUT_ROT_NONE = 0,
75 VOUT_ROT_VRFB = 1,
76};
77
40/* 78/*
41 * This structure is used to store the DMA transfer parameters 79 * This structure is used to store the DMA transfer parameters
42 * for VRFB hidden buffer 80 * for VRFB hidden buffer
@@ -53,6 +91,7 @@ struct omapvideo_info {
53 int id; 91 int id;
54 int num_overlays; 92 int num_overlays;
55 struct omap_overlay *overlays[MAX_OVLS]; 93 struct omap_overlay *overlays[MAX_OVLS];
94 enum vout_rotaion_type rotation_type;
56}; 95};
57 96
58struct omap2video_device { 97struct omap2video_device {
@@ -144,4 +183,43 @@ struct omap_vout_device {
144 int io_allowed; 183 int io_allowed;
145 184
146}; 185};
186
187/*
188 * Return true if rotation is 90 or 270
189 */
190static inline int is_rotation_90_or_270(const struct omap_vout_device *vout)
191{
192 return (vout->rotation == dss_rotation_90_degree ||
193 vout->rotation == dss_rotation_270_degree);
194}
195
196/*
197 * Return true if rotation is enabled
198 */
199static inline int is_rotation_enabled(const struct omap_vout_device *vout)
200{
201 return vout->rotation || vout->mirror;
202}
203
204/*
205 * Reverse the rotation degree if mirroring is enabled
206 */
207static inline int calc_rotation(const struct omap_vout_device *vout)
208{
209 if (!vout->mirror)
210 return vout->rotation;
211
212 switch (vout->rotation) {
213 case dss_rotation_90_degree:
214 return dss_rotation_270_degree;
215 case dss_rotation_270_degree:
216 return dss_rotation_90_degree;
217 case dss_rotation_180_degree:
218 return dss_rotation_0_degree;
219 default:
220 return dss_rotation_180_degree;
221 }
222}
223
224void omap_vout_free_buffers(struct omap_vout_device *vout);
147#endif /* ifndef OMAP_VOUTDEF_H */ 225#endif /* ifndef OMAP_VOUTDEF_H */
diff --git a/drivers/media/video/omap/omap_voutlib.c b/drivers/media/video/omap/omap_voutlib.c
index 8ae74817a110..115408b9274f 100644
--- a/drivers/media/video/omap/omap_voutlib.c
+++ b/drivers/media/video/omap/omap_voutlib.c
@@ -24,8 +24,12 @@
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/videodev2.h> 25#include <linux/videodev2.h>
26 26
27#include <linux/dma-mapping.h>
28
27#include <plat/cpu.h> 29#include <plat/cpu.h>
28 30
31#include "omap_voutlib.h"
32
29MODULE_AUTHOR("Texas Instruments"); 33MODULE_AUTHOR("Texas Instruments");
30MODULE_DESCRIPTION("OMAP Video library"); 34MODULE_DESCRIPTION("OMAP Video library");
31MODULE_LICENSE("GPL"); 35MODULE_LICENSE("GPL");
@@ -291,3 +295,45 @@ void omap_vout_new_format(struct v4l2_pix_format *pix,
291} 295}
292EXPORT_SYMBOL_GPL(omap_vout_new_format); 296EXPORT_SYMBOL_GPL(omap_vout_new_format);
293 297
298/*
299 * Allocate buffers
300 */
301unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr)
302{
303 u32 order, size;
304 unsigned long virt_addr, addr;
305
306 size = PAGE_ALIGN(buf_size);
307 order = get_order(size);
308 virt_addr = __get_free_pages(GFP_KERNEL, order);
309 addr = virt_addr;
310
311 if (virt_addr) {
312 while (size > 0) {
313 SetPageReserved(virt_to_page(addr));
314 addr += PAGE_SIZE;
315 size -= PAGE_SIZE;
316 }
317 }
318 *phys_addr = (u32) virt_to_phys((void *) virt_addr);
319 return virt_addr;
320}
321
322/*
323 * Free buffers
324 */
325void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
326{
327 u32 order, size;
328 unsigned long addr = virtaddr;
329
330 size = PAGE_ALIGN(buf_size);
331 order = get_order(size);
332
333 while (size > 0) {
334 ClearPageReserved(virt_to_page(addr));
335 addr += PAGE_SIZE;
336 size -= PAGE_SIZE;
337 }
338 free_pages((unsigned long) virtaddr, order);
339}
diff --git a/drivers/media/video/omap/omap_voutlib.h b/drivers/media/video/omap/omap_voutlib.h
index a60b16e8bfc3..e51750a597e3 100644
--- a/drivers/media/video/omap/omap_voutlib.h
+++ b/drivers/media/video/omap/omap_voutlib.h
@@ -12,23 +12,25 @@
12#ifndef OMAP_VOUTLIB_H 12#ifndef OMAP_VOUTLIB_H
13#define OMAP_VOUTLIB_H 13#define OMAP_VOUTLIB_H
14 14
15extern void omap_vout_default_crop(struct v4l2_pix_format *pix, 15void omap_vout_default_crop(struct v4l2_pix_format *pix,
16 struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop); 16 struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop);
17 17
18extern int omap_vout_new_crop(struct v4l2_pix_format *pix, 18int omap_vout_new_crop(struct v4l2_pix_format *pix,
19 struct v4l2_rect *crop, struct v4l2_window *win, 19 struct v4l2_rect *crop, struct v4l2_window *win,
20 struct v4l2_framebuffer *fbuf, 20 struct v4l2_framebuffer *fbuf,
21 const struct v4l2_rect *new_crop); 21 const struct v4l2_rect *new_crop);
22 22
23extern int omap_vout_try_window(struct v4l2_framebuffer *fbuf, 23int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
24 struct v4l2_window *new_win); 24 struct v4l2_window *new_win);
25 25
26extern int omap_vout_new_window(struct v4l2_rect *crop, 26int omap_vout_new_window(struct v4l2_rect *crop,
27 struct v4l2_window *win, struct v4l2_framebuffer *fbuf, 27 struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
28 struct v4l2_window *new_win); 28 struct v4l2_window *new_win);
29 29
30extern void omap_vout_new_format(struct v4l2_pix_format *pix, 30void omap_vout_new_format(struct v4l2_pix_format *pix,
31 struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop, 31 struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
32 struct v4l2_window *win); 32 struct v4l2_window *win);
33unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr);
34void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size);
33#endif /* #ifndef OMAP_VOUTLIB_H */ 35#endif /* #ifndef OMAP_VOUTLIB_H */
34 36
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c
index e7cfc85b0a1c..8a947e603aca 100644
--- a/drivers/media/video/omap1_camera.c
+++ b/drivers/media/video/omap1_camera.c
@@ -26,7 +26,6 @@
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/version.h>
30 29
31#include <media/omap1_camera.h> 30#include <media/omap1_camera.h>
32#include <media/soc_camera.h> 31#include <media/soc_camera.h>
@@ -38,7 +37,7 @@
38 37
39 38
40#define DRIVER_NAME "omap1-camera" 39#define DRIVER_NAME "omap1-camera"
41#define VERSION_CODE KERNEL_VERSION(0, 0, 1) 40#define DRIVER_VERSION "0.0.2"
42 41
43 42
44/* 43/*
@@ -208,7 +207,7 @@ static int omap1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
208 struct soc_camera_device *icd = vq->priv_data; 207 struct soc_camera_device *icd = vq->priv_data;
209 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, 208 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
210 icd->current_fmt->host_fmt); 209 icd->current_fmt->host_fmt);
211 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 210 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
212 struct omap1_cam_dev *pcdev = ici->priv; 211 struct omap1_cam_dev *pcdev = ici->priv;
213 212
214 if (bytes_per_line < 0) 213 if (bytes_per_line < 0)
@@ -222,7 +221,7 @@ static int omap1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
222 if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024) 221 if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
223 *count = (MAX_VIDEO_MEM * 1024 * 1024) / *size; 222 *count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;
224 223
225 dev_dbg(icd->dev.parent, 224 dev_dbg(icd->parent,
226 "%s: count=%d, size=%d\n", __func__, *count, *size); 225 "%s: count=%d, size=%d\n", __func__, *count, *size);
227 226
228 return 0; 227 return 0;
@@ -241,7 +240,7 @@ static void free_buffer(struct videobuf_queue *vq, struct omap1_cam_buf *buf,
241 videobuf_dma_contig_free(vq, vb); 240 videobuf_dma_contig_free(vq, vb);
242 } else { 241 } else {
243 struct soc_camera_device *icd = vq->priv_data; 242 struct soc_camera_device *icd = vq->priv_data;
244 struct device *dev = icd->dev.parent; 243 struct device *dev = icd->parent;
245 struct videobuf_dmabuf *dma = videobuf_to_dma(vb); 244 struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
246 245
247 videobuf_dma_unmap(dev, dma); 246 videobuf_dma_unmap(dev, dma);
@@ -258,7 +257,7 @@ static int omap1_videobuf_prepare(struct videobuf_queue *vq,
258 struct omap1_cam_buf *buf = container_of(vb, struct omap1_cam_buf, vb); 257 struct omap1_cam_buf *buf = container_of(vb, struct omap1_cam_buf, vb);
259 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, 258 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
260 icd->current_fmt->host_fmt); 259 icd->current_fmt->host_fmt);
261 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 260 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
262 struct omap1_cam_dev *pcdev = ici->priv; 261 struct omap1_cam_dev *pcdev = ici->priv;
263 int ret; 262 int ret;
264 263
@@ -490,7 +489,7 @@ static void omap1_videobuf_queue(struct videobuf_queue *vq,
490 struct videobuf_buffer *vb) 489 struct videobuf_buffer *vb)
491{ 490{
492 struct soc_camera_device *icd = vq->priv_data; 491 struct soc_camera_device *icd = vq->priv_data;
493 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 492 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
494 struct omap1_cam_dev *pcdev = ici->priv; 493 struct omap1_cam_dev *pcdev = ici->priv;
495 struct omap1_cam_buf *buf; 494 struct omap1_cam_buf *buf;
496 u32 mode; 495 u32 mode;
@@ -519,7 +518,7 @@ static void omap1_videobuf_queue(struct videobuf_queue *vq,
519 pcdev->active = buf; 518 pcdev->active = buf;
520 pcdev->ready = NULL; 519 pcdev->ready = NULL;
521 520
522 dev_dbg(icd->dev.parent, 521 dev_dbg(icd->parent,
523 "%s: capture not active, setup FIFO, start DMA\n", __func__); 522 "%s: capture not active, setup FIFO, start DMA\n", __func__);
524 mode = CAM_READ_CACHE(pcdev, MODE) & ~THRESHOLD_MASK; 523 mode = CAM_READ_CACHE(pcdev, MODE) & ~THRESHOLD_MASK;
525 mode |= THRESHOLD_LEVEL(pcdev->vb_mode) << THRESHOLD_SHIFT; 524 mode |= THRESHOLD_LEVEL(pcdev->vb_mode) << THRESHOLD_SHIFT;
@@ -543,8 +542,8 @@ static void omap1_videobuf_release(struct videobuf_queue *vq,
543 struct omap1_cam_buf *buf = 542 struct omap1_cam_buf *buf =
544 container_of(vb, struct omap1_cam_buf, vb); 543 container_of(vb, struct omap1_cam_buf, vb);
545 struct soc_camera_device *icd = vq->priv_data; 544 struct soc_camera_device *icd = vq->priv_data;
546 struct device *dev = icd->dev.parent; 545 struct device *dev = icd->parent;
547 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 546 struct soc_camera_host *ici = to_soc_camera_host(dev);
548 struct omap1_cam_dev *pcdev = ici->priv; 547 struct omap1_cam_dev *pcdev = ici->priv;
549 548
550 switch (vb->state) { 549 switch (vb->state) {
@@ -573,7 +572,7 @@ static void videobuf_done(struct omap1_cam_dev *pcdev,
573{ 572{
574 struct omap1_cam_buf *buf = pcdev->active; 573 struct omap1_cam_buf *buf = pcdev->active;
575 struct videobuf_buffer *vb; 574 struct videobuf_buffer *vb;
576 struct device *dev = pcdev->icd->dev.parent; 575 struct device *dev = pcdev->icd->parent;
577 576
578 if (WARN_ON(!buf)) { 577 if (WARN_ON(!buf)) {
579 suspend_capture(pcdev); 578 suspend_capture(pcdev);
@@ -799,7 +798,7 @@ out:
799static irqreturn_t cam_isr(int irq, void *data) 798static irqreturn_t cam_isr(int irq, void *data)
800{ 799{
801 struct omap1_cam_dev *pcdev = data; 800 struct omap1_cam_dev *pcdev = data;
802 struct device *dev = pcdev->icd->dev.parent; 801 struct device *dev = pcdev->icd->parent;
803 struct omap1_cam_buf *buf = pcdev->active; 802 struct omap1_cam_buf *buf = pcdev->active;
804 u32 it_status; 803 u32 it_status;
805 unsigned long flags; 804 unsigned long flags;
@@ -909,7 +908,7 @@ static void sensor_reset(struct omap1_cam_dev *pcdev, bool reset)
909 */ 908 */
910static int omap1_cam_add_device(struct soc_camera_device *icd) 909static int omap1_cam_add_device(struct soc_camera_device *icd)
911{ 910{
912 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 911 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
913 struct omap1_cam_dev *pcdev = ici->priv; 912 struct omap1_cam_dev *pcdev = ici->priv;
914 u32 ctrlclock; 913 u32 ctrlclock;
915 914
@@ -952,14 +951,14 @@ static int omap1_cam_add_device(struct soc_camera_device *icd)
952 951
953 pcdev->icd = icd; 952 pcdev->icd = icd;
954 953
955 dev_dbg(icd->dev.parent, "OMAP1 Camera driver attached to camera %d\n", 954 dev_dbg(icd->parent, "OMAP1 Camera driver attached to camera %d\n",
956 icd->devnum); 955 icd->devnum);
957 return 0; 956 return 0;
958} 957}
959 958
960static void omap1_cam_remove_device(struct soc_camera_device *icd) 959static void omap1_cam_remove_device(struct soc_camera_device *icd)
961{ 960{
962 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 961 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
963 struct omap1_cam_dev *pcdev = ici->priv; 962 struct omap1_cam_dev *pcdev = ici->priv;
964 u32 ctrlclock; 963 u32 ctrlclock;
965 964
@@ -985,7 +984,7 @@ static void omap1_cam_remove_device(struct soc_camera_device *icd)
985 984
986 pcdev->icd = NULL; 985 pcdev->icd = NULL;
987 986
988 dev_dbg(icd->dev.parent, 987 dev_dbg(icd->parent,
989 "OMAP1 Camera driver detached from camera %d\n", icd->devnum); 988 "OMAP1 Camera driver detached from camera %d\n", icd->devnum);
990} 989}
991 990
@@ -1070,7 +1069,7 @@ static int omap1_cam_get_formats(struct soc_camera_device *icd,
1070 unsigned int idx, struct soc_camera_format_xlate *xlate) 1069 unsigned int idx, struct soc_camera_format_xlate *xlate)
1071{ 1070{
1072 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1071 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1073 struct device *dev = icd->dev.parent; 1072 struct device *dev = icd->parent;
1074 int formats = 0, ret; 1073 int formats = 0, ret;
1075 enum v4l2_mbus_pixelcode code; 1074 enum v4l2_mbus_pixelcode code;
1076 const struct soc_mbus_pixelfmt *fmt; 1075 const struct soc_mbus_pixelfmt *fmt;
@@ -1222,9 +1221,9 @@ static int omap1_cam_set_crop(struct soc_camera_device *icd,
1222 struct v4l2_rect *rect = &crop->c; 1221 struct v4l2_rect *rect = &crop->c;
1223 const struct soc_camera_format_xlate *xlate = icd->current_fmt; 1222 const struct soc_camera_format_xlate *xlate = icd->current_fmt;
1224 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1223 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1225 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1224 struct device *dev = icd->parent;
1225 struct soc_camera_host *ici = to_soc_camera_host(dev);
1226 struct omap1_cam_dev *pcdev = ici->priv; 1226 struct omap1_cam_dev *pcdev = ici->priv;
1227 struct device *dev = icd->dev.parent;
1228 struct v4l2_mbus_framefmt mf; 1227 struct v4l2_mbus_framefmt mf;
1229 int ret; 1228 int ret;
1230 1229
@@ -1270,8 +1269,8 @@ static int omap1_cam_set_fmt(struct soc_camera_device *icd,
1270{ 1269{
1271 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1270 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1272 const struct soc_camera_format_xlate *xlate; 1271 const struct soc_camera_format_xlate *xlate;
1273 struct device *dev = icd->dev.parent; 1272 struct device *dev = icd->parent;
1274 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1273 struct soc_camera_host *ici = to_soc_camera_host(dev);
1275 struct omap1_cam_dev *pcdev = ici->priv; 1274 struct omap1_cam_dev *pcdev = ici->priv;
1276 struct v4l2_pix_format *pix = &f->fmt.pix; 1275 struct v4l2_pix_format *pix = &f->fmt.pix;
1277 struct v4l2_mbus_framefmt mf; 1276 struct v4l2_mbus_framefmt mf;
@@ -1326,7 +1325,7 @@ static int omap1_cam_try_fmt(struct soc_camera_device *icd,
1326 1325
1327 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); 1326 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
1328 if (!xlate) { 1327 if (!xlate) {
1329 dev_warn(icd->dev.parent, "Format %#x not found\n", 1328 dev_warn(icd->parent, "Format %#x not found\n",
1330 pix->pixelformat); 1329 pix->pixelformat);
1331 return -EINVAL; 1330 return -EINVAL;
1332 } 1331 }
@@ -1362,7 +1361,7 @@ static int omap1_cam_mmap_mapper(struct videobuf_queue *q,
1362 struct vm_area_struct *vma) 1361 struct vm_area_struct *vma)
1363{ 1362{
1364 struct soc_camera_device *icd = q->priv_data; 1363 struct soc_camera_device *icd = q->priv_data;
1365 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1364 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
1366 struct omap1_cam_dev *pcdev = ici->priv; 1365 struct omap1_cam_dev *pcdev = ici->priv;
1367 int ret; 1366 int ret;
1368 1367
@@ -1377,17 +1376,17 @@ static int omap1_cam_mmap_mapper(struct videobuf_queue *q,
1377static void omap1_cam_init_videobuf(struct videobuf_queue *q, 1376static void omap1_cam_init_videobuf(struct videobuf_queue *q,
1378 struct soc_camera_device *icd) 1377 struct soc_camera_device *icd)
1379{ 1378{
1380 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1379 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
1381 struct omap1_cam_dev *pcdev = ici->priv; 1380 struct omap1_cam_dev *pcdev = ici->priv;
1382 1381
1383 if (!sg_mode) 1382 if (!sg_mode)
1384 videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops, 1383 videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops,
1385 icd->dev.parent, &pcdev->lock, 1384 icd->parent, &pcdev->lock,
1386 V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, 1385 V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
1387 sizeof(struct omap1_cam_buf), icd, &icd->video_lock); 1386 sizeof(struct omap1_cam_buf), icd, &icd->video_lock);
1388 else 1387 else
1389 videobuf_queue_sg_init(q, &omap1_videobuf_ops, 1388 videobuf_queue_sg_init(q, &omap1_videobuf_ops,
1390 icd->dev.parent, &pcdev->lock, 1389 icd->parent, &pcdev->lock,
1391 V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, 1390 V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
1392 sizeof(struct omap1_cam_buf), icd, &icd->video_lock); 1391 sizeof(struct omap1_cam_buf), icd, &icd->video_lock);
1393 1392
@@ -1431,7 +1430,6 @@ static int omap1_cam_querycap(struct soc_camera_host *ici,
1431{ 1430{
1432 /* cap->name is set by the friendly caller:-> */ 1431 /* cap->name is set by the friendly caller:-> */
1433 strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card)); 1432 strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card));
1434 cap->version = VERSION_CODE;
1435 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; 1433 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
1436 1434
1437 return 0; 1435 return 0;
@@ -1440,9 +1438,9 @@ static int omap1_cam_querycap(struct soc_camera_host *ici,
1440static int omap1_cam_set_bus_param(struct soc_camera_device *icd, 1438static int omap1_cam_set_bus_param(struct soc_camera_device *icd,
1441 __u32 pixfmt) 1439 __u32 pixfmt)
1442{ 1440{
1443 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1441 struct device *dev = icd->parent;
1442 struct soc_camera_host *ici = to_soc_camera_host(dev);
1444 struct omap1_cam_dev *pcdev = ici->priv; 1443 struct omap1_cam_dev *pcdev = ici->priv;
1445 struct device *dev = icd->dev.parent;
1446 const struct soc_camera_format_xlate *xlate; 1444 const struct soc_camera_format_xlate *xlate;
1447 const struct soc_mbus_pixelfmt *fmt; 1445 const struct soc_mbus_pixelfmt *fmt;
1448 unsigned long camera_flags, common_flags; 1446 unsigned long camera_flags, common_flags;
@@ -1718,4 +1716,5 @@ MODULE_PARM_DESC(sg_mode, "videobuf mode, 0: dma-contig (default), 1: dma-sg");
1718MODULE_DESCRIPTION("OMAP1 Camera Interface driver"); 1716MODULE_DESCRIPTION("OMAP1 Camera Interface driver");
1719MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>"); 1717MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
1720MODULE_LICENSE("GPL v2"); 1718MODULE_LICENSE("GPL v2");
1719MODULE_LICENSE(DRIVER_VERSION);
1721MODULE_ALIAS("platform:" DRIVER_NAME); 1720MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index 69b60ba5dd7a..eb97bff7116f 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -31,7 +31,6 @@
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
32#include <linux/videodev2.h> 32#include <linux/videodev2.h>
33#include <linux/pci.h> /* needed for videobufs */ 33#include <linux/pci.h> /* needed for videobufs */
34#include <linux/version.h>
35#include <linux/platform_device.h> 34#include <linux/platform_device.h>
36#include <linux/clk.h> 35#include <linux/clk.h>
37#include <linux/io.h> 36#include <linux/io.h>
@@ -43,7 +42,7 @@
43 42
44#include "omap24xxcam.h" 43#include "omap24xxcam.h"
45 44
46#define OMAP24XXCAM_VERSION KERNEL_VERSION(0, 0, 0) 45#define OMAP24XXCAM_VERSION "0.0.1"
47 46
48#define RESET_TIMEOUT_NS 10000 47#define RESET_TIMEOUT_NS 10000
49 48
@@ -309,11 +308,11 @@ static int omap24xxcam_vbq_alloc_mmap_buffer(struct videobuf_buffer *vb)
309 order--; 308 order--;
310 309
311 /* try to allocate as many contiguous pages as possible */ 310 /* try to allocate as many contiguous pages as possible */
312 page = alloc_pages(GFP_KERNEL | GFP_DMA, order); 311 page = alloc_pages(GFP_KERNEL, order);
313 /* if allocation fails, try to allocate smaller amount */ 312 /* if allocation fails, try to allocate smaller amount */
314 while (page == NULL) { 313 while (page == NULL) {
315 order--; 314 order--;
316 page = alloc_pages(GFP_KERNEL | GFP_DMA, order); 315 page = alloc_pages(GFP_KERNEL, order);
317 if (page == NULL && !order) { 316 if (page == NULL && !order) {
318 err = -ENOMEM; 317 err = -ENOMEM;
319 goto out; 318 goto out;
@@ -993,7 +992,6 @@ static int vidioc_querycap(struct file *file, void *fh,
993 992
994 strlcpy(cap->driver, CAM_NAME, sizeof(cap->driver)); 993 strlcpy(cap->driver, CAM_NAME, sizeof(cap->driver));
995 strlcpy(cap->card, cam->vfd->name, sizeof(cap->card)); 994 strlcpy(cap->card, cam->vfd->name, sizeof(cap->card));
996 cap->version = OMAP24XXCAM_VERSION;
997 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; 995 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
998 996
999 return 0; 997 return 0;
@@ -1888,6 +1886,7 @@ static void __exit omap24xxcam_cleanup(void)
1888MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>"); 1886MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
1889MODULE_DESCRIPTION("OMAP24xx Video for Linux camera driver"); 1887MODULE_DESCRIPTION("OMAP24xx Video for Linux camera driver");
1890MODULE_LICENSE("GPL"); 1888MODULE_LICENSE("GPL");
1889MODULE_VERSION(OMAP24XXCAM_VERSION);
1891module_param(video_nr, int, 0); 1890module_param(video_nr, int, 0);
1892MODULE_PARM_DESC(video_nr, 1891MODULE_PARM_DESC(video_nr,
1893 "Minor number for video device (-1 ==> auto assign)"); 1892 "Minor number for video device (-1 ==> auto assign)");
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index 94b6ed89e195..5cea2bbd7014 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -2234,3 +2234,4 @@ module_exit(isp_cleanup);
2234MODULE_AUTHOR("Nokia Corporation"); 2234MODULE_AUTHOR("Nokia Corporation");
2235MODULE_DESCRIPTION("TI OMAP3 ISP driver"); 2235MODULE_DESCRIPTION("TI OMAP3 ISP driver");
2236MODULE_LICENSE("GPL"); 2236MODULE_LICENSE("GPL");
2237MODULE_VERSION(ISP_VIDEO_DRIVER_VERSION);
diff --git a/drivers/media/video/omap3isp/isp.h b/drivers/media/video/omap3isp/isp.h
index 2620c405f5e4..529e582ef948 100644
--- a/drivers/media/video/omap3isp/isp.h
+++ b/drivers/media/video/omap3isp/isp.h
@@ -139,6 +139,10 @@ struct isp_reg {
139 * 3 - CAMEXT[13:6] -> CAM[7:0] 139 * 3 - CAMEXT[13:6] -> CAM[7:0]
140 * @clk_pol: Pixel clock polarity 140 * @clk_pol: Pixel clock polarity
141 * 0 - Non Inverted, 1 - Inverted 141 * 0 - Non Inverted, 1 - Inverted
142 * @hs_pol: Horizontal synchronization polarity
143 * 0 - Active high, 1 - Active low
144 * @vs_pol: Vertical synchronization polarity
145 * 0 - Active high, 1 - Active low
142 * @bridge: CCDC Bridge input control 146 * @bridge: CCDC Bridge input control
143 * ISPCTRL_PAR_BRIDGE_DISABLE - Disable 147 * ISPCTRL_PAR_BRIDGE_DISABLE - Disable
144 * ISPCTRL_PAR_BRIDGE_LENDIAN - Little endian 148 * ISPCTRL_PAR_BRIDGE_LENDIAN - Little endian
@@ -147,6 +151,8 @@ struct isp_reg {
147struct isp_parallel_platform_data { 151struct isp_parallel_platform_data {
148 unsigned int data_lane_shift:2; 152 unsigned int data_lane_shift:2;
149 unsigned int clk_pol:1; 153 unsigned int clk_pol:1;
154 unsigned int hs_pol:1;
155 unsigned int vs_pol:1;
150 unsigned int bridge:4; 156 unsigned int bridge:4;
151}; 157};
152 158
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index 39d501bda636..9d3459de04b2 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -1148,6 +1148,8 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
1148 omap3isp_configure_bridge(isp, ccdc->input, pdata, shift); 1148 omap3isp_configure_bridge(isp, ccdc->input, pdata, shift);
1149 1149
1150 ccdc->syncif.datsz = depth_out; 1150 ccdc->syncif.datsz = depth_out;
1151 ccdc->syncif.hdpol = pdata ? pdata->hs_pol : 0;
1152 ccdc->syncif.vdpol = pdata ? pdata->vs_pol : 0;
1151 ccdc_config_sync_if(ccdc, &ccdc->syncif); 1153 ccdc_config_sync_if(ccdc, &ccdc->syncif);
1152 1154
1153 /* CCDC_PAD_SINK */ 1155 /* CCDC_PAD_SINK */
@@ -1691,7 +1693,7 @@ static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
1691 if (sub->type != V4L2_EVENT_OMAP3ISP_HS_VS) 1693 if (sub->type != V4L2_EVENT_OMAP3ISP_HS_VS)
1692 return -EINVAL; 1694 return -EINVAL;
1693 1695
1694 return v4l2_event_subscribe(fh, sub); 1696 return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS);
1695} 1697}
1696 1698
1697static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, 1699static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
@@ -2162,7 +2164,6 @@ static int ccdc_init_entities(struct isp_ccdc_device *ccdc)
2162 sd->grp_id = 1 << 16; /* group ID for isp subdevs */ 2164 sd->grp_id = 1 << 16; /* group ID for isp subdevs */
2163 v4l2_set_subdevdata(sd, ccdc); 2165 v4l2_set_subdevdata(sd, ccdc);
2164 sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE; 2166 sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
2165 sd->nevents = OMAP3ISP_CCDC_NEVENTS;
2166 2167
2167 pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK; 2168 pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
2168 pads[CCDC_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE; 2169 pads[CCDC_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
@@ -2257,8 +2258,6 @@ int omap3isp_ccdc_init(struct isp_device *isp)
2257 ccdc->syncif.fldout = 0; 2258 ccdc->syncif.fldout = 0;
2258 ccdc->syncif.fldpol = 0; 2259 ccdc->syncif.fldpol = 0;
2259 ccdc->syncif.fldstat = 0; 2260 ccdc->syncif.fldstat = 0;
2260 ccdc->syncif.hdpol = 0;
2261 ccdc->syncif.vdpol = 0;
2262 2261
2263 ccdc->clamp.oblen = 0; 2262 ccdc->clamp.oblen = 0;
2264 ccdc->clamp.dcsubval = 0; 2263 ccdc->clamp.dcsubval = 0;
diff --git a/drivers/media/video/omap3isp/ispccp2.c b/drivers/media/video/omap3isp/ispccp2.c
index 0e16cab8e089..ec9e395f3339 100644
--- a/drivers/media/video/omap3isp/ispccp2.c
+++ b/drivers/media/video/omap3isp/ispccp2.c
@@ -30,6 +30,7 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/uaccess.h> 32#include <linux/uaccess.h>
33#include <linux/regulator/consumer.h>
33 34
34#include "isp.h" 35#include "isp.h"
35#include "ispreg.h" 36#include "ispreg.h"
@@ -163,6 +164,9 @@ static void ccp2_if_enable(struct isp_ccp2_device *ccp2, u8 enable)
163 struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity); 164 struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
164 int i; 165 int i;
165 166
167 if (enable && ccp2->vdds_csib)
168 regulator_enable(ccp2->vdds_csib);
169
166 /* Enable/Disable all the LCx channels */ 170 /* Enable/Disable all the LCx channels */
167 for (i = 0; i < CCP2_LCx_CHANS_NUM; i++) 171 for (i = 0; i < CCP2_LCx_CHANS_NUM; i++)
168 isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(i), 172 isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(i),
@@ -186,6 +190,9 @@ static void ccp2_if_enable(struct isp_ccp2_device *ccp2, u8 enable)
186 ISPCCP2_LC01_IRQENABLE, 190 ISPCCP2_LC01_IRQENABLE,
187 ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ); 191 ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ);
188 } 192 }
193
194 if (!enable && ccp2->vdds_csib)
195 regulator_disable(ccp2->vdds_csib);
189} 196}
190 197
191/* 198/*
@@ -1137,6 +1144,9 @@ error:
1137 */ 1144 */
1138void omap3isp_ccp2_cleanup(struct isp_device *isp) 1145void omap3isp_ccp2_cleanup(struct isp_device *isp)
1139{ 1146{
1147 struct isp_ccp2_device *ccp2 = &isp->isp_ccp2;
1148
1149 regulator_put(ccp2->vdds_csib);
1140} 1150}
1141 1151
1142/* 1152/*
@@ -1151,14 +1161,27 @@ int omap3isp_ccp2_init(struct isp_device *isp)
1151 1161
1152 init_waitqueue_head(&ccp2->wait); 1162 init_waitqueue_head(&ccp2->wait);
1153 1163
1154 /* On the OMAP36xx, the CCP2 uses the CSI PHY1 or PHY2, shared with 1164 /*
1165 * On the OMAP34xx the CSI1 receiver is operated in the CSIb IO
1166 * complex, which is powered by vdds_csib power rail. Hence the
1167 * request for the regulator.
1168 *
1169 * On the OMAP36xx, the CCP2 uses the CSI PHY1 or PHY2, shared with
1155 * the CSI2c or CSI2a receivers. The PHY then needs to be explicitly 1170 * the CSI2c or CSI2a receivers. The PHY then needs to be explicitly
1156 * configured. 1171 * configured.
1157 * 1172 *
1158 * TODO: Don't hardcode the usage of PHY1 (shared with CSI2c). 1173 * TODO: Don't hardcode the usage of PHY1 (shared with CSI2c).
1159 */ 1174 */
1160 if (isp->revision == ISP_REVISION_15_0) 1175 if (isp->revision == ISP_REVISION_2_0) {
1176 ccp2->vdds_csib = regulator_get(isp->dev, "vdds_csib");
1177 if (IS_ERR(ccp2->vdds_csib)) {
1178 dev_dbg(isp->dev,
1179 "Could not get regulator vdds_csib\n");
1180 ccp2->vdds_csib = NULL;
1181 }
1182 } else if (isp->revision == ISP_REVISION_15_0) {
1161 ccp2->phy = &isp->isp_csiphy1; 1183 ccp2->phy = &isp->isp_csiphy1;
1184 }
1162 1185
1163 ret = ccp2_init_entities(ccp2); 1186 ret = ccp2_init_entities(ccp2);
1164 if (ret < 0) 1187 if (ret < 0)
diff --git a/drivers/media/video/omap3isp/ispccp2.h b/drivers/media/video/omap3isp/ispccp2.h
index 5505a86a9a74..6674e9de2cd7 100644
--- a/drivers/media/video/omap3isp/ispccp2.h
+++ b/drivers/media/video/omap3isp/ispccp2.h
@@ -81,6 +81,7 @@ struct isp_ccp2_device {
81 struct isp_interface_mem_config mem_cfg; 81 struct isp_interface_mem_config mem_cfg;
82 struct isp_video video_in; 82 struct isp_video video_in;
83 struct isp_csiphy *phy; 83 struct isp_csiphy *phy;
84 struct regulator *vdds_csib;
84 unsigned int error; 85 unsigned int error;
85 enum isp_pipeline_stream_state state; 86 enum isp_pipeline_stream_state state;
86 wait_queue_head_t wait; 87 wait_queue_head_t wait;
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index b44cb685236a..808065948ac1 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -1032,7 +1032,6 @@ static int isp_stat_init_entities(struct ispstat *stat, const char *name,
1032 snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name); 1032 snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
1033 subdev->grp_id = 1 << 16; /* group ID for isp subdevs */ 1033 subdev->grp_id = 1 << 16; /* group ID for isp subdevs */
1034 subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE; 1034 subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
1035 subdev->nevents = STAT_NEVENTS;
1036 v4l2_set_subdevdata(subdev, stat); 1035 v4l2_set_subdevdata(subdev, stat);
1037 1036
1038 stat->pad.flags = MEDIA_PAD_FL_SINK; 1037 stat->pad.flags = MEDIA_PAD_FL_SINK;
@@ -1050,7 +1049,7 @@ int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
1050 if (sub->type != stat->event_type) 1049 if (sub->type != stat->event_type)
1051 return -EINVAL; 1050 return -EINVAL;
1052 1051
1053 return v4l2_event_subscribe(fh, sub); 1052 return v4l2_event_subscribe(fh, sub, STAT_NEVENTS);
1054} 1053}
1055 1054
1056int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev, 1055int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index 9cd8f1aa567b..fd965adfd597 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -695,7 +695,6 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
695 strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver)); 695 strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
696 strlcpy(cap->card, video->video.name, sizeof(cap->card)); 696 strlcpy(cap->card, video->video.name, sizeof(cap->card));
697 strlcpy(cap->bus_info, "media", sizeof(cap->bus_info)); 697 strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
698 cap->version = ISP_VIDEO_DRIVER_VERSION;
699 698
700 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 699 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
701 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; 700 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
diff --git a/drivers/media/video/omap3isp/ispvideo.h b/drivers/media/video/omap3isp/ispvideo.h
index 911bea64e78a..53160aa24e6e 100644
--- a/drivers/media/video/omap3isp/ispvideo.h
+++ b/drivers/media/video/omap3isp/ispvideo.h
@@ -27,7 +27,6 @@
27#define OMAP3_ISP_VIDEO_H 27#define OMAP3_ISP_VIDEO_H
28 28
29#include <linux/v4l2-mediabus.h> 29#include <linux/v4l2-mediabus.h>
30#include <linux/version.h>
31#include <media/media-entity.h> 30#include <media/media-entity.h>
32#include <media/v4l2-dev.h> 31#include <media/v4l2-dev.h>
33#include <media/v4l2-fh.h> 32#include <media/v4l2-fh.h>
@@ -35,7 +34,7 @@
35#include "ispqueue.h" 34#include "ispqueue.h"
36 35
37#define ISP_VIDEO_DRIVER_NAME "ispvideo" 36#define ISP_VIDEO_DRIVER_NAME "ispvideo"
38#define ISP_VIDEO_DRIVER_VERSION KERNEL_VERSION(0, 0, 1) 37#define ISP_VIDEO_DRIVER_VERSION "0.0.2"
39 38
40struct isp_device; 39struct isp_device;
41struct isp_video; 40struct isp_video;
diff --git a/drivers/media/video/ov2640.c b/drivers/media/video/ov2640.c
index 0cea0cf36679..9ce2fa037b94 100644
--- a/drivers/media/video/ov2640.c
+++ b/drivers/media/video/ov2640.c
@@ -1031,16 +1031,9 @@ static int ov2640_video_probe(struct soc_camera_device *icd,
1031 const char *devname; 1031 const char *devname;
1032 int ret; 1032 int ret;
1033 1033
1034 /* 1034 /* We must have a parent by now. And it cannot be a wrong one. */
1035 * we must have a parent by now. And it cannot be a wrong one. 1035 BUG_ON(!icd->parent ||
1036 * So this entire test is completely redundant. 1036 to_soc_camera_host(icd->parent)->nr != icd->iface);
1037 */
1038 if (!icd->dev.parent ||
1039 to_soc_camera_host(icd->dev.parent)->nr != icd->iface) {
1040 dev_err(&client->dev, "Parent missing or invalid!\n");
1041 ret = -ENODEV;
1042 goto err;
1043 }
1044 1037
1045 /* 1038 /*
1046 * check and show product ID and manufacturer ID 1039 * check and show product ID and manufacturer ID
diff --git a/drivers/media/video/ov5642.c b/drivers/media/video/ov5642.c
new file mode 100644
index 000000000000..349a4ad3ccc1
--- /dev/null
+++ b/drivers/media/video/ov5642.c
@@ -0,0 +1,1012 @@
1/*
2 * Driver for OV5642 CMOS Image Sensor from Omnivision
3 *
4 * Copyright (C) 2011, Bastian Hecht <hechtb@gmail.com>
5 *
6 * Based on Sony IMX074 Camera Driver
7 * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
8 *
9 * Based on Omnivision OV7670 Camera Driver
10 * Copyright (C) 2006-7 Jonathan Corbet <corbet@lwn.net>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/delay.h>
18#include <linux/i2c.h>
19#include <linux/slab.h>
20#include <linux/videodev2.h>
21#include <linux/module.h>
22
23#include <media/soc_camera.h>
24#include <media/soc_mediabus.h>
25#include <media/v4l2-chip-ident.h>
26#include <media/v4l2-subdev.h>
27
28/* OV5642 registers */
29#define REG_CHIP_ID_HIGH 0x300a
30#define REG_CHIP_ID_LOW 0x300b
31
32#define REG_WINDOW_START_X_HIGH 0x3800
33#define REG_WINDOW_START_X_LOW 0x3801
34#define REG_WINDOW_START_Y_HIGH 0x3802
35#define REG_WINDOW_START_Y_LOW 0x3803
36#define REG_WINDOW_WIDTH_HIGH 0x3804
37#define REG_WINDOW_WIDTH_LOW 0x3805
38#define REG_WINDOW_HEIGHT_HIGH 0x3806
39#define REG_WINDOW_HEIGHT_LOW 0x3807
40#define REG_OUT_WIDTH_HIGH 0x3808
41#define REG_OUT_WIDTH_LOW 0x3809
42#define REG_OUT_HEIGHT_HIGH 0x380a
43#define REG_OUT_HEIGHT_LOW 0x380b
44#define REG_OUT_TOTAL_WIDTH_HIGH 0x380c
45#define REG_OUT_TOTAL_WIDTH_LOW 0x380d
46#define REG_OUT_TOTAL_HEIGHT_HIGH 0x380e
47#define REG_OUT_TOTAL_HEIGHT_LOW 0x380f
48
49/*
50 * define standard resolution.
51 * Works currently only for up to 720 lines
52 * eg. 320x240, 640x480, 800x600, 1280x720, 2048x720
53 */
54
55#define OV5642_WIDTH 1280
56#define OV5642_HEIGHT 720
57#define OV5642_TOTAL_WIDTH 3200
58#define OV5642_TOTAL_HEIGHT 2000
59#define OV5642_SENSOR_SIZE_X 2592
60#define OV5642_SENSOR_SIZE_Y 1944
61
62struct regval_list {
63 u16 reg_num;
64 u8 value;
65};
66
67static struct regval_list ov5642_default_regs_init[] = {
68 { 0x3103, 0x93 },
69 { 0x3008, 0x82 },
70 { 0x3017, 0x7f },
71 { 0x3018, 0xfc },
72 { 0x3810, 0xc2 },
73 { 0x3615, 0xf0 },
74 { 0x3000, 0x0 },
75 { 0x3001, 0x0 },
76 { 0x3002, 0x0 },
77 { 0x3003, 0x0 },
78 { 0x3004, 0xff },
79 { 0x3030, 0x2b },
80 { 0x3011, 0x8 },
81 { 0x3010, 0x10 },
82 { 0x3604, 0x60 },
83 { 0x3622, 0x60 },
84 { 0x3621, 0x9 },
85 { 0x3709, 0x0 },
86 { 0x4000, 0x21 },
87 { 0x401d, 0x22 },
88 { 0x3600, 0x54 },
89 { 0x3605, 0x4 },
90 { 0x3606, 0x3f },
91 { 0x3c01, 0x80 },
92 { 0x300d, 0x22 },
93 { 0x3623, 0x22 },
94 { 0x5000, 0x4f },
95 { 0x5020, 0x4 },
96 { 0x5181, 0x79 },
97 { 0x5182, 0x0 },
98 { 0x5185, 0x22 },
99 { 0x5197, 0x1 },
100 { 0x5500, 0xa },
101 { 0x5504, 0x0 },
102 { 0x5505, 0x7f },
103 { 0x5080, 0x8 },
104 { 0x300e, 0x18 },
105 { 0x4610, 0x0 },
106 { 0x471d, 0x5 },
107 { 0x4708, 0x6 },
108 { 0x370c, 0xa0 },
109 { 0x5687, 0x94 },
110 { 0x501f, 0x0 },
111 { 0x5000, 0x4f },
112 { 0x5001, 0xcf },
113 { 0x4300, 0x30 },
114 { 0x4300, 0x30 },
115 { 0x460b, 0x35 },
116 { 0x471d, 0x0 },
117 { 0x3002, 0xc },
118 { 0x3002, 0x0 },
119 { 0x4713, 0x3 },
120 { 0x471c, 0x50 },
121 { 0x4721, 0x2 },
122 { 0x4402, 0x90 },
123 { 0x460c, 0x22 },
124 { 0x3815, 0x44 },
125 { 0x3503, 0x7 },
126 { 0x3501, 0x73 },
127 { 0x3502, 0x80 },
128 { 0x350b, 0x0 },
129 { 0x3818, 0xc8 },
130 { 0x3824, 0x11 },
131 { 0x3a00, 0x78 },
132 { 0x3a1a, 0x4 },
133 { 0x3a13, 0x30 },
134 { 0x3a18, 0x0 },
135 { 0x3a19, 0x7c },
136 { 0x3a08, 0x12 },
137 { 0x3a09, 0xc0 },
138 { 0x3a0a, 0xf },
139 { 0x3a0b, 0xa0 },
140 { 0x350c, 0x7 },
141 { 0x350d, 0xd0 },
142 { 0x3a0d, 0x8 },
143 { 0x3a0e, 0x6 },
144 { 0x3500, 0x0 },
145 { 0x3501, 0x0 },
146 { 0x3502, 0x0 },
147 { 0x350a, 0x0 },
148 { 0x350b, 0x0 },
149 { 0x3503, 0x0 },
150 { 0x3a0f, 0x3c },
151 { 0x3a10, 0x32 },
152 { 0x3a1b, 0x3c },
153 { 0x3a1e, 0x32 },
154 { 0x3a11, 0x80 },
155 { 0x3a1f, 0x20 },
156 { 0x3030, 0x2b },
157 { 0x3a02, 0x0 },
158 { 0x3a03, 0x7d },
159 { 0x3a04, 0x0 },
160 { 0x3a14, 0x0 },
161 { 0x3a15, 0x7d },
162 { 0x3a16, 0x0 },
163 { 0x3a00, 0x78 },
164 { 0x3a08, 0x9 },
165 { 0x3a09, 0x60 },
166 { 0x3a0a, 0x7 },
167 { 0x3a0b, 0xd0 },
168 { 0x3a0d, 0x10 },
169 { 0x3a0e, 0xd },
170 { 0x4407, 0x4 },
171 { 0x5193, 0x70 },
172 { 0x589b, 0x0 },
173 { 0x589a, 0xc0 },
174 { 0x401e, 0x20 },
175 { 0x4001, 0x42 },
176 { 0x401c, 0x6 },
177 { 0x3825, 0xac },
178 { 0x3827, 0xc },
179 { 0x528a, 0x1 },
180 { 0x528b, 0x4 },
181 { 0x528c, 0x8 },
182 { 0x528d, 0x10 },
183 { 0x528e, 0x20 },
184 { 0x528f, 0x28 },
185 { 0x5290, 0x30 },
186 { 0x5292, 0x0 },
187 { 0x5293, 0x1 },
188 { 0x5294, 0x0 },
189 { 0x5295, 0x4 },
190 { 0x5296, 0x0 },
191 { 0x5297, 0x8 },
192 { 0x5298, 0x0 },
193 { 0x5299, 0x10 },
194 { 0x529a, 0x0 },
195 { 0x529b, 0x20 },
196 { 0x529c, 0x0 },
197 { 0x529d, 0x28 },
198 { 0x529e, 0x0 },
199 { 0x529f, 0x30 },
200 { 0x5282, 0x0 },
201 { 0x5300, 0x0 },
202 { 0x5301, 0x20 },
203 { 0x5302, 0x0 },
204 { 0x5303, 0x7c },
205 { 0x530c, 0x0 },
206 { 0x530d, 0xc },
207 { 0x530e, 0x20 },
208 { 0x530f, 0x80 },
209 { 0x5310, 0x20 },
210 { 0x5311, 0x80 },
211 { 0x5308, 0x20 },
212 { 0x5309, 0x40 },
213 { 0x5304, 0x0 },
214 { 0x5305, 0x30 },
215 { 0x5306, 0x0 },
216 { 0x5307, 0x80 },
217 { 0x5314, 0x8 },
218 { 0x5315, 0x20 },
219 { 0x5319, 0x30 },
220 { 0x5316, 0x10 },
221 { 0x5317, 0x0 },
222 { 0x5318, 0x2 },
223 { 0x5380, 0x1 },
224 { 0x5381, 0x0 },
225 { 0x5382, 0x0 },
226 { 0x5383, 0x4e },
227 { 0x5384, 0x0 },
228 { 0x5385, 0xf },
229 { 0x5386, 0x0 },
230 { 0x5387, 0x0 },
231 { 0x5388, 0x1 },
232 { 0x5389, 0x15 },
233 { 0x538a, 0x0 },
234 { 0x538b, 0x31 },
235 { 0x538c, 0x0 },
236 { 0x538d, 0x0 },
237 { 0x538e, 0x0 },
238 { 0x538f, 0xf },
239 { 0x5390, 0x0 },
240 { 0x5391, 0xab },
241 { 0x5392, 0x0 },
242 { 0x5393, 0xa2 },
243 { 0x5394, 0x8 },
244 { 0x5480, 0x14 },
245 { 0x5481, 0x21 },
246 { 0x5482, 0x36 },
247 { 0x5483, 0x57 },
248 { 0x5484, 0x65 },
249 { 0x5485, 0x71 },
250 { 0x5486, 0x7d },
251 { 0x5487, 0x87 },
252 { 0x5488, 0x91 },
253 { 0x5489, 0x9a },
254 { 0x548a, 0xaa },
255 { 0x548b, 0xb8 },
256 { 0x548c, 0xcd },
257 { 0x548d, 0xdd },
258 { 0x548e, 0xea },
259 { 0x548f, 0x1d },
260 { 0x5490, 0x5 },
261 { 0x5491, 0x0 },
262 { 0x5492, 0x4 },
263 { 0x5493, 0x20 },
264 { 0x5494, 0x3 },
265 { 0x5495, 0x60 },
266 { 0x5496, 0x2 },
267 { 0x5497, 0xb8 },
268 { 0x5498, 0x2 },
269 { 0x5499, 0x86 },
270 { 0x549a, 0x2 },
271 { 0x549b, 0x5b },
272 { 0x549c, 0x2 },
273 { 0x549d, 0x3b },
274 { 0x549e, 0x2 },
275 { 0x549f, 0x1c },
276 { 0x54a0, 0x2 },
277 { 0x54a1, 0x4 },
278 { 0x54a2, 0x1 },
279 { 0x54a3, 0xed },
280 { 0x54a4, 0x1 },
281 { 0x54a5, 0xc5 },
282 { 0x54a6, 0x1 },
283 { 0x54a7, 0xa5 },
284 { 0x54a8, 0x1 },
285 { 0x54a9, 0x6c },
286 { 0x54aa, 0x1 },
287 { 0x54ab, 0x41 },
288 { 0x54ac, 0x1 },
289 { 0x54ad, 0x20 },
290 { 0x54ae, 0x0 },
291 { 0x54af, 0x16 },
292 { 0x54b0, 0x1 },
293 { 0x54b1, 0x20 },
294 { 0x54b2, 0x0 },
295 { 0x54b3, 0x10 },
296 { 0x54b4, 0x0 },
297 { 0x54b5, 0xf0 },
298 { 0x54b6, 0x0 },
299 { 0x54b7, 0xdf },
300 { 0x5402, 0x3f },
301 { 0x5403, 0x0 },
302 { 0x3406, 0x0 },
303 { 0x5180, 0xff },
304 { 0x5181, 0x52 },
305 { 0x5182, 0x11 },
306 { 0x5183, 0x14 },
307 { 0x5184, 0x25 },
308 { 0x5185, 0x24 },
309 { 0x5186, 0x6 },
310 { 0x5187, 0x8 },
311 { 0x5188, 0x8 },
312 { 0x5189, 0x7c },
313 { 0x518a, 0x60 },
314 { 0x518b, 0xb2 },
315 { 0x518c, 0xb2 },
316 { 0x518d, 0x44 },
317 { 0x518e, 0x3d },
318 { 0x518f, 0x58 },
319 { 0x5190, 0x46 },
320 { 0x5191, 0xf8 },
321 { 0x5192, 0x4 },
322 { 0x5193, 0x70 },
323 { 0x5194, 0xf0 },
324 { 0x5195, 0xf0 },
325 { 0x5196, 0x3 },
326 { 0x5197, 0x1 },
327 { 0x5198, 0x4 },
328 { 0x5199, 0x12 },
329 { 0x519a, 0x4 },
330 { 0x519b, 0x0 },
331 { 0x519c, 0x6 },
332 { 0x519d, 0x82 },
333 { 0x519e, 0x0 },
334 { 0x5025, 0x80 },
335 { 0x3a0f, 0x38 },
336 { 0x3a10, 0x30 },
337 { 0x3a1b, 0x3a },
338 { 0x3a1e, 0x2e },
339 { 0x3a11, 0x60 },
340 { 0x3a1f, 0x10 },
341 { 0x5688, 0xa6 },
342 { 0x5689, 0x6a },
343 { 0x568a, 0xea },
344 { 0x568b, 0xae },
345 { 0x568c, 0xa6 },
346 { 0x568d, 0x6a },
347 { 0x568e, 0x62 },
348 { 0x568f, 0x26 },
349 { 0x5583, 0x40 },
350 { 0x5584, 0x40 },
351 { 0x5580, 0x2 },
352 { 0x5000, 0xcf },
353 { 0x5800, 0x27 },
354 { 0x5801, 0x19 },
355 { 0x5802, 0x12 },
356 { 0x5803, 0xf },
357 { 0x5804, 0x10 },
358 { 0x5805, 0x15 },
359 { 0x5806, 0x1e },
360 { 0x5807, 0x2f },
361 { 0x5808, 0x15 },
362 { 0x5809, 0xd },
363 { 0x580a, 0xa },
364 { 0x580b, 0x9 },
365 { 0x580c, 0xa },
366 { 0x580d, 0xc },
367 { 0x580e, 0x12 },
368 { 0x580f, 0x19 },
369 { 0x5810, 0xb },
370 { 0x5811, 0x7 },
371 { 0x5812, 0x4 },
372 { 0x5813, 0x3 },
373 { 0x5814, 0x3 },
374 { 0x5815, 0x6 },
375 { 0x5816, 0xa },
376 { 0x5817, 0xf },
377 { 0x5818, 0xa },
378 { 0x5819, 0x5 },
379 { 0x581a, 0x1 },
380 { 0x581b, 0x0 },
381 { 0x581c, 0x0 },
382 { 0x581d, 0x3 },
383 { 0x581e, 0x8 },
384 { 0x581f, 0xc },
385 { 0x5820, 0xa },
386 { 0x5821, 0x5 },
387 { 0x5822, 0x1 },
388 { 0x5823, 0x0 },
389 { 0x5824, 0x0 },
390 { 0x5825, 0x3 },
391 { 0x5826, 0x8 },
392 { 0x5827, 0xc },
393 { 0x5828, 0xe },
394 { 0x5829, 0x8 },
395 { 0x582a, 0x6 },
396 { 0x582b, 0x4 },
397 { 0x582c, 0x5 },
398 { 0x582d, 0x7 },
399 { 0x582e, 0xb },
400 { 0x582f, 0x12 },
401 { 0x5830, 0x18 },
402 { 0x5831, 0x10 },
403 { 0x5832, 0xc },
404 { 0x5833, 0xa },
405 { 0x5834, 0xb },
406 { 0x5835, 0xe },
407 { 0x5836, 0x15 },
408 { 0x5837, 0x19 },
409 { 0x5838, 0x32 },
410 { 0x5839, 0x1f },
411 { 0x583a, 0x18 },
412 { 0x583b, 0x16 },
413 { 0x583c, 0x17 },
414 { 0x583d, 0x1e },
415 { 0x583e, 0x26 },
416 { 0x583f, 0x53 },
417 { 0x5840, 0x10 },
418 { 0x5841, 0xf },
419 { 0x5842, 0xd },
420 { 0x5843, 0xc },
421 { 0x5844, 0xe },
422 { 0x5845, 0x9 },
423 { 0x5846, 0x11 },
424 { 0x5847, 0x10 },
425 { 0x5848, 0x10 },
426 { 0x5849, 0x10 },
427 { 0x584a, 0x10 },
428 { 0x584b, 0xe },
429 { 0x584c, 0x10 },
430 { 0x584d, 0x10 },
431 { 0x584e, 0x11 },
432 { 0x584f, 0x10 },
433 { 0x5850, 0xf },
434 { 0x5851, 0xc },
435 { 0x5852, 0xf },
436 { 0x5853, 0x10 },
437 { 0x5854, 0x10 },
438 { 0x5855, 0xf },
439 { 0x5856, 0xe },
440 { 0x5857, 0xb },
441 { 0x5858, 0x10 },
442 { 0x5859, 0xd },
443 { 0x585a, 0xd },
444 { 0x585b, 0xc },
445 { 0x585c, 0xc },
446 { 0x585d, 0xc },
447 { 0x585e, 0xb },
448 { 0x585f, 0xc },
449 { 0x5860, 0xc },
450 { 0x5861, 0xc },
451 { 0x5862, 0xd },
452 { 0x5863, 0x8 },
453 { 0x5864, 0x11 },
454 { 0x5865, 0x18 },
455 { 0x5866, 0x18 },
456 { 0x5867, 0x19 },
457 { 0x5868, 0x17 },
458 { 0x5869, 0x19 },
459 { 0x586a, 0x16 },
460 { 0x586b, 0x13 },
461 { 0x586c, 0x13 },
462 { 0x586d, 0x12 },
463 { 0x586e, 0x13 },
464 { 0x586f, 0x16 },
465 { 0x5870, 0x14 },
466 { 0x5871, 0x12 },
467 { 0x5872, 0x10 },
468 { 0x5873, 0x11 },
469 { 0x5874, 0x11 },
470 { 0x5875, 0x16 },
471 { 0x5876, 0x14 },
472 { 0x5877, 0x11 },
473 { 0x5878, 0x10 },
474 { 0x5879, 0xf },
475 { 0x587a, 0x10 },
476 { 0x587b, 0x14 },
477 { 0x587c, 0x13 },
478 { 0x587d, 0x12 },
479 { 0x587e, 0x11 },
480 { 0x587f, 0x11 },
481 { 0x5880, 0x12 },
482 { 0x5881, 0x15 },
483 { 0x5882, 0x14 },
484 { 0x5883, 0x15 },
485 { 0x5884, 0x15 },
486 { 0x5885, 0x15 },
487 { 0x5886, 0x13 },
488 { 0x5887, 0x17 },
489 { 0x3710, 0x10 },
490 { 0x3632, 0x51 },
491 { 0x3702, 0x10 },
492 { 0x3703, 0xb2 },
493 { 0x3704, 0x18 },
494 { 0x370b, 0x40 },
495 { 0x370d, 0x3 },
496 { 0x3631, 0x1 },
497 { 0x3632, 0x52 },
498 { 0x3606, 0x24 },
499 { 0x3620, 0x96 },
500 { 0x5785, 0x7 },
501 { 0x3a13, 0x30 },
502 { 0x3600, 0x52 },
503 { 0x3604, 0x48 },
504 { 0x3606, 0x1b },
505 { 0x370d, 0xb },
506 { 0x370f, 0xc0 },
507 { 0x3709, 0x1 },
508 { 0x3823, 0x0 },
509 { 0x5007, 0x0 },
510 { 0x5009, 0x0 },
511 { 0x5011, 0x0 },
512 { 0x5013, 0x0 },
513 { 0x519e, 0x0 },
514 { 0x5086, 0x0 },
515 { 0x5087, 0x0 },
516 { 0x5088, 0x0 },
517 { 0x5089, 0x0 },
518 { 0x302b, 0x0 },
519 { 0x3503, 0x7 },
520 { 0x3011, 0x8 },
521 { 0x350c, 0x2 },
522 { 0x350d, 0xe4 },
523 { 0x3621, 0xc9 },
524 { 0x370a, 0x81 },
525 { 0xffff, 0xff },
526};
527
528static struct regval_list ov5642_default_regs_finalise[] = {
529 { 0x3810, 0xc2 },
530 { 0x3818, 0xc9 },
531 { 0x381c, 0x10 },
532 { 0x381d, 0xa0 },
533 { 0x381e, 0x5 },
534 { 0x381f, 0xb0 },
535 { 0x3820, 0x0 },
536 { 0x3821, 0x0 },
537 { 0x3824, 0x11 },
538 { 0x3a08, 0x1b },
539 { 0x3a09, 0xc0 },
540 { 0x3a0a, 0x17 },
541 { 0x3a0b, 0x20 },
542 { 0x3a0d, 0x2 },
543 { 0x3a0e, 0x1 },
544 { 0x401c, 0x4 },
545 { 0x5682, 0x5 },
546 { 0x5683, 0x0 },
547 { 0x5686, 0x2 },
548 { 0x5687, 0xcc },
549 { 0x5001, 0x4f },
550 { 0x589b, 0x6 },
551 { 0x589a, 0xc5 },
552 { 0x3503, 0x0 },
553 { 0x460c, 0x20 },
554 { 0x460b, 0x37 },
555 { 0x471c, 0xd0 },
556 { 0x471d, 0x5 },
557 { 0x3815, 0x1 },
558 { 0x3818, 0xc1 },
559 { 0x501f, 0x0 },
560 { 0x5002, 0xe0 },
561 { 0x4300, 0x32 }, /* UYVY */
562 { 0x3002, 0x1c },
563 { 0x4800, 0x14 },
564 { 0x4801, 0xf },
565 { 0x3007, 0x3b },
566 { 0x300e, 0x4 },
567 { 0x4803, 0x50 },
568 { 0x3815, 0x1 },
569 { 0x4713, 0x2 },
570 { 0x4842, 0x1 },
571 { 0x300f, 0xe },
572 { 0x3003, 0x3 },
573 { 0x3003, 0x1 },
574 { 0xffff, 0xff },
575};
576
577struct ov5642_datafmt {
578 enum v4l2_mbus_pixelcode code;
579 enum v4l2_colorspace colorspace;
580};
581
/* Per-device driver state, wrapped around the V4L2 subdevice. */
struct ov5642 {
	struct v4l2_subdev subdev;
	const struct ov5642_datafmt *fmt;	/* currently selected format */
};
586
/* Formats this driver exposes; only UYVY 8-bit is supported. */
static const struct ov5642_datafmt ov5642_colour_fmts[] = {
	{V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
};
590
/* Recover the driver state from the subdev stored in i2c clientdata. */
static struct ov5642 *to_ov5642(const struct i2c_client *client)
{
	return container_of(i2c_get_clientdata(client), struct ov5642, subdev);
}
595
596/* Find a data format by a pixel code in an array */
597static const struct ov5642_datafmt
598 *ov5642_find_datafmt(enum v4l2_mbus_pixelcode code)
599{
600 int i;
601
602 for (i = 0; i < ARRAY_SIZE(ov5642_colour_fmts); i++)
603 if (ov5642_colour_fmts[i].code == code)
604 return ov5642_colour_fmts + i;
605
606 return NULL;
607}
608
609static int reg_read(struct i2c_client *client, u16 reg, u8 *val)
610{
611 int ret;
612 /* We have 16-bit i2c addresses - care for endianess */
613 unsigned char data[2] = { reg >> 8, reg & 0xff };
614
615 ret = i2c_master_send(client, data, 2);
616 if (ret < 2) {
617 dev_err(&client->dev, "%s: i2c read error, reg: %x\n",
618 __func__, reg);
619 return ret < 0 ? ret : -EIO;
620 }
621
622 ret = i2c_master_recv(client, val, 1);
623 if (ret < 1) {
624 dev_err(&client->dev, "%s: i2c read error, reg: %x\n",
625 __func__, reg);
626 return ret < 0 ? ret : -EIO;
627 }
628 return 0;
629}
630
631static int reg_write(struct i2c_client *client, u16 reg, u8 val)
632{
633 int ret;
634 unsigned char data[3] = { reg >> 8, reg & 0xff, val };
635
636 ret = i2c_master_send(client, data, 3);
637 if (ret < 3) {
638 dev_err(&client->dev, "%s: i2c write error, reg: %x\n",
639 __func__, reg);
640 return ret < 0 ? ret : -EIO;
641 }
642
643 return 0;
644}
645#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
 * VIDIOC_DBG_G_REGISTER handler: read a single 8-bit register for
 * debugging.  Register addresses are 16 bits wide.
 */
static int ov5642_get_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int ret;
	u8 val;

	/* Reject addresses outside the 16-bit register space. */
	if (reg->reg & ~0xffff)
		return -EINVAL;

	reg->size = 1;

	ret = reg_read(client, reg->reg, &val);
	if (!ret)
		reg->val = (__u64)val;

	return ret;
}
663
664static int ov5642_set_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
665{
666 struct i2c_client *client = v4l2_get_subdevdata(sd);
667
668 if (reg->reg & ~0xffff || reg->val & ~0xff)
669 return -EINVAL;
670
671 return reg_write(client, reg->reg, reg->val);
672}
673#endif
674
675static int ov5642_write_array(struct i2c_client *client,
676 struct regval_list *vals)
677{
678 while (vals->reg_num != 0xffff || vals->value != 0xff) {
679 int ret = reg_write(client, vals->reg_num, vals->value);
680 if (ret < 0)
681 return ret;
682 vals++;
683 }
684 dev_dbg(&client->dev, "Register list loaded\n");
685 return 0;
686}
687
688static int ov5642_set_resolution(struct i2c_client *client)
689{
690 int ret;
691 u8 start_x_high = ((OV5642_SENSOR_SIZE_X - OV5642_WIDTH) / 2) >> 8;
692 u8 start_x_low = ((OV5642_SENSOR_SIZE_X - OV5642_WIDTH) / 2) & 0xff;
693 u8 start_y_high = ((OV5642_SENSOR_SIZE_Y - OV5642_HEIGHT) / 2) >> 8;
694 u8 start_y_low = ((OV5642_SENSOR_SIZE_Y - OV5642_HEIGHT) / 2) & 0xff;
695
696 u8 width_high = OV5642_WIDTH >> 8;
697 u8 width_low = OV5642_WIDTH & 0xff;
698 u8 height_high = OV5642_HEIGHT >> 8;
699 u8 height_low = OV5642_HEIGHT & 0xff;
700
701 u8 total_width_high = OV5642_TOTAL_WIDTH >> 8;
702 u8 total_width_low = OV5642_TOTAL_WIDTH & 0xff;
703 u8 total_height_high = OV5642_TOTAL_HEIGHT >> 8;
704 u8 total_height_low = OV5642_TOTAL_HEIGHT & 0xff;
705
706 ret = reg_write(client, REG_WINDOW_START_X_HIGH, start_x_high);
707 if (!ret)
708 ret = reg_write(client, REG_WINDOW_START_X_LOW, start_x_low);
709 if (!ret)
710 ret = reg_write(client, REG_WINDOW_START_Y_HIGH, start_y_high);
711 if (!ret)
712 ret = reg_write(client, REG_WINDOW_START_Y_LOW, start_y_low);
713
714 if (!ret)
715 ret = reg_write(client, REG_WINDOW_WIDTH_HIGH, width_high);
716 if (!ret)
717 ret = reg_write(client, REG_WINDOW_WIDTH_LOW , width_low);
718 if (!ret)
719 ret = reg_write(client, REG_WINDOW_HEIGHT_HIGH, height_high);
720 if (!ret)
721 ret = reg_write(client, REG_WINDOW_HEIGHT_LOW, height_low);
722
723 if (!ret)
724 ret = reg_write(client, REG_OUT_WIDTH_HIGH, width_high);
725 if (!ret)
726 ret = reg_write(client, REG_OUT_WIDTH_LOW , width_low);
727 if (!ret)
728 ret = reg_write(client, REG_OUT_HEIGHT_HIGH, height_high);
729 if (!ret)
730 ret = reg_write(client, REG_OUT_HEIGHT_LOW, height_low);
731
732 if (!ret)
733 ret = reg_write(client, REG_OUT_TOTAL_WIDTH_HIGH, total_width_high);
734 if (!ret)
735 ret = reg_write(client, REG_OUT_TOTAL_WIDTH_LOW, total_width_low);
736 if (!ret)
737 ret = reg_write(client, REG_OUT_TOTAL_HEIGHT_HIGH, total_height_high);
738 if (!ret)
739 ret = reg_write(client, REG_OUT_TOTAL_HEIGHT_LOW, total_height_low);
740
741 return ret;
742}
743
744static int ov5642_try_fmt(struct v4l2_subdev *sd,
745 struct v4l2_mbus_framefmt *mf)
746{
747 const struct ov5642_datafmt *fmt = ov5642_find_datafmt(mf->code);
748
749 dev_dbg(sd->v4l2_dev->dev, "%s(%u) width: %u heigth: %u\n",
750 __func__, mf->code, mf->width, mf->height);
751
752 if (!fmt) {
753 mf->code = ov5642_colour_fmts[0].code;
754 mf->colorspace = ov5642_colour_fmts[0].colorspace;
755 }
756
757 mf->width = OV5642_WIDTH;
758 mf->height = OV5642_HEIGHT;
759 mf->field = V4L2_FIELD_NONE;
760
761 return 0;
762}
763
764static int ov5642_s_fmt(struct v4l2_subdev *sd,
765 struct v4l2_mbus_framefmt *mf)
766{
767 struct i2c_client *client = v4l2_get_subdevdata(sd);
768 struct ov5642 *priv = to_ov5642(client);
769
770 dev_dbg(sd->v4l2_dev->dev, "%s(%u)\n", __func__, mf->code);
771
772 /* MIPI CSI could have changed the format, double-check */
773 if (!ov5642_find_datafmt(mf->code))
774 return -EINVAL;
775
776 ov5642_try_fmt(sd, mf);
777
778 priv->fmt = ov5642_find_datafmt(mf->code);
779
780 ov5642_write_array(client, ov5642_default_regs_init);
781 ov5642_set_resolution(client);
782 ov5642_write_array(client, ov5642_default_regs_finalise);
783
784 return 0;
785}
786
787static int ov5642_g_fmt(struct v4l2_subdev *sd,
788 struct v4l2_mbus_framefmt *mf)
789{
790 struct i2c_client *client = v4l2_get_subdevdata(sd);
791 struct ov5642 *priv = to_ov5642(client);
792
793 const struct ov5642_datafmt *fmt = priv->fmt;
794
795 mf->code = fmt->code;
796 mf->colorspace = fmt->colorspace;
797 mf->width = OV5642_WIDTH;
798 mf->height = OV5642_HEIGHT;
799 mf->field = V4L2_FIELD_NONE;
800
801 return 0;
802}
803
804static int ov5642_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
805 enum v4l2_mbus_pixelcode *code)
806{
807 if (index >= ARRAY_SIZE(ov5642_colour_fmts))
808 return -EINVAL;
809
810 *code = ov5642_colour_fmts[index].code;
811 return 0;
812}
813
814static int ov5642_g_chip_ident(struct v4l2_subdev *sd,
815 struct v4l2_dbg_chip_ident *id)
816{
817 struct i2c_client *client = v4l2_get_subdevdata(sd);
818
819 if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
820 return -EINVAL;
821
822 if (id->match.addr != client->addr)
823 return -ENODEV;
824
825 id->ident = V4L2_IDENT_OV5642;
826 id->revision = 0;
827
828 return 0;
829}
830
831static int ov5642_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
832{
833 struct v4l2_rect *rect = &a->c;
834
835 a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
836 rect->top = 0;
837 rect->left = 0;
838 rect->width = OV5642_WIDTH;
839 rect->height = OV5642_HEIGHT;
840
841 return 0;
842}
843
844static int ov5642_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
845{
846 a->bounds.left = 0;
847 a->bounds.top = 0;
848 a->bounds.width = OV5642_WIDTH;
849 a->bounds.height = OV5642_HEIGHT;
850 a->defrect = a->bounds;
851 a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
852 a->pixelaspect.numerator = 1;
853 a->pixelaspect.denominator = 1;
854
855 return 0;
856}
857
/* Video operations: format negotiation and (fixed) cropping. */
static struct v4l2_subdev_video_ops ov5642_subdev_video_ops = {
	.s_mbus_fmt = ov5642_s_fmt,
	.g_mbus_fmt = ov5642_g_fmt,
	.try_mbus_fmt = ov5642_try_fmt,
	.enum_mbus_fmt = ov5642_enum_fmt,
	.g_crop = ov5642_g_crop,
	.cropcap = ov5642_cropcap,
};
866
/* Core operations: chip identification plus optional debug register access. */
static struct v4l2_subdev_core_ops ov5642_subdev_core_ops = {
	.g_chip_ident = ov5642_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = ov5642_get_register,
	.s_register = ov5642_set_register,
#endif
};
874
/* Top-level subdev operations table passed to v4l2_i2c_subdev_init(). */
static struct v4l2_subdev_ops ov5642_subdev_ops = {
	.core = &ov5642_subdev_core_ops,
	.video = &ov5642_subdev_video_ops,
};
879
880/*
881 * We have to provide soc-camera operations, but we don't have anything to say
882 * there. The MIPI CSI2 driver will provide .query_bus_param and .set_bus_param
883 */
/* Dummy bus-parameter query; the MIPI CSI2 driver supplies the real one. */
static unsigned long soc_ov5642_query_bus_param(struct soc_camera_device *icd)
{
	return 0;
}
888
/* Dummy bus-parameter setter; always rejects, see comment above. */
static int soc_ov5642_set_bus_param(struct soc_camera_device *icd,
				    unsigned long flags)
{
	return -EINVAL;
}
894
/* Placeholder soc-camera operations (required, but functionally empty). */
static struct soc_camera_ops soc_ov5642_ops = {
	.query_bus_param = soc_ov5642_query_bus_param,
	.set_bus_param = soc_ov5642_set_bus_param,
};
899
900static int ov5642_video_probe(struct soc_camera_device *icd,
901 struct i2c_client *client)
902{
903 int ret;
904 u8 id_high, id_low;
905 u16 id;
906
907 /* Read sensor Model ID */
908 ret = reg_read(client, REG_CHIP_ID_HIGH, &id_high);
909 if (ret < 0)
910 return ret;
911
912 id = id_high << 8;
913
914 ret = reg_read(client, REG_CHIP_ID_LOW, &id_low);
915 if (ret < 0)
916 return ret;
917
918 id |= id_low;
919
920 dev_info(&client->dev, "Chip ID 0x%04x detected\n", id);
921
922 if (id != 0x5642)
923 return -ENODEV;
924
925 return 0;
926}
927
/*
 * I2C probe: validate the soc-camera platform data, allocate the
 * driver state, register the V4L2 subdevice and confirm the sensor
 * responds on the bus before declaring success.
 */
static int ov5642_probe(struct i2c_client *client,
			const struct i2c_device_id *did)
{
	struct ov5642 *priv;
	struct soc_camera_device *icd = client->dev.platform_data;
	struct soc_camera_link *icl;
	int ret;

	if (!icd) {
		dev_err(&client->dev, "OV5642: missing soc-camera data!\n");
		return -EINVAL;
	}

	/* The board code must supply a soc_camera_link. */
	icl = to_soc_camera_link(icd);
	if (!icl) {
		dev_err(&client->dev, "OV5642: missing platform data!\n");
		return -EINVAL;
	}

	priv = kzalloc(sizeof(struct ov5642), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	v4l2_i2c_subdev_init(&priv->subdev, client, &ov5642_subdev_ops);

	icd->ops = &soc_ov5642_ops;
	/* Start out with the default (only) format. */
	priv->fmt = &ov5642_colour_fmts[0];

	/* Check the chip is actually there before committing. */
	ret = ov5642_video_probe(icd, client);
	if (ret < 0)
		goto error;

	return 0;

error:
	/* Undo what was set up above. */
	icd->ops = NULL;
	kfree(priv);
	return ret;
}
967
/*
 * I2C remove: detach the soc-camera operations, let the board release
 * the bus via its optional free_bus hook, and free the driver state.
 */
static int ov5642_remove(struct i2c_client *client)
{
	struct ov5642 *priv = to_ov5642(client);
	struct soc_camera_device *icd = client->dev.platform_data;
	struct soc_camera_link *icl = to_soc_camera_link(icd);

	icd->ops = NULL;
	if (icl->free_bus)
		icl->free_bus(icl);
	kfree(priv);

	return 0;
}
981
/* Supported I2C device ids; exported below for module autoloading. */
static const struct i2c_device_id ov5642_id[] = {
	{ "ov5642", 0 },
	{ }
};
986MODULE_DEVICE_TABLE(i2c, ov5642_id);
987
/* I2C driver glue binding the "ov5642" id to probe/remove. */
static struct i2c_driver ov5642_i2c_driver = {
	.driver = {
		.name = "ov5642",
	},
	.probe = ov5642_probe,
	.remove = ov5642_remove,
	.id_table = ov5642_id,
};
996
/* Module init: register the driver with the I2C core. */
static int __init ov5642_mod_init(void)
{
	return i2c_add_driver(&ov5642_i2c_driver);
}
1001
/* Module exit: unregister the driver from the I2C core. */
static void __exit ov5642_mod_exit(void)
{
	i2c_del_driver(&ov5642_i2c_driver);
}
1006
1007module_init(ov5642_mod_init);
1008module_exit(ov5642_mod_exit);
1009
1010MODULE_DESCRIPTION("Omnivision OV5642 Camera driver");
1011MODULE_AUTHOR("Bastian Hecht <hechtb@gmail.com>");
1012MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index d4e7c11553c3..8aa058531280 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -19,8 +19,7 @@
19#include <media/v4l2-device.h> 19#include <media/v4l2-device.h>
20#include <media/v4l2-chip-ident.h> 20#include <media/v4l2-chip-ident.h>
21#include <media/v4l2-mediabus.h> 21#include <media/v4l2-mediabus.h>
22 22#include <media/ov7670.h>
23#include "ov7670.h"
24 23
25MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>"); 24MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
26MODULE_DESCRIPTION("A low-level driver for OmniVision ov7670 sensors"); 25MODULE_DESCRIPTION("A low-level driver for OmniVision ov7670 sensors");
diff --git a/drivers/media/video/ov7670.h b/drivers/media/video/ov7670.h
deleted file mode 100644
index b133bc123031..000000000000
--- a/drivers/media/video/ov7670.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * A V4L2 driver for OmniVision OV7670 cameras.
3 *
4 * Copyright 2010 One Laptop Per Child
5 *
6 * This file may be distributed under the terms of the GNU General
7 * Public License, version 2.
8 */
9
10#ifndef __OV7670_H
11#define __OV7670_H
12
13struct ov7670_config {
14 int min_width; /* Filter out smaller sizes */
15 int min_height; /* Filter out smaller sizes */
16 int clock_speed; /* External clock speed (MHz) */
17 bool use_smbus; /* Use smbus I/O instead of I2C */
18};
19
20#endif
diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c
index 48895ef863ff..397870f076c1 100644
--- a/drivers/media/video/ov772x.c
+++ b/drivers/media/video/ov772x.c
@@ -1032,13 +1032,9 @@ static int ov772x_video_probe(struct soc_camera_device *icd,
1032 u8 pid, ver; 1032 u8 pid, ver;
1033 const char *devname; 1033 const char *devname;
1034 1034
1035 /* 1035 /* We must have a parent by now. And it cannot be a wrong one. */
1036 * We must have a parent by now. And it cannot be a wrong one. 1036 BUG_ON(!icd->parent ||
1037 * So this entire test is completely redundant. 1037 to_soc_camera_host(icd->parent)->nr != icd->iface);
1038 */
1039 if (!icd->dev.parent ||
1040 to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
1041 return -ENODEV;
1042 1038
1043 /* 1039 /*
1044 * check and show product ID and manufacturer ID 1040 * check and show product ID and manufacturer ID
diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c
index 5173ac449dd8..3681a6ff0815 100644
--- a/drivers/media/video/ov9640.c
+++ b/drivers/media/video/ov9640.c
@@ -657,16 +657,9 @@ static int ov9640_video_probe(struct soc_camera_device *icd,
657 const char *devname; 657 const char *devname;
658 int ret = 0; 658 int ret = 0;
659 659
660 /* 660 /* We must have a parent by now. And it cannot be a wrong one. */
661 * We must have a parent by now. And it cannot be a wrong one. 661 BUG_ON(!icd->parent ||
662 * So this entire test is completely redundant. 662 to_soc_camera_host(icd->parent)->nr != icd->iface);
663 */
664 if (!icd->dev.parent ||
665 to_soc_camera_host(icd->dev.parent)->nr != icd->iface) {
666 dev_err(&client->dev, "Parent missing or invalid!\n");
667 ret = -ENODEV;
668 goto err;
669 }
670 663
671 /* 664 /*
672 * check and show product ID and manufacturer ID 665 * check and show product ID and manufacturer ID
diff --git a/drivers/media/video/ov9740.c b/drivers/media/video/ov9740.c
index 4d4ee4faca69..edd1ffcca30b 100644
--- a/drivers/media/video/ov9740.c
+++ b/drivers/media/video/ov9740.c
@@ -44,12 +44,12 @@
44#define OV9740_Y_ADDR_START_LO 0x0347 44#define OV9740_Y_ADDR_START_LO 0x0347
45#define OV9740_X_ADDR_END_HI 0x0348 45#define OV9740_X_ADDR_END_HI 0x0348
46#define OV9740_X_ADDR_END_LO 0x0349 46#define OV9740_X_ADDR_END_LO 0x0349
47#define OV9740_Y_ADDR_END_HI 0x034A 47#define OV9740_Y_ADDR_END_HI 0x034a
48#define OV9740_Y_ADDR_END_LO 0x034B 48#define OV9740_Y_ADDR_END_LO 0x034b
49#define OV9740_X_OUTPUT_SIZE_HI 0x034C 49#define OV9740_X_OUTPUT_SIZE_HI 0x034c
50#define OV9740_X_OUTPUT_SIZE_LO 0x034D 50#define OV9740_X_OUTPUT_SIZE_LO 0x034d
51#define OV9740_Y_OUTPUT_SIZE_HI 0x034E 51#define OV9740_Y_OUTPUT_SIZE_HI 0x034e
52#define OV9740_Y_OUTPUT_SIZE_LO 0x034F 52#define OV9740_Y_OUTPUT_SIZE_LO 0x034f
53 53
54/* IO Control Registers */ 54/* IO Control Registers */
55#define OV9740_IO_CREL00 0x3002 55#define OV9740_IO_CREL00 0x3002
@@ -68,6 +68,7 @@
68#define OV9740_ANALOG_CTRL04 0x3604 68#define OV9740_ANALOG_CTRL04 0x3604
69#define OV9740_ANALOG_CTRL10 0x3610 69#define OV9740_ANALOG_CTRL10 0x3610
70#define OV9740_ANALOG_CTRL12 0x3612 70#define OV9740_ANALOG_CTRL12 0x3612
71#define OV9740_ANALOG_CTRL15 0x3615
71#define OV9740_ANALOG_CTRL20 0x3620 72#define OV9740_ANALOG_CTRL20 0x3620
72#define OV9740_ANALOG_CTRL21 0x3621 73#define OV9740_ANALOG_CTRL21 0x3621
73#define OV9740_ANALOG_CTRL22 0x3622 74#define OV9740_ANALOG_CTRL22 0x3622
@@ -89,28 +90,28 @@
89#define OV9740_TIMING_CTRL35 0x3835 90#define OV9740_TIMING_CTRL35 0x3835
90 91
91/* Banding Filter */ 92/* Banding Filter */
92#define OV9740_AEC_MAXEXPO_60_H 0x3A02 93#define OV9740_AEC_MAXEXPO_60_H 0x3a02
93#define OV9740_AEC_MAXEXPO_60_L 0x3A03 94#define OV9740_AEC_MAXEXPO_60_L 0x3a03
94#define OV9740_AEC_B50_STEP_HI 0x3A08 95#define OV9740_AEC_B50_STEP_HI 0x3a08
95#define OV9740_AEC_B50_STEP_LO 0x3A09 96#define OV9740_AEC_B50_STEP_LO 0x3a09
96#define OV9740_AEC_B60_STEP_HI 0x3A0A 97#define OV9740_AEC_B60_STEP_HI 0x3a0a
97#define OV9740_AEC_B60_STEP_LO 0x3A0B 98#define OV9740_AEC_B60_STEP_LO 0x3a0b
98#define OV9740_AEC_CTRL0D 0x3A0D 99#define OV9740_AEC_CTRL0D 0x3a0d
99#define OV9740_AEC_CTRL0E 0x3A0E 100#define OV9740_AEC_CTRL0E 0x3a0e
100#define OV9740_AEC_MAXEXPO_50_H 0x3A14 101#define OV9740_AEC_MAXEXPO_50_H 0x3a14
101#define OV9740_AEC_MAXEXPO_50_L 0x3A15 102#define OV9740_AEC_MAXEXPO_50_L 0x3a15
102 103
103/* AEC/AGC Control */ 104/* AEC/AGC Control */
104#define OV9740_AEC_ENABLE 0x3503 105#define OV9740_AEC_ENABLE 0x3503
105#define OV9740_GAIN_CEILING_01 0x3A18 106#define OV9740_GAIN_CEILING_01 0x3a18
106#define OV9740_GAIN_CEILING_02 0x3A19 107#define OV9740_GAIN_CEILING_02 0x3a19
107#define OV9740_AEC_HI_THRESHOLD 0x3A11 108#define OV9740_AEC_HI_THRESHOLD 0x3a11
108#define OV9740_AEC_3A1A 0x3A1A 109#define OV9740_AEC_3A1A 0x3a1a
109#define OV9740_AEC_CTRL1B_WPT2 0x3A1B 110#define OV9740_AEC_CTRL1B_WPT2 0x3a1b
110#define OV9740_AEC_CTRL0F_WPT 0x3A0F 111#define OV9740_AEC_CTRL0F_WPT 0x3a0f
111#define OV9740_AEC_CTRL10_BPT 0x3A10 112#define OV9740_AEC_CTRL10_BPT 0x3a10
112#define OV9740_AEC_CTRL1E_BPT2 0x3A1E 113#define OV9740_AEC_CTRL1E_BPT2 0x3a1e
113#define OV9740_AEC_LO_THRESHOLD 0x3A1F 114#define OV9740_AEC_LO_THRESHOLD 0x3a1f
114 115
115/* BLC Control */ 116/* BLC Control */
116#define OV9740_BLC_AUTO_ENABLE 0x4002 117#define OV9740_BLC_AUTO_ENABLE 0x4002
@@ -132,7 +133,7 @@
132#define OV9740_VT_SYS_CLK_DIV 0x0303 133#define OV9740_VT_SYS_CLK_DIV 0x0303
133#define OV9740_VT_PIX_CLK_DIV 0x0301 134#define OV9740_VT_PIX_CLK_DIV 0x0301
134#define OV9740_PLL_CTRL3010 0x3010 135#define OV9740_PLL_CTRL3010 0x3010
135#define OV9740_VFIFO_CTRL00 0x460E 136#define OV9740_VFIFO_CTRL00 0x460e
136 137
137/* ISP Control */ 138/* ISP Control */
138#define OV9740_ISP_CTRL00 0x5000 139#define OV9740_ISP_CTRL00 0x5000
@@ -141,9 +142,9 @@
141#define OV9740_ISP_CTRL05 0x5005 142#define OV9740_ISP_CTRL05 0x5005
142#define OV9740_ISP_CTRL12 0x5012 143#define OV9740_ISP_CTRL12 0x5012
143#define OV9740_ISP_CTRL19 0x5019 144#define OV9740_ISP_CTRL19 0x5019
144#define OV9740_ISP_CTRL1A 0x501A 145#define OV9740_ISP_CTRL1A 0x501a
145#define OV9740_ISP_CTRL1E 0x501E 146#define OV9740_ISP_CTRL1E 0x501e
146#define OV9740_ISP_CTRL1F 0x501F 147#define OV9740_ISP_CTRL1F 0x501f
147#define OV9740_ISP_CTRL20 0x5020 148#define OV9740_ISP_CTRL20 0x5020
148#define OV9740_ISP_CTRL21 0x5021 149#define OV9740_ISP_CTRL21 0x5021
149 150
@@ -158,12 +159,12 @@
158#define OV9740_AWB_ADV_CTRL04 0x5187 159#define OV9740_AWB_ADV_CTRL04 0x5187
159#define OV9740_AWB_ADV_CTRL05 0x5188 160#define OV9740_AWB_ADV_CTRL05 0x5188
160#define OV9740_AWB_ADV_CTRL06 0x5189 161#define OV9740_AWB_ADV_CTRL06 0x5189
161#define OV9740_AWB_ADV_CTRL07 0x518A 162#define OV9740_AWB_ADV_CTRL07 0x518a
162#define OV9740_AWB_ADV_CTRL08 0x518B 163#define OV9740_AWB_ADV_CTRL08 0x518b
163#define OV9740_AWB_ADV_CTRL09 0x518C 164#define OV9740_AWB_ADV_CTRL09 0x518c
164#define OV9740_AWB_ADV_CTRL10 0x518D 165#define OV9740_AWB_ADV_CTRL10 0x518d
165#define OV9740_AWB_ADV_CTRL11 0x518E 166#define OV9740_AWB_ADV_CTRL11 0x518e
166#define OV9740_AWB_CTRL0F 0x518F 167#define OV9740_AWB_CTRL0F 0x518f
167#define OV9740_AWB_CTRL10 0x5190 168#define OV9740_AWB_CTRL10 0x5190
168#define OV9740_AWB_CTRL11 0x5191 169#define OV9740_AWB_CTRL11 0x5191
169#define OV9740_AWB_CTRL12 0x5192 170#define OV9740_AWB_CTRL12 0x5192
@@ -180,27 +181,8 @@
180#define OV9740_MIPI_CTRL_3012 0x3012 181#define OV9740_MIPI_CTRL_3012 0x3012
181#define OV9740_SC_CMMM_MIPI_CTR 0x3014 182#define OV9740_SC_CMMM_MIPI_CTR 0x3014
182 183
183/* supported resolutions */ 184#define OV9740_MAX_WIDTH 1280
184enum { 185#define OV9740_MAX_HEIGHT 720
185 OV9740_VGA,
186 OV9740_720P,
187};
188
189struct ov9740_resolution {
190 unsigned int width;
191 unsigned int height;
192};
193
194static struct ov9740_resolution ov9740_resolutions[] = {
195 [OV9740_VGA] = {
196 .width = 640,
197 .height = 480,
198 },
199 [OV9740_720P] = {
200 .width = 1280,
201 .height = 720,
202 },
203};
204 186
205/* Misc. structures */ 187/* Misc. structures */
206struct ov9740_reg { 188struct ov9740_reg {
@@ -219,9 +201,16 @@ struct ov9740_priv {
219 201
220 bool flag_vflip; 202 bool flag_vflip;
221 bool flag_hflip; 203 bool flag_hflip;
204
205 /* For suspend/resume. */
206 struct v4l2_mbus_framefmt current_mf;
207 bool current_enable;
222}; 208};
223 209
224static const struct ov9740_reg ov9740_defaults[] = { 210static const struct ov9740_reg ov9740_defaults[] = {
211 /* Software Reset */
212 { OV9740_SOFTWARE_RESET, 0x01 },
213
225 /* Banding Filter */ 214 /* Banding Filter */
226 { OV9740_AEC_B50_STEP_HI, 0x00 }, 215 { OV9740_AEC_B50_STEP_HI, 0x00 },
227 { OV9740_AEC_B50_STEP_LO, 0xe8 }, 216 { OV9740_AEC_B50_STEP_LO, 0xe8 },
@@ -241,36 +230,36 @@ static const struct ov9740_reg ov9740_defaults[] = {
241 /* Un-documented OV9740 registers */ 230 /* Un-documented OV9740 registers */
242 { 0x5800, 0x29 }, { 0x5801, 0x25 }, { 0x5802, 0x20 }, { 0x5803, 0x21 }, 231 { 0x5800, 0x29 }, { 0x5801, 0x25 }, { 0x5802, 0x20 }, { 0x5803, 0x21 },
243 { 0x5804, 0x26 }, { 0x5805, 0x2e }, { 0x5806, 0x11 }, { 0x5807, 0x0c }, 232 { 0x5804, 0x26 }, { 0x5805, 0x2e }, { 0x5806, 0x11 }, { 0x5807, 0x0c },
244 { 0x5808, 0x09 }, { 0x5809, 0x0a }, { 0x580A, 0x0e }, { 0x580B, 0x16 }, 233 { 0x5808, 0x09 }, { 0x5809, 0x0a }, { 0x580a, 0x0e }, { 0x580b, 0x16 },
245 { 0x580C, 0x06 }, { 0x580D, 0x02 }, { 0x580E, 0x00 }, { 0x580F, 0x00 }, 234 { 0x580c, 0x06 }, { 0x580d, 0x02 }, { 0x580e, 0x00 }, { 0x580f, 0x00 },
246 { 0x5810, 0x04 }, { 0x5811, 0x0a }, { 0x5812, 0x05 }, { 0x5813, 0x02 }, 235 { 0x5810, 0x04 }, { 0x5811, 0x0a }, { 0x5812, 0x05 }, { 0x5813, 0x02 },
247 { 0x5814, 0x00 }, { 0x5815, 0x00 }, { 0x5816, 0x03 }, { 0x5817, 0x09 }, 236 { 0x5814, 0x00 }, { 0x5815, 0x00 }, { 0x5816, 0x03 }, { 0x5817, 0x09 },
248 { 0x5818, 0x0f }, { 0x5819, 0x0a }, { 0x581A, 0x07 }, { 0x581B, 0x08 }, 237 { 0x5818, 0x0f }, { 0x5819, 0x0a }, { 0x581a, 0x07 }, { 0x581b, 0x08 },
249 { 0x581C, 0x0b }, { 0x581D, 0x14 }, { 0x581E, 0x28 }, { 0x581F, 0x23 }, 238 { 0x581c, 0x0b }, { 0x581d, 0x14 }, { 0x581e, 0x28 }, { 0x581f, 0x23 },
250 { 0x5820, 0x1d }, { 0x5821, 0x1e }, { 0x5822, 0x24 }, { 0x5823, 0x2a }, 239 { 0x5820, 0x1d }, { 0x5821, 0x1e }, { 0x5822, 0x24 }, { 0x5823, 0x2a },
251 { 0x5824, 0x4f }, { 0x5825, 0x6f }, { 0x5826, 0x5f }, { 0x5827, 0x7f }, 240 { 0x5824, 0x4f }, { 0x5825, 0x6f }, { 0x5826, 0x5f }, { 0x5827, 0x7f },
252 { 0x5828, 0x9f }, { 0x5829, 0x5f }, { 0x582A, 0x8f }, { 0x582B, 0x9e }, 241 { 0x5828, 0x9f }, { 0x5829, 0x5f }, { 0x582a, 0x8f }, { 0x582b, 0x9e },
253 { 0x582C, 0x8f }, { 0x582D, 0x9f }, { 0x582E, 0x4f }, { 0x582F, 0x87 }, 242 { 0x582c, 0x8f }, { 0x582d, 0x9f }, { 0x582e, 0x4f }, { 0x582f, 0x87 },
254 { 0x5830, 0x86 }, { 0x5831, 0x97 }, { 0x5832, 0xae }, { 0x5833, 0x3f }, 243 { 0x5830, 0x86 }, { 0x5831, 0x97 }, { 0x5832, 0xae }, { 0x5833, 0x3f },
255 { 0x5834, 0x8e }, { 0x5835, 0x7c }, { 0x5836, 0x7e }, { 0x5837, 0xaf }, 244 { 0x5834, 0x8e }, { 0x5835, 0x7c }, { 0x5836, 0x7e }, { 0x5837, 0xaf },
256 { 0x5838, 0x8f }, { 0x5839, 0x8f }, { 0x583A, 0x9f }, { 0x583B, 0x7f }, 245 { 0x5838, 0x8f }, { 0x5839, 0x8f }, { 0x583a, 0x9f }, { 0x583b, 0x7f },
257 { 0x583C, 0x5f }, 246 { 0x583c, 0x5f },
258 247
259 /* Y Gamma */ 248 /* Y Gamma */
260 { 0x5480, 0x07 }, { 0x5481, 0x18 }, { 0x5482, 0x2c }, { 0x5483, 0x4e }, 249 { 0x5480, 0x07 }, { 0x5481, 0x18 }, { 0x5482, 0x2c }, { 0x5483, 0x4e },
261 { 0x5484, 0x5e }, { 0x5485, 0x6b }, { 0x5486, 0x77 }, { 0x5487, 0x82 }, 250 { 0x5484, 0x5e }, { 0x5485, 0x6b }, { 0x5486, 0x77 }, { 0x5487, 0x82 },
262 { 0x5488, 0x8c }, { 0x5489, 0x95 }, { 0x548A, 0xa4 }, { 0x548B, 0xb1 }, 251 { 0x5488, 0x8c }, { 0x5489, 0x95 }, { 0x548a, 0xa4 }, { 0x548b, 0xb1 },
263 { 0x548C, 0xc6 }, { 0x548D, 0xd8 }, { 0x548E, 0xe9 }, 252 { 0x548c, 0xc6 }, { 0x548d, 0xd8 }, { 0x548e, 0xe9 },
264 253
265 /* UV Gamma */ 254 /* UV Gamma */
266 { 0x5490, 0x0f }, { 0x5491, 0xff }, { 0x5492, 0x0d }, { 0x5493, 0x05 }, 255 { 0x5490, 0x0f }, { 0x5491, 0xff }, { 0x5492, 0x0d }, { 0x5493, 0x05 },
267 { 0x5494, 0x07 }, { 0x5495, 0x1a }, { 0x5496, 0x04 }, { 0x5497, 0x01 }, 256 { 0x5494, 0x07 }, { 0x5495, 0x1a }, { 0x5496, 0x04 }, { 0x5497, 0x01 },
268 { 0x5498, 0x03 }, { 0x5499, 0x53 }, { 0x549A, 0x02 }, { 0x549B, 0xeb }, 257 { 0x5498, 0x03 }, { 0x5499, 0x53 }, { 0x549a, 0x02 }, { 0x549b, 0xeb },
269 { 0x549C, 0x02 }, { 0x549D, 0xa0 }, { 0x549E, 0x02 }, { 0x549F, 0x67 }, 258 { 0x549c, 0x02 }, { 0x549d, 0xa0 }, { 0x549e, 0x02 }, { 0x549f, 0x67 },
270 { 0x54A0, 0x02 }, { 0x54A1, 0x3b }, { 0x54A2, 0x02 }, { 0x54A3, 0x18 }, 259 { 0x54a0, 0x02 }, { 0x54a1, 0x3b }, { 0x54a2, 0x02 }, { 0x54a3, 0x18 },
271 { 0x54A4, 0x01 }, { 0x54A5, 0xe7 }, { 0x54A6, 0x01 }, { 0x54A7, 0xc3 }, 260 { 0x54a4, 0x01 }, { 0x54a5, 0xe7 }, { 0x54a6, 0x01 }, { 0x54a7, 0xc3 },
272 { 0x54A8, 0x01 }, { 0x54A9, 0x94 }, { 0x54AA, 0x01 }, { 0x54AB, 0x72 }, 261 { 0x54a8, 0x01 }, { 0x54a9, 0x94 }, { 0x54aa, 0x01 }, { 0x54ab, 0x72 },
273 { 0x54AC, 0x01 }, { 0x54AD, 0x57 }, 262 { 0x54ac, 0x01 }, { 0x54ad, 0x57 },
274 263
275 /* AWB */ 264 /* AWB */
276 { OV9740_AWB_CTRL00, 0xf0 }, 265 { OV9740_AWB_CTRL00, 0xf0 },
@@ -296,18 +285,18 @@ static const struct ov9740_reg ov9740_defaults[] = {
296 { OV9740_AWB_CTRL14, 0x00 }, 285 { OV9740_AWB_CTRL14, 0x00 },
297 286
298 /* CIP */ 287 /* CIP */
299 { 0x530D, 0x12 }, 288 { 0x530d, 0x12 },
300 289
301 /* CMX */ 290 /* CMX */
302 { 0x5380, 0x01 }, { 0x5381, 0x00 }, { 0x5382, 0x00 }, { 0x5383, 0x17 }, 291 { 0x5380, 0x01 }, { 0x5381, 0x00 }, { 0x5382, 0x00 }, { 0x5383, 0x17 },
303 { 0x5384, 0x00 }, { 0x5385, 0x01 }, { 0x5386, 0x00 }, { 0x5387, 0x00 }, 292 { 0x5384, 0x00 }, { 0x5385, 0x01 }, { 0x5386, 0x00 }, { 0x5387, 0x00 },
304 { 0x5388, 0x00 }, { 0x5389, 0xe0 }, { 0x538A, 0x00 }, { 0x538B, 0x20 }, 293 { 0x5388, 0x00 }, { 0x5389, 0xe0 }, { 0x538a, 0x00 }, { 0x538b, 0x20 },
305 { 0x538C, 0x00 }, { 0x538D, 0x00 }, { 0x538E, 0x00 }, { 0x538F, 0x16 }, 294 { 0x538c, 0x00 }, { 0x538d, 0x00 }, { 0x538e, 0x00 }, { 0x538f, 0x16 },
306 { 0x5390, 0x00 }, { 0x5391, 0x9c }, { 0x5392, 0x00 }, { 0x5393, 0xa0 }, 295 { 0x5390, 0x00 }, { 0x5391, 0x9c }, { 0x5392, 0x00 }, { 0x5393, 0xa0 },
307 { 0x5394, 0x18 }, 296 { 0x5394, 0x18 },
308 297
309 /* 50/60 Detection */ 298 /* 50/60 Detection */
310 { 0x3C0A, 0x9c }, { 0x3C0B, 0x3f }, 299 { 0x3c0a, 0x9c }, { 0x3c0b, 0x3f },
311 300
312 /* Output Select */ 301 /* Output Select */
313 { OV9740_IO_OUTPUT_SEL01, 0x00 }, 302 { OV9740_IO_OUTPUT_SEL01, 0x00 },
@@ -333,6 +322,7 @@ static const struct ov9740_reg ov9740_defaults[] = {
333 { OV9740_ANALOG_CTRL10, 0xa1 }, 322 { OV9740_ANALOG_CTRL10, 0xa1 },
334 { OV9740_ANALOG_CTRL12, 0x24 }, 323 { OV9740_ANALOG_CTRL12, 0x24 },
335 { OV9740_ANALOG_CTRL22, 0x9f }, 324 { OV9740_ANALOG_CTRL22, 0x9f },
325 { OV9740_ANALOG_CTRL15, 0xf0 },
336 326
337 /* Sensor Control */ 327 /* Sensor Control */
338 { OV9740_SENSOR_CTRL03, 0x42 }, 328 { OV9740_SENSOR_CTRL03, 0x42 },
@@ -385,7 +375,7 @@ static const struct ov9740_reg ov9740_defaults[] = {
385 { OV9740_LN_LENGTH_PCK_LO, 0x62 }, 375 { OV9740_LN_LENGTH_PCK_LO, 0x62 },
386 376
387 /* MIPI Control */ 377 /* MIPI Control */
388 { OV9740_MIPI_CTRL00, 0x44 }, 378 { OV9740_MIPI_CTRL00, 0x44 }, /* 0x64 for discontinuous clk */
389 { OV9740_MIPI_3837, 0x01 }, 379 { OV9740_MIPI_3837, 0x01 },
390 { OV9740_MIPI_CTRL01, 0x0f }, 380 { OV9740_MIPI_CTRL01, 0x0f },
391 { OV9740_MIPI_CTRL03, 0x05 }, 381 { OV9740_MIPI_CTRL03, 0x05 },
@@ -393,54 +383,9 @@ static const struct ov9740_reg ov9740_defaults[] = {
393 { OV9740_VFIFO_RD_CTRL, 0x16 }, 383 { OV9740_VFIFO_RD_CTRL, 0x16 },
394 { OV9740_MIPI_CTRL_3012, 0x70 }, 384 { OV9740_MIPI_CTRL_3012, 0x70 },
395 { OV9740_SC_CMMM_MIPI_CTR, 0x01 }, 385 { OV9740_SC_CMMM_MIPI_CTR, 0x01 },
396};
397
398static const struct ov9740_reg ov9740_regs_vga[] = {
399 { OV9740_X_ADDR_START_HI, 0x00 },
400 { OV9740_X_ADDR_START_LO, 0xa0 },
401 { OV9740_Y_ADDR_START_HI, 0x00 },
402 { OV9740_Y_ADDR_START_LO, 0x00 },
403 { OV9740_X_ADDR_END_HI, 0x04 },
404 { OV9740_X_ADDR_END_LO, 0x63 },
405 { OV9740_Y_ADDR_END_HI, 0x02 },
406 { OV9740_Y_ADDR_END_LO, 0xd3 },
407 { OV9740_X_OUTPUT_SIZE_HI, 0x02 },
408 { OV9740_X_OUTPUT_SIZE_LO, 0x80 },
409 { OV9740_Y_OUTPUT_SIZE_HI, 0x01 },
410 { OV9740_Y_OUTPUT_SIZE_LO, 0xe0 },
411 { OV9740_ISP_CTRL1E, 0x03 },
412 { OV9740_ISP_CTRL1F, 0xc0 },
413 { OV9740_ISP_CTRL20, 0x02 },
414 { OV9740_ISP_CTRL21, 0xd0 },
415 { OV9740_VFIFO_READ_START_HI, 0x01 },
416 { OV9740_VFIFO_READ_START_LO, 0x40 },
417 { OV9740_ISP_CTRL00, 0xff },
418 { OV9740_ISP_CTRL01, 0xff },
419 { OV9740_ISP_CTRL03, 0xff },
420};
421 386
422static const struct ov9740_reg ov9740_regs_720p[] = { 387 /* YUYV order */
423 { OV9740_X_ADDR_START_HI, 0x00 }, 388 { OV9740_ISP_CTRL19, 0x02 },
424 { OV9740_X_ADDR_START_LO, 0x00 },
425 { OV9740_Y_ADDR_START_HI, 0x00 },
426 { OV9740_Y_ADDR_START_LO, 0x00 },
427 { OV9740_X_ADDR_END_HI, 0x05 },
428 { OV9740_X_ADDR_END_LO, 0x03 },
429 { OV9740_Y_ADDR_END_HI, 0x02 },
430 { OV9740_Y_ADDR_END_LO, 0xd3 },
431 { OV9740_X_OUTPUT_SIZE_HI, 0x05 },
432 { OV9740_X_OUTPUT_SIZE_LO, 0x00 },
433 { OV9740_Y_OUTPUT_SIZE_HI, 0x02 },
434 { OV9740_Y_OUTPUT_SIZE_LO, 0xd0 },
435 { OV9740_ISP_CTRL1E, 0x05 },
436 { OV9740_ISP_CTRL1F, 0x00 },
437 { OV9740_ISP_CTRL20, 0x02 },
438 { OV9740_ISP_CTRL21, 0xd0 },
439 { OV9740_VFIFO_READ_START_HI, 0x02 },
440 { OV9740_VFIFO_READ_START_LO, 0x30 },
441 { OV9740_ISP_CTRL00, 0xff },
442 { OV9740_ISP_CTRL01, 0xef },
443 { OV9740_ISP_CTRL03, 0xff },
444}; 389};
445 390
446static enum v4l2_mbus_pixelcode ov9740_codes[] = { 391static enum v4l2_mbus_pixelcode ov9740_codes[] = {
@@ -537,7 +482,8 @@ static int ov9740_reg_rmw(struct i2c_client *client, u16 reg, u8 set, u8 unset)
537 ret = ov9740_reg_read(client, reg, &val); 482 ret = ov9740_reg_read(client, reg, &val);
538 if (ret < 0) { 483 if (ret < 0) {
539 dev_err(&client->dev, 484 dev_err(&client->dev,
540 "[Read]-Modify-Write of register %02x failed!\n", reg); 485 "[Read]-Modify-Write of register 0x%04x failed!\n",
486 reg);
541 return ret; 487 return ret;
542 } 488 }
543 489
@@ -547,7 +493,8 @@ static int ov9740_reg_rmw(struct i2c_client *client, u16 reg, u8 set, u8 unset)
547 ret = ov9740_reg_write(client, reg, val); 493 ret = ov9740_reg_write(client, reg, val);
548 if (ret < 0) { 494 if (ret < 0) {
549 dev_err(&client->dev, 495 dev_err(&client->dev,
550 "Read-Modify-[Write] of register %02x failed!\n", reg); 496 "Read-Modify-[Write] of register 0x%04x failed!\n",
497 reg);
551 return ret; 498 return ret;
552 } 499 }
553 500
@@ -608,6 +555,8 @@ static int ov9740_s_stream(struct v4l2_subdev *sd, int enable)
608 0x00); 555 0x00);
609 } 556 }
610 557
558 priv->current_enable = enable;
559
611 return ret; 560 return ret;
612} 561}
613 562
@@ -630,126 +579,127 @@ static unsigned long ov9740_query_bus_param(struct soc_camera_device *icd)
630 return soc_camera_apply_sensor_flags(icl, flags); 579 return soc_camera_apply_sensor_flags(icl, flags);
631} 580}
632 581
633/* Get status of additional camera capabilities */ 582/* select nearest higher resolution for capture */
634static int ov9740_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) 583static void ov9740_res_roundup(u32 *width, u32 *height)
635{
636 struct ov9740_priv *priv = to_ov9740(sd);
637
638 switch (ctrl->id) {
639 case V4L2_CID_VFLIP:
640 ctrl->value = priv->flag_vflip;
641 break;
642 case V4L2_CID_HFLIP:
643 ctrl->value = priv->flag_hflip;
644 break;
645 default:
646 return -EINVAL;
647 }
648
649 return 0;
650}
651
652/* Set status of additional camera capabilities */
653static int ov9740_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
654{
655 struct ov9740_priv *priv = to_ov9740(sd);
656
657 switch (ctrl->id) {
658 case V4L2_CID_VFLIP:
659 priv->flag_vflip = ctrl->value;
660 break;
661 case V4L2_CID_HFLIP:
662 priv->flag_hflip = ctrl->value;
663 break;
664 default:
665 return -EINVAL;
666 }
667
668 return 0;
669}
670
671/* Get chip identification */
672static int ov9740_g_chip_ident(struct v4l2_subdev *sd,
673 struct v4l2_dbg_chip_ident *id)
674{ 584{
675 struct ov9740_priv *priv = to_ov9740(sd); 585 /* Width must be a multiple of 4 pixels. */
586 *width = ALIGN(*width, 4);
676 587
677 id->ident = priv->ident; 588 /* Max resolution is 1280x720 (720p). */
678 id->revision = priv->revision; 589 if (*width > OV9740_MAX_WIDTH)
590 *width = OV9740_MAX_WIDTH;
679 591
680 return 0; 592 if (*height > OV9740_MAX_HEIGHT)
593 *height = OV9740_MAX_HEIGHT;
681} 594}
682 595
683#ifdef CONFIG_VIDEO_ADV_DEBUG 596/* Setup registers according to resolution and color encoding */
684static int ov9740_get_register(struct v4l2_subdev *sd, 597static int ov9740_set_res(struct i2c_client *client, u32 width, u32 height)
685 struct v4l2_dbg_register *reg)
686{ 598{
687 struct i2c_client *client = v4l2_get_subdevdata(sd); 599 u32 x_start;
600 u32 y_start;
601 u32 x_end;
602 u32 y_end;
603 bool scaling = 0;
604 u32 scale_input_x;
605 u32 scale_input_y;
688 int ret; 606 int ret;
689 u8 val;
690
691 if (reg->reg & ~0xffff)
692 return -EINVAL;
693 607
694 reg->size = 2; 608 if ((width != OV9740_MAX_WIDTH) || (height != OV9740_MAX_HEIGHT))
695 609 scaling = 1;
696 ret = ov9740_reg_read(client, reg->reg, &val);
697 if (ret)
698 return ret;
699
700 reg->val = (__u64)val;
701 610
702 return ret; 611 /*
703} 612 * Try to use as much of the sensor area as possible when supporting
704 613 * smaller resolutions. Depending on the aspect ratio of the
705static int ov9740_set_register(struct v4l2_subdev *sd, 614 * chosen resolution, we can either use the full width of the sensor,
706 struct v4l2_dbg_register *reg) 615 * or the full height of the sensor (or both if the aspect ratio is
707{ 616 * the same as 1280x720.
708 struct i2c_client *client = v4l2_get_subdevdata(sd); 617 */
618 if ((OV9740_MAX_WIDTH * height) > (OV9740_MAX_HEIGHT * width)) {
619 scale_input_x = (OV9740_MAX_HEIGHT * width) / height;
620 scale_input_y = OV9740_MAX_HEIGHT;
621 } else {
622 scale_input_x = OV9740_MAX_WIDTH;
623 scale_input_y = (OV9740_MAX_WIDTH * height) / width;
624 }
709 625
710 if (reg->reg & ~0xffff || reg->val & ~0xff) 626 /* These describe the area of the sensor to use. */
711 return -EINVAL; 627 x_start = (OV9740_MAX_WIDTH - scale_input_x) / 2;
628 y_start = (OV9740_MAX_HEIGHT - scale_input_y) / 2;
629 x_end = x_start + scale_input_x - 1;
630 y_end = y_start + scale_input_y - 1;
712 631
713 return ov9740_reg_write(client, reg->reg, reg->val); 632 ret = ov9740_reg_write(client, OV9740_X_ADDR_START_HI, x_start >> 8);
714} 633 if (ret)
715#endif 634 goto done;
635 ret = ov9740_reg_write(client, OV9740_X_ADDR_START_LO, x_start & 0xff);
636 if (ret)
637 goto done;
638 ret = ov9740_reg_write(client, OV9740_Y_ADDR_START_HI, y_start >> 8);
639 if (ret)
640 goto done;
641 ret = ov9740_reg_write(client, OV9740_Y_ADDR_START_LO, y_start & 0xff);
642 if (ret)
643 goto done;
716 644
717/* select nearest higher resolution for capture */ 645 ret = ov9740_reg_write(client, OV9740_X_ADDR_END_HI, x_end >> 8);
718static void ov9740_res_roundup(u32 *width, u32 *height) 646 if (ret)
719{ 647 goto done;
720 int i; 648 ret = ov9740_reg_write(client, OV9740_X_ADDR_END_LO, x_end & 0xff);
649 if (ret)
650 goto done;
651 ret = ov9740_reg_write(client, OV9740_Y_ADDR_END_HI, y_end >> 8);
652 if (ret)
653 goto done;
654 ret = ov9740_reg_write(client, OV9740_Y_ADDR_END_LO, y_end & 0xff);
655 if (ret)
656 goto done;
721 657
722 for (i = 0; i < ARRAY_SIZE(ov9740_resolutions); i++) 658 ret = ov9740_reg_write(client, OV9740_X_OUTPUT_SIZE_HI, width >> 8);
723 if ((ov9740_resolutions[i].width >= *width) && 659 if (ret)
724 (ov9740_resolutions[i].height >= *height)) { 660 goto done;
725 *width = ov9740_resolutions[i].width; 661 ret = ov9740_reg_write(client, OV9740_X_OUTPUT_SIZE_LO, width & 0xff);
726 *height = ov9740_resolutions[i].height; 662 if (ret)
727 return; 663 goto done;
728 } 664 ret = ov9740_reg_write(client, OV9740_Y_OUTPUT_SIZE_HI, height >> 8);
665 if (ret)
666 goto done;
667 ret = ov9740_reg_write(client, OV9740_Y_OUTPUT_SIZE_LO, height & 0xff);
668 if (ret)
669 goto done;
729 670
730 *width = ov9740_resolutions[OV9740_720P].width; 671 ret = ov9740_reg_write(client, OV9740_ISP_CTRL1E, scale_input_x >> 8);
731 *height = ov9740_resolutions[OV9740_720P].height; 672 if (ret)
732} 673 goto done;
674 ret = ov9740_reg_write(client, OV9740_ISP_CTRL1F, scale_input_x & 0xff);
675 if (ret)
676 goto done;
677 ret = ov9740_reg_write(client, OV9740_ISP_CTRL20, scale_input_y >> 8);
678 if (ret)
679 goto done;
680 ret = ov9740_reg_write(client, OV9740_ISP_CTRL21, scale_input_y & 0xff);
681 if (ret)
682 goto done;
733 683
734/* Setup registers according to resolution and color encoding */ 684 ret = ov9740_reg_write(client, OV9740_VFIFO_READ_START_HI,
735static int ov9740_set_res(struct i2c_client *client, u32 width) 685 (scale_input_x - width) >> 8);
736{ 686 if (ret)
737 int ret; 687 goto done;
688 ret = ov9740_reg_write(client, OV9740_VFIFO_READ_START_LO,
689 (scale_input_x - width) & 0xff);
690 if (ret)
691 goto done;
738 692
739 /* select register configuration for given resolution */ 693 ret = ov9740_reg_write(client, OV9740_ISP_CTRL00, 0xff);
740 if (width == ov9740_resolutions[OV9740_VGA].width) { 694 if (ret)
741 dev_dbg(&client->dev, "Setting image size to 640x480\n"); 695 goto done;
742 ret = ov9740_reg_write_array(client, ov9740_regs_vga, 696 ret = ov9740_reg_write(client, OV9740_ISP_CTRL01, 0xef |
743 ARRAY_SIZE(ov9740_regs_vga)); 697 (scaling << 4));
744 } else if (width == ov9740_resolutions[OV9740_720P].width) { 698 if (ret)
745 dev_dbg(&client->dev, "Setting image size to 1280x720\n"); 699 goto done;
746 ret = ov9740_reg_write_array(client, ov9740_regs_720p, 700 ret = ov9740_reg_write(client, OV9740_ISP_CTRL03, 0xff);
747 ARRAY_SIZE(ov9740_regs_720p));
748 } else {
749 dev_err(&client->dev, "Failed to select resolution!\n");
750 return -EINVAL;
751 }
752 701
702done:
753 return ret; 703 return ret;
754} 704}
755 705
@@ -758,6 +708,7 @@ static int ov9740_s_fmt(struct v4l2_subdev *sd,
758 struct v4l2_mbus_framefmt *mf) 708 struct v4l2_mbus_framefmt *mf)
759{ 709{
760 struct i2c_client *client = v4l2_get_subdevdata(sd); 710 struct i2c_client *client = v4l2_get_subdevdata(sd);
711 struct ov9740_priv *priv = to_ov9740(sd);
761 enum v4l2_colorspace cspace; 712 enum v4l2_colorspace cspace;
762 enum v4l2_mbus_pixelcode code = mf->code; 713 enum v4l2_mbus_pixelcode code = mf->code;
763 int ret; 714 int ret;
@@ -777,13 +728,15 @@ static int ov9740_s_fmt(struct v4l2_subdev *sd,
777 if (ret < 0) 728 if (ret < 0)
778 return ret; 729 return ret;
779 730
780 ret = ov9740_set_res(client, mf->width); 731 ret = ov9740_set_res(client, mf->width, mf->height);
781 if (ret < 0) 732 if (ret < 0)
782 return ret; 733 return ret;
783 734
784 mf->code = code; 735 mf->code = code;
785 mf->colorspace = cspace; 736 mf->colorspace = cspace;
786 737
738 memcpy(&priv->current_mf, mf, sizeof(struct v4l2_mbus_framefmt));
739
787 return ret; 740 return ret;
788} 741}
789 742
@@ -814,8 +767,8 @@ static int ov9740_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
814{ 767{
815 a->bounds.left = 0; 768 a->bounds.left = 0;
816 a->bounds.top = 0; 769 a->bounds.top = 0;
817 a->bounds.width = ov9740_resolutions[OV9740_720P].width; 770 a->bounds.width = OV9740_MAX_WIDTH;
818 a->bounds.height = ov9740_resolutions[OV9740_720P].height; 771 a->bounds.height = OV9740_MAX_HEIGHT;
819 a->defrect = a->bounds; 772 a->defrect = a->bounds;
820 a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 773 a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
821 a->pixelaspect.numerator = 1; 774 a->pixelaspect.numerator = 1;
@@ -828,13 +781,115 @@ static int ov9740_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
828{ 781{
829 a->c.left = 0; 782 a->c.left = 0;
830 a->c.top = 0; 783 a->c.top = 0;
831 a->c.width = ov9740_resolutions[OV9740_720P].width; 784 a->c.width = OV9740_MAX_WIDTH;
832 a->c.height = ov9740_resolutions[OV9740_720P].height; 785 a->c.height = OV9740_MAX_HEIGHT;
833 a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 786 a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
834 787
835 return 0; 788 return 0;
836} 789}
837 790
791/* Get status of additional camera capabilities */
792static int ov9740_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
793{
794 struct ov9740_priv *priv = to_ov9740(sd);
795
796 switch (ctrl->id) {
797 case V4L2_CID_VFLIP:
798 ctrl->value = priv->flag_vflip;
799 break;
800 case V4L2_CID_HFLIP:
801 ctrl->value = priv->flag_hflip;
802 break;
803 default:
804 return -EINVAL;
805 }
806
807 return 0;
808}
809
810/* Set status of additional camera capabilities */
811static int ov9740_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
812{
813 struct ov9740_priv *priv = to_ov9740(sd);
814
815 switch (ctrl->id) {
816 case V4L2_CID_VFLIP:
817 priv->flag_vflip = ctrl->value;
818 break;
819 case V4L2_CID_HFLIP:
820 priv->flag_hflip = ctrl->value;
821 break;
822 default:
823 return -EINVAL;
824 }
825
826 return 0;
827}
828
829/* Get chip identification */
830static int ov9740_g_chip_ident(struct v4l2_subdev *sd,
831 struct v4l2_dbg_chip_ident *id)
832{
833 struct ov9740_priv *priv = to_ov9740(sd);
834
835 id->ident = priv->ident;
836 id->revision = priv->revision;
837
838 return 0;
839}
840
841static int ov9740_s_power(struct v4l2_subdev *sd, int on)
842{
843 struct ov9740_priv *priv = to_ov9740(sd);
844
845 if (!priv->current_enable)
846 return 0;
847
848 if (on) {
849 ov9740_s_fmt(sd, &priv->current_mf);
850 ov9740_s_stream(sd, priv->current_enable);
851 } else {
852 ov9740_s_stream(sd, 0);
853 priv->current_enable = true;
854 }
855
856 return 0;
857}
858
859#ifdef CONFIG_VIDEO_ADV_DEBUG
860static int ov9740_get_register(struct v4l2_subdev *sd,
861 struct v4l2_dbg_register *reg)
862{
863 struct i2c_client *client = v4l2_get_subdevdata(sd);
864 int ret;
865 u8 val;
866
867 if (reg->reg & ~0xffff)
868 return -EINVAL;
869
870 reg->size = 2;
871
872 ret = ov9740_reg_read(client, reg->reg, &val);
873 if (ret)
874 return ret;
875
876 reg->val = (__u64)val;
877
878 return ret;
879}
880
881static int ov9740_set_register(struct v4l2_subdev *sd,
882 struct v4l2_dbg_register *reg)
883{
884 struct i2c_client *client = v4l2_get_subdevdata(sd);
885
886 if (reg->reg & ~0xffff || reg->val & ~0xff)
887 return -EINVAL;
888
889 return ov9740_reg_write(client, reg->reg, reg->val);
890}
891#endif
892
838static int ov9740_video_probe(struct soc_camera_device *icd, 893static int ov9740_video_probe(struct soc_camera_device *icd,
839 struct i2c_client *client) 894 struct i2c_client *client)
840{ 895{
@@ -843,16 +898,9 @@ static int ov9740_video_probe(struct soc_camera_device *icd,
843 u8 modelhi, modello; 898 u8 modelhi, modello;
844 int ret; 899 int ret;
845 900
846 /* 901 /* We must have a parent by now. And it cannot be a wrong one. */
847 * We must have a parent by now. And it cannot be a wrong one. 902 BUG_ON(!icd->parent ||
848 * So this entire test is completely redundant. 903 to_soc_camera_host(icd->parent)->nr != icd->iface);
849 */
850 if (!icd->dev.parent ||
851 to_soc_camera_host(icd->dev.parent)->nr != icd->iface) {
852 dev_err(&client->dev, "Parent missing or invalid!\n");
853 ret = -ENODEV;
854 goto err;
855 }
856 904
857 /* 905 /*
858 * check and show product ID and manufacturer ID 906 * check and show product ID and manufacturer ID
@@ -901,24 +949,24 @@ static struct soc_camera_ops ov9740_ops = {
901 .num_controls = ARRAY_SIZE(ov9740_controls), 949 .num_controls = ARRAY_SIZE(ov9740_controls),
902}; 950};
903 951
952static struct v4l2_subdev_video_ops ov9740_video_ops = {
953 .s_stream = ov9740_s_stream,
954 .s_mbus_fmt = ov9740_s_fmt,
955 .try_mbus_fmt = ov9740_try_fmt,
956 .enum_mbus_fmt = ov9740_enum_fmt,
957 .cropcap = ov9740_cropcap,
958 .g_crop = ov9740_g_crop,
959};
960
904static struct v4l2_subdev_core_ops ov9740_core_ops = { 961static struct v4l2_subdev_core_ops ov9740_core_ops = {
905 .g_ctrl = ov9740_g_ctrl, 962 .g_ctrl = ov9740_g_ctrl,
906 .s_ctrl = ov9740_s_ctrl, 963 .s_ctrl = ov9740_s_ctrl,
907 .g_chip_ident = ov9740_g_chip_ident, 964 .g_chip_ident = ov9740_g_chip_ident,
965 .s_power = ov9740_s_power,
908#ifdef CONFIG_VIDEO_ADV_DEBUG 966#ifdef CONFIG_VIDEO_ADV_DEBUG
909 .g_register = ov9740_get_register, 967 .g_register = ov9740_get_register,
910 .s_register = ov9740_set_register, 968 .s_register = ov9740_set_register,
911#endif 969#endif
912
913};
914
915static struct v4l2_subdev_video_ops ov9740_video_ops = {
916 .s_stream = ov9740_s_stream,
917 .s_mbus_fmt = ov9740_s_fmt,
918 .try_mbus_fmt = ov9740_try_fmt,
919 .enum_mbus_fmt = ov9740_enum_fmt,
920 .cropcap = ov9740_cropcap,
921 .g_crop = ov9740_g_crop,
922}; 970};
923 971
924static struct v4l2_subdev_ops ov9740_subdev_ops = { 972static struct v4l2_subdev_ops ov9740_subdev_ops = {
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index 7551907f8c28..e753b5e4d2ce 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -28,7 +28,6 @@
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/ioport.h> 29#include <linux/ioport.h>
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/version.h>
32#include <linux/mutex.h> 31#include <linux/mutex.h>
33#include <linux/uaccess.h> 32#include <linux/uaccess.h>
34#include <asm/io.h> 33#include <asm/io.h>
@@ -39,7 +38,7 @@
39#include <media/v4l2-device.h> 38#include <media/v4l2-device.h>
40 39
41MODULE_LICENSE("GPL"); 40MODULE_LICENSE("GPL");
42 41MODULE_VERSION("0.0.4");
43 42
44#define MOTOROLA 1 43#define MOTOROLA 1
45#define PHILIPS2 2 /* SAA7191 */ 44#define PHILIPS2 2 /* SAA7191 */
@@ -678,7 +677,6 @@ static int pms_querycap(struct file *file, void *priv,
678 strlcpy(vcap->driver, dev->v4l2_dev.name, sizeof(vcap->driver)); 677 strlcpy(vcap->driver, dev->v4l2_dev.name, sizeof(vcap->driver));
679 strlcpy(vcap->card, "Mediavision PMS", sizeof(vcap->card)); 678 strlcpy(vcap->card, "Mediavision PMS", sizeof(vcap->card));
680 strlcpy(vcap->bus_info, "ISA", sizeof(vcap->bus_info)); 679 strlcpy(vcap->bus_info, "ISA", sizeof(vcap->bus_info));
681 vcap->version = KERNEL_VERSION(0, 0, 3);
682 vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE; 680 vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
683 return 0; 681 return 0;
684} 682}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-main.c b/drivers/media/video/pvrusb2/pvrusb2-main.c
index 2254194aad57..c1d9bb61cd77 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-main.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-main.c
@@ -168,6 +168,7 @@ module_exit(pvr_exit);
168MODULE_AUTHOR(DRIVER_AUTHOR); 168MODULE_AUTHOR(DRIVER_AUTHOR);
169MODULE_DESCRIPTION(DRIVER_DESC); 169MODULE_DESCRIPTION(DRIVER_DESC);
170MODULE_LICENSE("GPL"); 170MODULE_LICENSE("GPL");
171MODULE_VERSION("0.9.1");
171 172
172 173
173/* 174/*
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index 38761142a4d9..e27f8ab76966 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -91,7 +91,7 @@ static struct v4l2_capability pvr_capability ={
91 .driver = "pvrusb2", 91 .driver = "pvrusb2",
92 .card = "Hauppauge WinTV pvr-usb2", 92 .card = "Hauppauge WinTV pvr-usb2",
93 .bus_info = "usb", 93 .bus_info = "usb",
94 .version = KERNEL_VERSION(0, 9, 0), 94 .version = LINUX_VERSION_CODE,
95 .capabilities = (V4L2_CAP_VIDEO_CAPTURE | 95 .capabilities = (V4L2_CAP_VIDEO_CAPTURE |
96 V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO | 96 V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
97 V4L2_CAP_READWRITE), 97 V4L2_CAP_READWRITE),
@@ -369,11 +369,6 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
369 break; 369 break;
370 } 370 }
371 371
372 case VIDIOC_S_AUDIO:
373 {
374 ret = -EINVAL;
375 break;
376 }
377 case VIDIOC_G_TUNER: 372 case VIDIOC_G_TUNER:
378 { 373 {
379 struct v4l2_tuner *vt = (struct v4l2_tuner *)arg; 374 struct v4l2_tuner *vt = (struct v4l2_tuner *)arg;
@@ -850,7 +845,7 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
850#endif 845#endif
851 846
852 default : 847 default :
853 ret = -EINVAL; 848 ret = -ENOTTY;
854 break; 849 break;
855 } 850 }
856 851
diff --git a/drivers/media/video/pwc/Kconfig b/drivers/media/video/pwc/Kconfig
index 8da42e4f1ba0..d63d0a850035 100644
--- a/drivers/media/video/pwc/Kconfig
+++ b/drivers/media/video/pwc/Kconfig
@@ -1,6 +1,7 @@
1config USB_PWC 1config USB_PWC
2 tristate "USB Philips Cameras" 2 tristate "USB Philips Cameras"
3 depends on VIDEO_V4L2 3 depends on VIDEO_V4L2
4 select VIDEOBUF2_VMALLOC
4 ---help--- 5 ---help---
5 Say Y or M here if you want to use one of these Philips & OEM 6 Say Y or M here if you want to use one of these Philips & OEM
6 webcams: 7 webcams:
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 760b4de13adf..3977addf3ba8 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -3,6 +3,7 @@
3 video modes. 3 video modes.
4 (C) 1999-2003 Nemosoft Unv. 4 (C) 1999-2003 Nemosoft Unv.
5 (C) 2004-2006 Luc Saillard (luc@saillard.org) 5 (C) 2004-2006 Luc Saillard (luc@saillard.org)
6 (C) 2011 Hans de Goede <hdegoede@redhat.com>
6 7
7 NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx 8 NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
8 driver and thus may have bugs that are not present in the original version. 9 driver and thus may have bugs that are not present in the original version.
@@ -43,61 +44,12 @@
43#include <asm/errno.h> 44#include <asm/errno.h>
44 45
45#include "pwc.h" 46#include "pwc.h"
46#include "pwc-uncompress.h"
47#include "pwc-kiara.h" 47#include "pwc-kiara.h"
48#include "pwc-timon.h" 48#include "pwc-timon.h"
49#include "pwc-dec1.h" 49#include "pwc-dec1.h"
50#include "pwc-dec23.h" 50#include "pwc-dec23.h"
51 51
52/* Request types: video */ 52/* Selectors for status controls used only in this file */
53#define SET_LUM_CTL 0x01
54#define GET_LUM_CTL 0x02
55#define SET_CHROM_CTL 0x03
56#define GET_CHROM_CTL 0x04
57#define SET_STATUS_CTL 0x05
58#define GET_STATUS_CTL 0x06
59#define SET_EP_STREAM_CTL 0x07
60#define GET_EP_STREAM_CTL 0x08
61#define GET_XX_CTL 0x09
62#define SET_XX_CTL 0x0A
63#define GET_XY_CTL 0x0B
64#define SET_XY_CTL 0x0C
65#define SET_MPT_CTL 0x0D
66#define GET_MPT_CTL 0x0E
67
68/* Selectors for the Luminance controls [GS]ET_LUM_CTL */
69#define AGC_MODE_FORMATTER 0x2000
70#define PRESET_AGC_FORMATTER 0x2100
71#define SHUTTER_MODE_FORMATTER 0x2200
72#define PRESET_SHUTTER_FORMATTER 0x2300
73#define PRESET_CONTOUR_FORMATTER 0x2400
74#define AUTO_CONTOUR_FORMATTER 0x2500
75#define BACK_LIGHT_COMPENSATION_FORMATTER 0x2600
76#define CONTRAST_FORMATTER 0x2700
77#define DYNAMIC_NOISE_CONTROL_FORMATTER 0x2800
78#define FLICKERLESS_MODE_FORMATTER 0x2900
79#define AE_CONTROL_SPEED 0x2A00
80#define BRIGHTNESS_FORMATTER 0x2B00
81#define GAMMA_FORMATTER 0x2C00
82
83/* Selectors for the Chrominance controls [GS]ET_CHROM_CTL */
84#define WB_MODE_FORMATTER 0x1000
85#define AWB_CONTROL_SPEED_FORMATTER 0x1100
86#define AWB_CONTROL_DELAY_FORMATTER 0x1200
87#define PRESET_MANUAL_RED_GAIN_FORMATTER 0x1300
88#define PRESET_MANUAL_BLUE_GAIN_FORMATTER 0x1400
89#define COLOUR_MODE_FORMATTER 0x1500
90#define SATURATION_MODE_FORMATTER1 0x1600
91#define SATURATION_MODE_FORMATTER2 0x1700
92
93/* Selectors for the Status controls [GS]ET_STATUS_CTL */
94#define SAVE_USER_DEFAULTS_FORMATTER 0x0200
95#define RESTORE_USER_DEFAULTS_FORMATTER 0x0300
96#define RESTORE_FACTORY_DEFAULTS_FORMATTER 0x0400
97#define READ_AGC_FORMATTER 0x0500
98#define READ_SHUTTER_FORMATTER 0x0600
99#define READ_RED_GAIN_FORMATTER 0x0700
100#define READ_BLUE_GAIN_FORMATTER 0x0800
101#define GET_STATUS_B00 0x0B00 53#define GET_STATUS_B00 0x0B00
102#define SENSOR_TYPE_FORMATTER1 0x0C00 54#define SENSOR_TYPE_FORMATTER1 0x0C00
103#define GET_STATUS_3000 0x3000 55#define GET_STATUS_3000 0x3000
@@ -116,11 +68,6 @@
116/* Formatters for the Video Endpoint controls [GS]ET_EP_STREAM_CTL */ 68/* Formatters for the Video Endpoint controls [GS]ET_EP_STREAM_CTL */
117#define VIDEO_OUTPUT_CONTROL_FORMATTER 0x0100 69#define VIDEO_OUTPUT_CONTROL_FORMATTER 0x0100
118 70
119/* Formatters for the motorized pan & tilt [GS]ET_MPT_CTL */
120#define PT_RELATIVE_CONTROL_FORMATTER 0x01
121#define PT_RESET_CONTROL_FORMATTER 0x02
122#define PT_STATUS_FORMATTER 0x03
123
124static const char *size2name[PSZ_MAX] = 71static const char *size2name[PSZ_MAX] =
125{ 72{
126 "subQCIF", 73 "subQCIF",
@@ -160,7 +107,7 @@ static void pwc_set_image_buffer_size(struct pwc_device *pdev);
160/****************************************************************************/ 107/****************************************************************************/
161 108
162static int _send_control_msg(struct pwc_device *pdev, 109static int _send_control_msg(struct pwc_device *pdev,
163 u8 request, u16 value, int index, void *buf, int buflen, int timeout) 110 u8 request, u16 value, int index, void *buf, int buflen)
164{ 111{
165 int rc; 112 int rc;
166 void *kbuf = NULL; 113 void *kbuf = NULL;
@@ -177,7 +124,7 @@ static int _send_control_msg(struct pwc_device *pdev,
177 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 124 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
178 value, 125 value,
179 index, 126 index,
180 kbuf, buflen, timeout); 127 kbuf, buflen, USB_CTRL_SET_TIMEOUT);
181 128
182 kfree(kbuf); 129 kfree(kbuf);
183 return rc; 130 return rc;
@@ -197,9 +144,13 @@ static int recv_control_msg(struct pwc_device *pdev,
197 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 144 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
198 value, 145 value,
199 pdev->vcinterface, 146 pdev->vcinterface,
200 kbuf, buflen, 500); 147 kbuf, buflen, USB_CTRL_GET_TIMEOUT);
201 memcpy(buf, kbuf, buflen); 148 memcpy(buf, kbuf, buflen);
202 kfree(kbuf); 149 kfree(kbuf);
150
151 if (rc < 0)
152 PWC_ERROR("recv_control_msg error %d req %02x val %04x\n",
153 rc, request, value);
203 return rc; 154 return rc;
204} 155}
205 156
@@ -210,18 +161,16 @@ static inline int send_video_command(struct pwc_device *pdev,
210 SET_EP_STREAM_CTL, 161 SET_EP_STREAM_CTL,
211 VIDEO_OUTPUT_CONTROL_FORMATTER, 162 VIDEO_OUTPUT_CONTROL_FORMATTER,
212 index, 163 index,
213 buf, buflen, 1000); 164 buf, buflen);
214} 165}
215 166
216static inline int send_control_msg(struct pwc_device *pdev, 167int send_control_msg(struct pwc_device *pdev,
217 u8 request, u16 value, void *buf, int buflen) 168 u8 request, u16 value, void *buf, int buflen)
218{ 169{
219 return _send_control_msg(pdev, 170 return _send_control_msg(pdev,
220 request, value, pdev->vcinterface, buf, buflen, 500); 171 request, value, pdev->vcinterface, buf, buflen);
221} 172}
222 173
223
224
225static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames) 174static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames)
226{ 175{
227 unsigned char buf[3]; 176 unsigned char buf[3];
@@ -261,8 +210,11 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames)
261 PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret); 210 PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret);
262 return ret; 211 return ret;
263 } 212 }
264 if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420) 213 if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
265 pwc_dec1_init(pdev->type, pdev->release, buf, pdev->decompress_data); 214 ret = pwc_dec1_init(pdev, pdev->type, pdev->release, buf);
215 if (ret < 0)
216 return ret;
217 }
266 218
267 pdev->cmd_len = 3; 219 pdev->cmd_len = 3;
268 memcpy(pdev->cmd_buf, buf, 3); 220 memcpy(pdev->cmd_buf, buf, 3);
@@ -321,8 +273,11 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames, i
321 if (ret < 0) 273 if (ret < 0)
322 return ret; 274 return ret;
323 275
324 if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) 276 if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
325 pwc_dec23_init(pdev, pdev->type, buf); 277 ret = pwc_dec23_init(pdev, pdev->type, buf);
278 if (ret < 0)
279 return ret;
280 }
326 281
327 pdev->cmd_len = 13; 282 pdev->cmd_len = 13;
328 memcpy(pdev->cmd_buf, buf, 13); 283 memcpy(pdev->cmd_buf, buf, 13);
@@ -394,8 +349,11 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames, i
394 if (ret < 0) 349 if (ret < 0)
395 return ret; 350 return ret;
396 351
397 if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) 352 if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
398 pwc_dec23_init(pdev, pdev->type, buf); 353 ret = pwc_dec23_init(pdev, pdev->type, buf);
354 if (ret < 0)
355 return ret;
356 }
399 357
400 pdev->cmd_len = 12; 358 pdev->cmd_len = 12;
401 memcpy(pdev->cmd_buf, buf, 12); 359 memcpy(pdev->cmd_buf, buf, 12);
@@ -452,6 +410,7 @@ int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frame
452 } 410 }
453 pdev->view.x = width; 411 pdev->view.x = width;
454 pdev->view.y = height; 412 pdev->view.y = height;
413 pdev->vcompression = compression;
455 pdev->frame_total_size = pdev->frame_size + pdev->frame_header_size + pdev->frame_trailer_size; 414 pdev->frame_total_size = pdev->frame_size + pdev->frame_header_size + pdev->frame_trailer_size;
456 pwc_set_image_buffer_size(pdev); 415 pwc_set_image_buffer_size(pdev);
457 PWC_DEBUG_SIZE("Set viewport to %dx%d, image size is %dx%d.\n", width, height, pwc_image_sizes[size].x, pwc_image_sizes[size].y); 416 PWC_DEBUG_SIZE("Set viewport to %dx%d, image size is %dx%d.\n", width, height, pwc_image_sizes[size].x, pwc_image_sizes[size].y);
@@ -511,13 +470,9 @@ unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned i
511 return ret; 470 return ret;
512} 471}
513 472
514#define BLACK_Y 0
515#define BLACK_U 128
516#define BLACK_V 128
517
518static void pwc_set_image_buffer_size(struct pwc_device *pdev) 473static void pwc_set_image_buffer_size(struct pwc_device *pdev)
519{ 474{
520 int i, factor = 0; 475 int factor = 0;
521 476
522 /* for V4L2_PIX_FMT_YUV420 */ 477 /* for V4L2_PIX_FMT_YUV420 */
523 switch (pdev->pixfmt) { 478 switch (pdev->pixfmt) {
@@ -541,442 +496,108 @@ static void pwc_set_image_buffer_size(struct pwc_device *pdev)
541 */ 496 */
542 pdev->offset.x = ((pdev->view.x - pdev->image.x) / 2) & 0xFFFC; 497 pdev->offset.x = ((pdev->view.x - pdev->image.x) / 2) & 0xFFFC;
543 pdev->offset.y = ((pdev->view.y - pdev->image.y) / 2) & 0xFFFE; 498 pdev->offset.y = ((pdev->view.y - pdev->image.y) / 2) & 0xFFFE;
544
545 /* Fill buffers with black colors */
546 for (i = 0; i < pwc_mbufs; i++) {
547 unsigned char *p = pdev->image_data + pdev->images[i].offset;
548 memset(p, BLACK_Y, pdev->view.x * pdev->view.y);
549 p += pdev->view.x * pdev->view.y;
550 memset(p, BLACK_U, pdev->view.x * pdev->view.y/4);
551 p += pdev->view.x * pdev->view.y/4;
552 memset(p, BLACK_V, pdev->view.x * pdev->view.y/4);
553 }
554} 499}
555 500
556 501int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
557
558/* BRIGHTNESS */
559
560int pwc_get_brightness(struct pwc_device *pdev)
561{ 502{
562 char buf;
563 int ret; 503 int ret;
504 u8 buf;
564 505
565 ret = recv_control_msg(pdev, 506 ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
566 GET_LUM_CTL, BRIGHTNESS_FORMATTER, &buf, sizeof(buf));
567 if (ret < 0) 507 if (ret < 0)
568 return ret; 508 return ret;
569 return buf;
570}
571 509
572int pwc_set_brightness(struct pwc_device *pdev, int value) 510 *data = buf;
573{ 511 return 0;
574 char buf;
575
576 if (value < 0)
577 value = 0;
578 if (value > 0xffff)
579 value = 0xffff;
580 buf = (value >> 9) & 0x7f;
581 return send_control_msg(pdev,
582 SET_LUM_CTL, BRIGHTNESS_FORMATTER, &buf, sizeof(buf));
583} 512}
584 513
585/* CONTRAST */ 514int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
586
587int pwc_get_contrast(struct pwc_device *pdev)
588{ 515{
589 char buf;
590 int ret; 516 int ret;
591 517
592 ret = recv_control_msg(pdev, 518 ret = send_control_msg(pdev, request, value, &data, sizeof(data));
593 GET_LUM_CTL, CONTRAST_FORMATTER, &buf, sizeof(buf));
594 if (ret < 0) 519 if (ret < 0)
595 return ret; 520 return ret;
596 return buf;
597}
598 521
599int pwc_set_contrast(struct pwc_device *pdev, int value) 522 return 0;
600{
601 char buf;
602
603 if (value < 0)
604 value = 0;
605 if (value > 0xffff)
606 value = 0xffff;
607 buf = (value >> 10) & 0x3f;
608 return send_control_msg(pdev,
609 SET_LUM_CTL, CONTRAST_FORMATTER, &buf, sizeof(buf));
610} 523}
611 524
612/* GAMMA */ 525int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
613
614int pwc_get_gamma(struct pwc_device *pdev)
615{ 526{
616 char buf;
617 int ret; 527 int ret;
528 s8 buf;
618 529
619 ret = recv_control_msg(pdev, 530 ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
620 GET_LUM_CTL, GAMMA_FORMATTER, &buf, sizeof(buf));
621 if (ret < 0) 531 if (ret < 0)
622 return ret; 532 return ret;
623 return buf;
624}
625
626int pwc_set_gamma(struct pwc_device *pdev, int value)
627{
628 char buf;
629 533
630 if (value < 0) 534 *data = buf;
631 value = 0;
632 if (value > 0xffff)
633 value = 0xffff;
634 buf = (value >> 11) & 0x1f;
635 return send_control_msg(pdev,
636 SET_LUM_CTL, GAMMA_FORMATTER, &buf, sizeof(buf));
637}
638
639
640/* SATURATION */
641
642/* return a value between [-100 , 100] */
643int pwc_get_saturation(struct pwc_device *pdev, int *value)
644{
645 char buf;
646 int ret, saturation_register;
647
648 if (pdev->type < 675)
649 return -EINVAL;
650 if (pdev->type < 730)
651 saturation_register = SATURATION_MODE_FORMATTER2;
652 else
653 saturation_register = SATURATION_MODE_FORMATTER1;
654 ret = recv_control_msg(pdev,
655 GET_CHROM_CTL, saturation_register, &buf, sizeof(buf));
656 if (ret < 0)
657 return ret;
658 *value = (signed)buf;
659 return 0; 535 return 0;
660} 536}
661 537
662/* @param value saturation color between [-100 , 100] */ 538int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
663int pwc_set_saturation(struct pwc_device *pdev, int value)
664{ 539{
665 char buf;
666 int saturation_register;
667
668 if (pdev->type < 675)
669 return -EINVAL;
670 if (value < -100)
671 value = -100;
672 if (value > 100)
673 value = 100;
674 if (pdev->type < 730)
675 saturation_register = SATURATION_MODE_FORMATTER2;
676 else
677 saturation_register = SATURATION_MODE_FORMATTER1;
678 return send_control_msg(pdev,
679 SET_CHROM_CTL, saturation_register, &buf, sizeof(buf));
680}
681
682/* AGC */
683
684int pwc_set_agc(struct pwc_device *pdev, int mode, int value)
685{
686 char buf;
687 int ret; 540 int ret;
541 u8 buf[2];
688 542
689 if (mode) 543 ret = recv_control_msg(pdev, request, value, buf, sizeof(buf));
690 buf = 0x0; /* auto */
691 else
692 buf = 0xff; /* fixed */
693
694 ret = send_control_msg(pdev,
695 SET_LUM_CTL, AGC_MODE_FORMATTER, &buf, sizeof(buf));
696
697 if (!mode && ret >= 0) {
698 if (value < 0)
699 value = 0;
700 if (value > 0xffff)
701 value = 0xffff;
702 buf = (value >> 10) & 0x3F;
703 ret = send_control_msg(pdev,
704 SET_LUM_CTL, PRESET_AGC_FORMATTER, &buf, sizeof(buf));
705 }
706 if (ret < 0) 544 if (ret < 0)
707 return ret; 545 return ret;
546
547 *data = (buf[1] << 8) | buf[0];
708 return 0; 548 return 0;
709} 549}
710 550
711int pwc_get_agc(struct pwc_device *pdev, int *value) 551int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data)
712{ 552{
713 unsigned char buf;
714 int ret; 553 int ret;
554 u8 buf[2];
715 555
716 ret = recv_control_msg(pdev, 556 buf[0] = data & 0xff;
717 GET_LUM_CTL, AGC_MODE_FORMATTER, &buf, sizeof(buf)); 557 buf[1] = data >> 8;
558 ret = send_control_msg(pdev, request, value, buf, sizeof(buf));
718 if (ret < 0) 559 if (ret < 0)
719 return ret; 560 return ret;
720 561
721 if (buf != 0) { /* fixed */
722 ret = recv_control_msg(pdev,
723 GET_LUM_CTL, PRESET_AGC_FORMATTER, &buf, sizeof(buf));
724 if (ret < 0)
725 return ret;
726 if (buf > 0x3F)
727 buf = 0x3F;
728 *value = (buf << 10);
729 }
730 else { /* auto */
731 ret = recv_control_msg(pdev,
732 GET_STATUS_CTL, READ_AGC_FORMATTER, &buf, sizeof(buf));
733 if (ret < 0)
734 return ret;
735 /* Gah... this value ranges from 0x00 ... 0x9F */
736 if (buf > 0x9F)
737 buf = 0x9F;
738 *value = -(48 + buf * 409);
739 }
740
741 return 0; 562 return 0;
742} 563}
743 564
744int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value) 565int pwc_button_ctrl(struct pwc_device *pdev, u16 value)
745{
746 char buf[2];
747 int speed, ret;
748
749
750 if (mode)
751 buf[0] = 0x0; /* auto */
752 else
753 buf[0] = 0xff; /* fixed */
754
755 ret = send_control_msg(pdev,
756 SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, 1);
757
758 if (!mode && ret >= 0) {
759 if (value < 0)
760 value = 0;
761 if (value > 0xffff)
762 value = 0xffff;
763
764 if (DEVICE_USE_CODEC2(pdev->type)) {
765 /* speed ranges from 0x0 to 0x290 (656) */
766 speed = (value / 100);
767 buf[1] = speed >> 8;
768 buf[0] = speed & 0xff;
769 } else if (DEVICE_USE_CODEC3(pdev->type)) {
770 /* speed seems to range from 0x0 to 0xff */
771 buf[1] = 0;
772 buf[0] = value >> 8;
773 }
774
775 ret = send_control_msg(pdev,
776 SET_LUM_CTL, PRESET_SHUTTER_FORMATTER,
777 &buf, sizeof(buf));
778 }
779 return ret;
780}
781
782/* This function is not exported to v4l1, so output values between 0 -> 256 */
783int pwc_get_shutter_speed(struct pwc_device *pdev, int *value)
784{ 566{
785 unsigned char buf[2];
786 int ret; 567 int ret;
787 568
788 ret = recv_control_msg(pdev, 569 ret = send_control_msg(pdev, SET_STATUS_CTL, value, NULL, 0);
789 GET_STATUS_CTL, READ_SHUTTER_FORMATTER, &buf, sizeof(buf));
790 if (ret < 0) 570 if (ret < 0)
791 return ret; 571 return ret;
792 *value = buf[0] + (buf[1] << 8); 572
793 if (DEVICE_USE_CODEC2(pdev->type)) {
794 /* speed ranges from 0x0 to 0x290 (656) */
795 *value *= 256/656;
796 } else if (DEVICE_USE_CODEC3(pdev->type)) {
797 /* speed seems to range from 0x0 to 0xff */
798 }
799 return 0; 573 return 0;
800} 574}
801 575
802
803/* POWER */ 576/* POWER */
804 577void pwc_camera_power(struct pwc_device *pdev, int power)
805int pwc_camera_power(struct pwc_device *pdev, int power)
806{ 578{
807 char buf; 579 char buf;
580 int r;
581
582 if (!pdev->power_save)
583 return;
808 584
809 if (pdev->type < 675 || (pdev->type < 730 && pdev->release < 6)) 585 if (pdev->type < 675 || (pdev->type < 730 && pdev->release < 6))
810 return 0; /* Not supported by Nala or Timon < release 6 */ 586 return; /* Not supported by Nala or Timon < release 6 */
811 587
812 if (power) 588 if (power)
813 buf = 0x00; /* active */ 589 buf = 0x00; /* active */
814 else 590 else
815 buf = 0xFF; /* power save */ 591 buf = 0xFF; /* power save */
816 return send_control_msg(pdev, 592 r = send_control_msg(pdev,
817 SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER, 593 SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER,
818 &buf, sizeof(buf)); 594 &buf, sizeof(buf));
819}
820
821
822
823/* private calls */
824
825int pwc_restore_user(struct pwc_device *pdev)
826{
827 return send_control_msg(pdev,
828 SET_STATUS_CTL, RESTORE_USER_DEFAULTS_FORMATTER, NULL, 0);
829}
830
831int pwc_save_user(struct pwc_device *pdev)
832{
833 return send_control_msg(pdev,
834 SET_STATUS_CTL, SAVE_USER_DEFAULTS_FORMATTER, NULL, 0);
835}
836
837int pwc_restore_factory(struct pwc_device *pdev)
838{
839 return send_control_msg(pdev,
840 SET_STATUS_CTL, RESTORE_FACTORY_DEFAULTS_FORMATTER, NULL, 0);
841}
842
843 /* ************************************************* */
844 /* Patch by Alvarado: (not in the original version */
845
846 /*
847 * the camera recognizes modes from 0 to 4:
848 *
849 * 00: indoor (incandescant lighting)
850 * 01: outdoor (sunlight)
851 * 02: fluorescent lighting
852 * 03: manual
853 * 04: auto
854 */
855int pwc_set_awb(struct pwc_device *pdev, int mode)
856{
857 char buf;
858 int ret;
859
860 if (mode < 0)
861 mode = 0;
862
863 if (mode > 4)
864 mode = 4;
865
866 buf = mode & 0x07; /* just the lowest three bits */
867
868 ret = send_control_msg(pdev,
869 SET_CHROM_CTL, WB_MODE_FORMATTER, &buf, sizeof(buf));
870
871 if (ret < 0)
872 return ret;
873 return 0;
874}
875
876int pwc_get_awb(struct pwc_device *pdev)
877{
878 unsigned char buf;
879 int ret;
880
881 ret = recv_control_msg(pdev,
882 GET_CHROM_CTL, WB_MODE_FORMATTER, &buf, sizeof(buf));
883
884 if (ret < 0)
885 return ret;
886 return buf;
887}
888
889int pwc_set_red_gain(struct pwc_device *pdev, int value)
890{
891 unsigned char buf;
892
893 if (value < 0)
894 value = 0;
895 if (value > 0xffff)
896 value = 0xffff;
897 /* only the msb is considered */
898 buf = value >> 8;
899 return send_control_msg(pdev,
900 SET_CHROM_CTL, PRESET_MANUAL_RED_GAIN_FORMATTER,
901 &buf, sizeof(buf));
902}
903
904int pwc_get_red_gain(struct pwc_device *pdev, int *value)
905{
906 unsigned char buf;
907 int ret;
908
909 ret = recv_control_msg(pdev,
910 GET_CHROM_CTL, PRESET_MANUAL_RED_GAIN_FORMATTER,
911 &buf, sizeof(buf));
912 if (ret < 0)
913 return ret;
914 *value = buf << 8;
915 return 0;
916}
917
918
919int pwc_set_blue_gain(struct pwc_device *pdev, int value)
920{
921 unsigned char buf;
922
923 if (value < 0)
924 value = 0;
925 if (value > 0xffff)
926 value = 0xffff;
927 /* only the msb is considered */
928 buf = value >> 8;
929 return send_control_msg(pdev,
930 SET_CHROM_CTL, PRESET_MANUAL_BLUE_GAIN_FORMATTER,
931 &buf, sizeof(buf));
932}
933
934int pwc_get_blue_gain(struct pwc_device *pdev, int *value)
935{
936 unsigned char buf;
937 int ret;
938
939 ret = recv_control_msg(pdev,
940 GET_CHROM_CTL, PRESET_MANUAL_BLUE_GAIN_FORMATTER,
941 &buf, sizeof(buf));
942 if (ret < 0)
943 return ret;
944 *value = buf << 8;
945 return 0;
946}
947
948 595
949/* The following two functions are different, since they only read the 596 if (r < 0)
950 internal red/blue gains, which may be different from the manual 597 PWC_ERROR("Failed to power %s camera (%d)\n",
951 gains set or read above. 598 power ? "on" : "off", r);
952 */
953static int pwc_read_red_gain(struct pwc_device *pdev, int *value)
954{
955 unsigned char buf;
956 int ret;
957
958 ret = recv_control_msg(pdev,
959 GET_STATUS_CTL, READ_RED_GAIN_FORMATTER, &buf, sizeof(buf));
960 if (ret < 0)
961 return ret;
962 *value = buf << 8;
963 return 0;
964} 599}
965 600
966static int pwc_read_blue_gain(struct pwc_device *pdev, int *value)
967{
968 unsigned char buf;
969 int ret;
970
971 ret = recv_control_msg(pdev,
972 GET_STATUS_CTL, READ_BLUE_GAIN_FORMATTER, &buf, sizeof(buf));
973 if (ret < 0)
974 return ret;
975 *value = buf << 8;
976 return 0;
977}
978
979
980static int pwc_set_wb_speed(struct pwc_device *pdev, int speed) 601static int pwc_set_wb_speed(struct pwc_device *pdev, int speed)
981{ 602{
982 unsigned char buf; 603 unsigned char buf;
@@ -1028,6 +649,7 @@ static int pwc_get_wb_delay(struct pwc_device *pdev, int *value)
1028int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value) 649int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
1029{ 650{
1030 unsigned char buf[2]; 651 unsigned char buf[2];
652 int r;
1031 653
1032 if (pdev->type < 730) 654 if (pdev->type < 730)
1033 return 0; 655 return 0;
@@ -1045,8 +667,12 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
1045 buf[0] = on_value; 667 buf[0] = on_value;
1046 buf[1] = off_value; 668 buf[1] = off_value;
1047 669
1048 return send_control_msg(pdev, 670 r = send_control_msg(pdev,
1049 SET_STATUS_CTL, LED_FORMATTER, &buf, sizeof(buf)); 671 SET_STATUS_CTL, LED_FORMATTER, &buf, sizeof(buf));
672 if (r < 0)
673 PWC_ERROR("Failed to set LED on/off time (%d)\n", r);
674
675 return r;
1050} 676}
1051 677
1052static int pwc_get_leds(struct pwc_device *pdev, int *on_value, int *off_value) 678static int pwc_get_leds(struct pwc_device *pdev, int *on_value, int *off_value)
@@ -1069,164 +695,6 @@ static int pwc_get_leds(struct pwc_device *pdev, int *on_value, int *off_value)
1069 return 0; 695 return 0;
1070} 696}
1071 697
1072int pwc_set_contour(struct pwc_device *pdev, int contour)
1073{
1074 unsigned char buf;
1075 int ret;
1076
1077 if (contour < 0)
1078 buf = 0xff; /* auto contour on */
1079 else
1080 buf = 0x0; /* auto contour off */
1081 ret = send_control_msg(pdev,
1082 SET_LUM_CTL, AUTO_CONTOUR_FORMATTER, &buf, sizeof(buf));
1083 if (ret < 0)
1084 return ret;
1085
1086 if (contour < 0)
1087 return 0;
1088 if (contour > 0xffff)
1089 contour = 0xffff;
1090
1091 buf = (contour >> 10); /* contour preset is [0..3f] */
1092 ret = send_control_msg(pdev,
1093 SET_LUM_CTL, PRESET_CONTOUR_FORMATTER, &buf, sizeof(buf));
1094 if (ret < 0)
1095 return ret;
1096 return 0;
1097}
1098
1099int pwc_get_contour(struct pwc_device *pdev, int *contour)
1100{
1101 unsigned char buf;
1102 int ret;
1103
1104 ret = recv_control_msg(pdev,
1105 GET_LUM_CTL, AUTO_CONTOUR_FORMATTER, &buf, sizeof(buf));
1106 if (ret < 0)
1107 return ret;
1108
1109 if (buf == 0) {
1110 /* auto mode off, query current preset value */
1111 ret = recv_control_msg(pdev,
1112 GET_LUM_CTL, PRESET_CONTOUR_FORMATTER,
1113 &buf, sizeof(buf));
1114 if (ret < 0)
1115 return ret;
1116 *contour = buf << 10;
1117 }
1118 else
1119 *contour = -1;
1120 return 0;
1121}
1122
1123
1124int pwc_set_backlight(struct pwc_device *pdev, int backlight)
1125{
1126 unsigned char buf;
1127
1128 if (backlight)
1129 buf = 0xff;
1130 else
1131 buf = 0x0;
1132 return send_control_msg(pdev,
1133 SET_LUM_CTL, BACK_LIGHT_COMPENSATION_FORMATTER,
1134 &buf, sizeof(buf));
1135}
1136
1137int pwc_get_backlight(struct pwc_device *pdev, int *backlight)
1138{
1139 int ret;
1140 unsigned char buf;
1141
1142 ret = recv_control_msg(pdev,
1143 GET_LUM_CTL, BACK_LIGHT_COMPENSATION_FORMATTER,
1144 &buf, sizeof(buf));
1145 if (ret < 0)
1146 return ret;
1147 *backlight = !!buf;
1148 return 0;
1149}
1150
1151int pwc_set_colour_mode(struct pwc_device *pdev, int colour)
1152{
1153 unsigned char buf;
1154
1155 if (colour)
1156 buf = 0xff;
1157 else
1158 buf = 0x0;
1159 return send_control_msg(pdev,
1160 SET_CHROM_CTL, COLOUR_MODE_FORMATTER, &buf, sizeof(buf));
1161}
1162
1163int pwc_get_colour_mode(struct pwc_device *pdev, int *colour)
1164{
1165 int ret;
1166 unsigned char buf;
1167
1168 ret = recv_control_msg(pdev,
1169 GET_CHROM_CTL, COLOUR_MODE_FORMATTER, &buf, sizeof(buf));
1170 if (ret < 0)
1171 return ret;
1172 *colour = !!buf;
1173 return 0;
1174}
1175
1176
1177int pwc_set_flicker(struct pwc_device *pdev, int flicker)
1178{
1179 unsigned char buf;
1180
1181 if (flicker)
1182 buf = 0xff;
1183 else
1184 buf = 0x0;
1185 return send_control_msg(pdev,
1186 SET_LUM_CTL, FLICKERLESS_MODE_FORMATTER, &buf, sizeof(buf));
1187}
1188
1189int pwc_get_flicker(struct pwc_device *pdev, int *flicker)
1190{
1191 int ret;
1192 unsigned char buf;
1193
1194 ret = recv_control_msg(pdev,
1195 GET_LUM_CTL, FLICKERLESS_MODE_FORMATTER, &buf, sizeof(buf));
1196 if (ret < 0)
1197 return ret;
1198 *flicker = !!buf;
1199 return 0;
1200}
1201
1202int pwc_set_dynamic_noise(struct pwc_device *pdev, int noise)
1203{
1204 unsigned char buf;
1205
1206 if (noise < 0)
1207 noise = 0;
1208 if (noise > 3)
1209 noise = 3;
1210 buf = noise;
1211 return send_control_msg(pdev,
1212 SET_LUM_CTL, DYNAMIC_NOISE_CONTROL_FORMATTER,
1213 &buf, sizeof(buf));
1214}
1215
1216int pwc_get_dynamic_noise(struct pwc_device *pdev, int *noise)
1217{
1218 int ret;
1219 unsigned char buf;
1220
1221 ret = recv_control_msg(pdev,
1222 GET_LUM_CTL, DYNAMIC_NOISE_CONTROL_FORMATTER,
1223 &buf, sizeof(buf));
1224 if (ret < 0)
1225 return ret;
1226 *noise = buf;
1227 return 0;
1228}
1229
1230static int _pwc_mpt_reset(struct pwc_device *pdev, int flags) 698static int _pwc_mpt_reset(struct pwc_device *pdev, int flags)
1231{ 699{
1232 unsigned char buf; 700 unsigned char buf;
@@ -1309,7 +777,7 @@ static int pwc_mpt_get_status(struct pwc_device *pdev, struct pwc_mpt_status *st
1309 return 0; 777 return 0;
1310} 778}
1311 779
1312 780#ifdef CONFIG_USB_PWC_DEBUG
1313int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor) 781int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
1314{ 782{
1315 unsigned char buf; 783 unsigned char buf;
@@ -1332,7 +800,7 @@ int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
1332 *sensor = buf; 800 *sensor = buf;
1333 return 0; 801 return 0;
1334} 802}
1335 803#endif
1336 804
1337 /* End of Add-Ons */ 805 /* End of Add-Ons */
1338 /* ************************************************* */ 806 /* ************************************************* */
@@ -1356,37 +824,41 @@ int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
1356/* copy local variable to arg */ 824/* copy local variable to arg */
1357#define ARG_OUT(ARG_name) /* nothing */ 825#define ARG_OUT(ARG_name) /* nothing */
1358 826
827/*
828 * Our ctrls use native values, but the old custom pwc ioctl interface expects
829 * values from 0 - 65535, define 2 helper functions to scale things. */
830static int pwc_ioctl_g_ctrl(struct v4l2_ctrl *ctrl)
831{
832 return v4l2_ctrl_g_ctrl(ctrl) * 65535 / ctrl->maximum;
833}
834
835static int pwc_ioctl_s_ctrl(struct v4l2_ctrl *ctrl, int val)
836{
837 return v4l2_ctrl_s_ctrl(ctrl, val * ctrl->maximum / 65535);
838}
839
1359long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg) 840long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
1360{ 841{
1361 long ret = 0; 842 long ret = 0;
1362 843
1363 switch(cmd) { 844 switch(cmd) {
1364 case VIDIOCPWCRUSER: 845 case VIDIOCPWCRUSER:
1365 { 846 ret = pwc_button_ctrl(pdev, RESTORE_USER_DEFAULTS_FORMATTER);
1366 if (pwc_restore_user(pdev))
1367 ret = -EINVAL;
1368 break; 847 break;
1369 }
1370 848
1371 case VIDIOCPWCSUSER: 849 case VIDIOCPWCSUSER:
1372 { 850 ret = pwc_button_ctrl(pdev, SAVE_USER_DEFAULTS_FORMATTER);
1373 if (pwc_save_user(pdev))
1374 ret = -EINVAL;
1375 break; 851 break;
1376 }
1377 852
1378 case VIDIOCPWCFACTORY: 853 case VIDIOCPWCFACTORY:
1379 { 854 ret = pwc_button_ctrl(pdev, RESTORE_FACTORY_DEFAULTS_FORMATTER);
1380 if (pwc_restore_factory(pdev))
1381 ret = -EINVAL;
1382 break; 855 break;
1383 }
1384 856
1385 case VIDIOCPWCSCQUAL: 857 case VIDIOCPWCSCQUAL:
1386 { 858 {
1387 ARG_DEF(int, qual) 859 ARG_DEF(int, qual)
1388 860
1389 if (pdev->iso_init) { 861 if (vb2_is_streaming(&pdev->vb_queue)) {
1390 ret = -EBUSY; 862 ret = -EBUSY;
1391 break; 863 break;
1392 } 864 }
@@ -1396,8 +868,6 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
1396 ret = -EINVAL; 868 ret = -EINVAL;
1397 else 869 else
1398 ret = pwc_set_video_mode(pdev, pdev->view.x, pdev->view.y, pdev->vframes, ARGR(qual), pdev->vsnapshot); 870 ret = pwc_set_video_mode(pdev, pdev->view.x, pdev->view.y, pdev->vframes, ARGR(qual), pdev->vsnapshot);
1399 if (ret >= 0)
1400 pdev->vcompression = ARGR(qual);
1401 break; 871 break;
1402 } 872 }
1403 873
@@ -1432,71 +902,59 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
1432 case VIDIOCPWCSAGC: 902 case VIDIOCPWCSAGC:
1433 { 903 {
1434 ARG_DEF(int, agc) 904 ARG_DEF(int, agc)
1435
1436 ARG_IN(agc) 905 ARG_IN(agc)
1437 if (pwc_set_agc(pdev, ARGR(agc) < 0 ? 1 : 0, ARGR(agc))) 906 ret = v4l2_ctrl_s_ctrl(pdev->autogain, ARGR(agc) < 0);
1438 ret = -EINVAL; 907 if (ret == 0 && ARGR(agc) >= 0)
908 ret = pwc_ioctl_s_ctrl(pdev->gain, ARGR(agc));
1439 break; 909 break;
1440 } 910 }
1441 911
1442 case VIDIOCPWCGAGC: 912 case VIDIOCPWCGAGC:
1443 { 913 {
1444 ARG_DEF(int, agc) 914 ARG_DEF(int, agc)
1445 915 if (v4l2_ctrl_g_ctrl(pdev->autogain))
1446 if (pwc_get_agc(pdev, ARGA(agc))) 916 ARGR(agc) = -1;
1447 ret = -EINVAL; 917 else
918 ARGR(agc) = pwc_ioctl_g_ctrl(pdev->gain);
1448 ARG_OUT(agc) 919 ARG_OUT(agc)
1449 break; 920 break;
1450 } 921 }
1451 922
1452 case VIDIOCPWCSSHUTTER: 923 case VIDIOCPWCSSHUTTER:
1453 { 924 {
1454 ARG_DEF(int, shutter_speed) 925 ARG_DEF(int, shutter)
1455 926 ARG_IN(shutter)
1456 ARG_IN(shutter_speed) 927 ret = v4l2_ctrl_s_ctrl(pdev->exposure_auto,
1457 ret = pwc_set_shutter_speed(pdev, ARGR(shutter_speed) < 0 ? 1 : 0, ARGR(shutter_speed)); 928 /* Menu idx 0 = auto, idx 1 = manual */
929 ARGR(shutter) >= 0);
930 if (ret == 0 && ARGR(shutter) >= 0)
931 ret = pwc_ioctl_s_ctrl(pdev->exposure, ARGR(shutter));
1458 break; 932 break;
1459 } 933 }
1460 934
1461 case VIDIOCPWCSAWB: 935 case VIDIOCPWCSAWB:
1462 { 936 {
1463 ARG_DEF(struct pwc_whitebalance, wb) 937 ARG_DEF(struct pwc_whitebalance, wb)
1464
1465 ARG_IN(wb) 938 ARG_IN(wb)
1466 ret = pwc_set_awb(pdev, ARGR(wb).mode); 939 ret = v4l2_ctrl_s_ctrl(pdev->auto_white_balance,
1467 if (ret >= 0 && ARGR(wb).mode == PWC_WB_MANUAL) { 940 ARGR(wb).mode);
1468 pwc_set_red_gain(pdev, ARGR(wb).manual_red); 941 if (ret == 0 && ARGR(wb).mode == PWC_WB_MANUAL)
1469 pwc_set_blue_gain(pdev, ARGR(wb).manual_blue); 942 ret = pwc_ioctl_s_ctrl(pdev->red_balance,
1470 } 943 ARGR(wb).manual_red);
944 if (ret == 0 && ARGR(wb).mode == PWC_WB_MANUAL)
945 ret = pwc_ioctl_s_ctrl(pdev->blue_balance,
946 ARGR(wb).manual_blue);
1471 break; 947 break;
1472 } 948 }
1473 949
1474 case VIDIOCPWCGAWB: 950 case VIDIOCPWCGAWB:
1475 { 951 {
1476 ARG_DEF(struct pwc_whitebalance, wb) 952 ARG_DEF(struct pwc_whitebalance, wb)
1477 953 ARGR(wb).mode = v4l2_ctrl_g_ctrl(pdev->auto_white_balance);
1478 memset(ARGA(wb), 0, sizeof(struct pwc_whitebalance)); 954 ARGR(wb).manual_red = ARGR(wb).read_red =
1479 ARGR(wb).mode = pwc_get_awb(pdev); 955 pwc_ioctl_g_ctrl(pdev->red_balance);
1480 if (ARGR(wb).mode < 0) 956 ARGR(wb).manual_blue = ARGR(wb).read_blue =
1481 ret = -EINVAL; 957 pwc_ioctl_g_ctrl(pdev->blue_balance);
1482 else {
1483 if (ARGR(wb).mode == PWC_WB_MANUAL) {
1484 ret = pwc_get_red_gain(pdev, &ARGR(wb).manual_red);
1485 if (ret < 0)
1486 break;
1487 ret = pwc_get_blue_gain(pdev, &ARGR(wb).manual_blue);
1488 if (ret < 0)
1489 break;
1490 }
1491 if (ARGR(wb).mode == PWC_WB_AUTO) {
1492 ret = pwc_read_red_gain(pdev, &ARGR(wb).read_red);
1493 if (ret < 0)
1494 break;
1495 ret = pwc_read_blue_gain(pdev, &ARGR(wb).read_blue);
1496 if (ret < 0)
1497 break;
1498 }
1499 }
1500 ARG_OUT(wb) 958 ARG_OUT(wb)
1501 break; 959 break;
1502 } 960 }
@@ -1550,17 +1008,20 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
1550 case VIDIOCPWCSCONTOUR: 1008 case VIDIOCPWCSCONTOUR:
1551 { 1009 {
1552 ARG_DEF(int, contour) 1010 ARG_DEF(int, contour)
1553
1554 ARG_IN(contour) 1011 ARG_IN(contour)
1555 ret = pwc_set_contour(pdev, ARGR(contour)); 1012 ret = v4l2_ctrl_s_ctrl(pdev->autocontour, ARGR(contour) < 0);
1013 if (ret == 0 && ARGR(contour) >= 0)
1014 ret = pwc_ioctl_s_ctrl(pdev->contour, ARGR(contour));
1556 break; 1015 break;
1557 } 1016 }
1558 1017
1559 case VIDIOCPWCGCONTOUR: 1018 case VIDIOCPWCGCONTOUR:
1560 { 1019 {
1561 ARG_DEF(int, contour) 1020 ARG_DEF(int, contour)
1562 1021 if (v4l2_ctrl_g_ctrl(pdev->autocontour))
1563 ret = pwc_get_contour(pdev, ARGA(contour)); 1022 ARGR(contour) = -1;
1023 else
1024 ARGR(contour) = pwc_ioctl_g_ctrl(pdev->contour);
1564 ARG_OUT(contour) 1025 ARG_OUT(contour)
1565 break; 1026 break;
1566 } 1027 }
@@ -1568,17 +1029,15 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
1568 case VIDIOCPWCSBACKLIGHT: 1029 case VIDIOCPWCSBACKLIGHT:
1569 { 1030 {
1570 ARG_DEF(int, backlight) 1031 ARG_DEF(int, backlight)
1571
1572 ARG_IN(backlight) 1032 ARG_IN(backlight)
1573 ret = pwc_set_backlight(pdev, ARGR(backlight)); 1033 ret = v4l2_ctrl_s_ctrl(pdev->backlight, ARGR(backlight));
1574 break; 1034 break;
1575 } 1035 }
1576 1036
1577 case VIDIOCPWCGBACKLIGHT: 1037 case VIDIOCPWCGBACKLIGHT:
1578 { 1038 {
1579 ARG_DEF(int, backlight) 1039 ARG_DEF(int, backlight)
1580 1040 ARGR(backlight) = v4l2_ctrl_g_ctrl(pdev->backlight);
1581 ret = pwc_get_backlight(pdev, ARGA(backlight));
1582 ARG_OUT(backlight) 1041 ARG_OUT(backlight)
1583 break; 1042 break;
1584 } 1043 }
@@ -1586,17 +1045,15 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
1586 case VIDIOCPWCSFLICKER: 1045 case VIDIOCPWCSFLICKER:
1587 { 1046 {
1588 ARG_DEF(int, flicker) 1047 ARG_DEF(int, flicker)
1589
1590 ARG_IN(flicker) 1048 ARG_IN(flicker)
1591 ret = pwc_set_flicker(pdev, ARGR(flicker)); 1049 ret = v4l2_ctrl_s_ctrl(pdev->flicker, ARGR(flicker));
1592 break; 1050 break;
1593 } 1051 }
1594 1052
1595 case VIDIOCPWCGFLICKER: 1053 case VIDIOCPWCGFLICKER:
1596 { 1054 {
1597 ARG_DEF(int, flicker) 1055 ARG_DEF(int, flicker)
1598 1056 ARGR(flicker) = v4l2_ctrl_g_ctrl(pdev->flicker);
1599 ret = pwc_get_flicker(pdev, ARGA(flicker));
1600 ARG_OUT(flicker) 1057 ARG_OUT(flicker)
1601 break; 1058 break;
1602 } 1059 }
@@ -1604,17 +1061,15 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
1604 case VIDIOCPWCSDYNNOISE: 1061 case VIDIOCPWCSDYNNOISE:
1605 { 1062 {
1606 ARG_DEF(int, dynnoise) 1063 ARG_DEF(int, dynnoise)
1607
1608 ARG_IN(dynnoise) 1064 ARG_IN(dynnoise)
1609 ret = pwc_set_dynamic_noise(pdev, ARGR(dynnoise)); 1065 ret = v4l2_ctrl_s_ctrl(pdev->noise_reduction, ARGR(dynnoise));
1610 break; 1066 break;
1611 } 1067 }
1612 1068
1613 case VIDIOCPWCGDYNNOISE: 1069 case VIDIOCPWCGDYNNOISE:
1614 { 1070 {
1615 ARG_DEF(int, dynnoise) 1071 ARG_DEF(int, dynnoise)
1616 1072 ARGR(dynnoise) = v4l2_ctrl_g_ctrl(pdev->noise_reduction);
1617 ret = pwc_get_dynamic_noise(pdev, ARGA(dynnoise));
1618 ARG_OUT(dynnoise); 1073 ARG_OUT(dynnoise);
1619 break; 1074 break;
1620 } 1075 }
diff --git a/drivers/media/video/pwc/pwc-dec1.c b/drivers/media/video/pwc/pwc-dec1.c
index c29593f589eb..be0e02cb487f 100644
--- a/drivers/media/video/pwc/pwc-dec1.c
+++ b/drivers/media/video/pwc/pwc-dec1.c
@@ -22,29 +22,19 @@
22 along with this program; if not, write to the Free Software 22 along with this program; if not, write to the Free Software
23 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 23 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24*/ 24*/
25
26
27
28#include "pwc-dec1.h" 25#include "pwc-dec1.h"
29 26
30 27int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer)
31void pwc_dec1_init(int type, int release, void *buffer, void *table)
32{ 28{
29 struct pwc_dec1_private *pdec;
33 30
34} 31 if (pwc->decompress_data == NULL) {
35 32 pdec = kmalloc(sizeof(struct pwc_dec1_private), GFP_KERNEL);
36void pwc_dec1_exit(void) 33 if (pdec == NULL)
37{ 34 return -ENOMEM;
35 pwc->decompress_data = pdec;
36 }
37 pdec = pwc->decompress_data;
38 38
39
40
41}
42
43int pwc_dec1_alloc(struct pwc_device *pwc)
44{
45 pwc->decompress_data = kmalloc(sizeof(struct pwc_dec1_private), GFP_KERNEL);
46 if (pwc->decompress_data == NULL)
47 return -ENOMEM;
48 return 0; 39 return 0;
49} 40}
50
diff --git a/drivers/media/video/pwc/pwc-dec1.h b/drivers/media/video/pwc/pwc-dec1.h
index 8b62ddcc5c7e..a57d8601080b 100644
--- a/drivers/media/video/pwc/pwc-dec1.h
+++ b/drivers/media/video/pwc/pwc-dec1.h
@@ -22,8 +22,6 @@
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23*/ 23*/
24 24
25
26
27#ifndef PWC_DEC1_H 25#ifndef PWC_DEC1_H
28#define PWC_DEC1_H 26#define PWC_DEC1_H
29 27
@@ -32,12 +30,8 @@
32struct pwc_dec1_private 30struct pwc_dec1_private
33{ 31{
34 int version; 32 int version;
35
36}; 33};
37 34
38int pwc_dec1_alloc(struct pwc_device *pwc); 35int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer);
39void pwc_dec1_init(int type, int release, void *buffer, void *private_data);
40void pwc_dec1_exit(void);
41 36
42#endif 37#endif
43
diff --git a/drivers/media/video/pwc/pwc-dec23.c b/drivers/media/video/pwc/pwc-dec23.c
index 0c801b8f3eca..06a4e877ba40 100644
--- a/drivers/media/video/pwc/pwc-dec23.c
+++ b/drivers/media/video/pwc/pwc-dec23.c
@@ -916,27 +916,5 @@ void pwc_dec23_decompress(const struct pwc_device *pwc,
916 pout_planar_v += pwc->view.x; 916 pout_planar_v += pwc->view.x;
917 917
918 } 918 }
919
920 } 919 }
921
922} 920}
923
924void pwc_dec23_exit(void)
925{
926 /* Do nothing */
927
928}
929
930/**
931 * Allocate a private structure used by lookup table.
932 * You must call kfree() to free the memory allocated.
933 */
934int pwc_dec23_alloc(struct pwc_device *pwc)
935{
936 pwc->decompress_data = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL);
937 if (pwc->decompress_data == NULL)
938 return -ENOMEM;
939 return 0;
940}
941
942/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
diff --git a/drivers/media/video/pwc/pwc-dec23.h b/drivers/media/video/pwc/pwc-dec23.h
index 1c55298ad153..a0ac4f3dff81 100644
--- a/drivers/media/video/pwc/pwc-dec23.h
+++ b/drivers/media/video/pwc/pwc-dec23.h
@@ -49,19 +49,9 @@ struct pwc_dec23_private
49 49
50}; 50};
51 51
52
53int pwc_dec23_alloc(struct pwc_device *pwc);
54int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd); 52int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd);
55void pwc_dec23_exit(void);
56void pwc_dec23_decompress(const struct pwc_device *pwc, 53void pwc_dec23_decompress(const struct pwc_device *pwc,
57 const void *src, 54 const void *src,
58 void *dst, 55 void *dst,
59 int flags); 56 int flags);
60
61
62
63#endif 57#endif
64
65
66/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
67
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index b0bde5a87c8a..51ca3589b1b5 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -2,6 +2,7 @@
2 USB and Video4Linux interface part. 2 USB and Video4Linux interface part.
3 (C) 1999-2004 Nemosoft Unv. 3 (C) 1999-2004 Nemosoft Unv.
4 (C) 2004-2006 Luc Saillard (luc@saillard.org) 4 (C) 2004-2006 Luc Saillard (luc@saillard.org)
5 (C) 2011 Hans de Goede <hdegoede@redhat.com>
5 6
6 NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx 7 NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
7 driver and thus may have bugs that are not present in the original version. 8 driver and thus may have bugs that are not present in the original version.
@@ -74,7 +75,6 @@
74#include "pwc-timon.h" 75#include "pwc-timon.h"
75#include "pwc-dec23.h" 76#include "pwc-dec23.h"
76#include "pwc-dec1.h" 77#include "pwc-dec1.h"
77#include "pwc-uncompress.h"
78 78
79/* Function prototypes and driver templates */ 79/* Function prototypes and driver templates */
80 80
@@ -116,6 +116,7 @@ MODULE_DEVICE_TABLE(usb, pwc_device_table);
116 116
117static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id *id); 117static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id *id);
118static void usb_pwc_disconnect(struct usb_interface *intf); 118static void usb_pwc_disconnect(struct usb_interface *intf);
119static void pwc_isoc_cleanup(struct pwc_device *pdev);
119 120
120static struct usb_driver pwc_driver = { 121static struct usb_driver pwc_driver = {
121 .name = "Philips webcam", /* name */ 122 .name = "Philips webcam", /* name */
@@ -127,14 +128,11 @@ static struct usb_driver pwc_driver = {
127#define MAX_DEV_HINTS 20 128#define MAX_DEV_HINTS 20
128#define MAX_ISOC_ERRORS 20 129#define MAX_ISOC_ERRORS 20
129 130
130static int default_size = PSZ_QCIF;
131static int default_fps = 10; 131static int default_fps = 10;
132static int default_fbufs = 3; /* Default number of frame buffers */
133 int pwc_mbufs = 2; /* Default number of mmap() buffers */
134#ifdef CONFIG_USB_PWC_DEBUG 132#ifdef CONFIG_USB_PWC_DEBUG
135 int pwc_trace = PWC_DEBUG_LEVEL; 133 int pwc_trace = PWC_DEBUG_LEVEL;
136#endif 134#endif
137static int power_save; 135static int power_save = -1;
138static int led_on = 100, led_off; /* defaults to LED that is on while in use */ 136static int led_on = 100, led_off; /* defaults to LED that is on while in use */
139static int pwc_preferred_compression = 1; /* 0..3 = uncompressed..high */ 137static int pwc_preferred_compression = 1; /* 0..3 = uncompressed..high */
140static struct { 138static struct {
@@ -173,389 +171,20 @@ static struct video_device pwc_template = {
173/***************************************************************************/ 171/***************************************************************************/
174/* Private functions */ 172/* Private functions */
175 173
176/* Here we want the physical address of the memory. 174struct pwc_frame_buf *pwc_get_next_fill_buf(struct pwc_device *pdev)
177 * This is used when initializing the contents of the area.
178 */
179
180
181
182static void *pwc_rvmalloc(unsigned long size)
183{
184 void * mem;
185 unsigned long adr;
186
187 mem=vmalloc_32(size);
188 if (!mem)
189 return NULL;
190
191 memset(mem, 0, size); /* Clear the ram out, no junk to the user */
192 adr=(unsigned long) mem;
193 while (size > 0)
194 {
195 SetPageReserved(vmalloc_to_page((void *)adr));
196 adr += PAGE_SIZE;
197 size -= PAGE_SIZE;
198 }
199 return mem;
200}
201
202static void pwc_rvfree(void * mem, unsigned long size)
203{
204 unsigned long adr;
205
206 if (!mem)
207 return;
208
209 adr=(unsigned long) mem;
210 while ((long) size > 0)
211 {
212 ClearPageReserved(vmalloc_to_page((void *)adr));
213 adr += PAGE_SIZE;
214 size -= PAGE_SIZE;
215 }
216 vfree(mem);
217}
218
219
220
221
222static int pwc_allocate_buffers(struct pwc_device *pdev)
223{
224 int i, err;
225 void *kbuf;
226
227 PWC_DEBUG_MEMORY(">> pwc_allocate_buffers(pdev = 0x%p)\n", pdev);
228
229 if (pdev == NULL)
230 return -ENXIO;
231
232	/* Allocate Isochronous pipe buffers */
233 for (i = 0; i < MAX_ISO_BUFS; i++) {
234 if (pdev->sbuf[i].data == NULL) {
235 kbuf = kzalloc(ISO_BUFFER_SIZE, GFP_KERNEL);
236 if (kbuf == NULL) {
237 PWC_ERROR("Failed to allocate iso buffer %d.\n", i);
238 return -ENOMEM;
239 }
240 PWC_DEBUG_MEMORY("Allocated iso buffer at %p.\n", kbuf);
241 pdev->sbuf[i].data = kbuf;
242 }
243 }
244
245 /* Allocate frame buffer structure */
246 if (pdev->fbuf == NULL) {
247 kbuf = kzalloc(default_fbufs * sizeof(struct pwc_frame_buf), GFP_KERNEL);
248 if (kbuf == NULL) {
249 PWC_ERROR("Failed to allocate frame buffer structure.\n");
250 return -ENOMEM;
251 }
252 PWC_DEBUG_MEMORY("Allocated frame buffer structure at %p.\n", kbuf);
253 pdev->fbuf = kbuf;
254 }
255
256 /* create frame buffers, and make circular ring */
257 for (i = 0; i < default_fbufs; i++) {
258 if (pdev->fbuf[i].data == NULL) {
259 kbuf = vzalloc(PWC_FRAME_SIZE); /* need vmalloc since frame buffer > 128K */
260 if (kbuf == NULL) {
261 PWC_ERROR("Failed to allocate frame buffer %d.\n", i);
262 return -ENOMEM;
263 }
264 PWC_DEBUG_MEMORY("Allocated frame buffer %d at %p.\n", i, kbuf);
265 pdev->fbuf[i].data = kbuf;
266 }
267 }
268
269 /* Allocate decompressor table space */
270 if (DEVICE_USE_CODEC1(pdev->type))
271 err = pwc_dec1_alloc(pdev);
272 else
273 err = pwc_dec23_alloc(pdev);
274
275 if (err) {
276 PWC_ERROR("Failed to allocate decompress table.\n");
277 return err;
278 }
279
280 /* Allocate image buffer; double buffer for mmap() */
281 kbuf = pwc_rvmalloc(pwc_mbufs * pdev->len_per_image);
282 if (kbuf == NULL) {
283 PWC_ERROR("Failed to allocate image buffer(s). needed (%d)\n",
284 pwc_mbufs * pdev->len_per_image);
285 return -ENOMEM;
286 }
287 PWC_DEBUG_MEMORY("Allocated image buffer at %p.\n", kbuf);
288 pdev->image_data = kbuf;
289 for (i = 0; i < pwc_mbufs; i++) {
290 pdev->images[i].offset = i * pdev->len_per_image;
291 pdev->images[i].vma_use_count = 0;
292 }
293 for (; i < MAX_IMAGES; i++) {
294 pdev->images[i].offset = 0;
295 }
296
297 kbuf = NULL;
298
299 PWC_DEBUG_MEMORY("<< pwc_allocate_buffers()\n");
300 return 0;
301}
302
303static void pwc_free_buffers(struct pwc_device *pdev)
304{
305 int i;
306
307 PWC_DEBUG_MEMORY("Entering free_buffers(%p).\n", pdev);
308
309 if (pdev == NULL)
310 return;
311 /* Release Iso-pipe buffers */
312 for (i = 0; i < MAX_ISO_BUFS; i++)
313 if (pdev->sbuf[i].data != NULL) {
314 PWC_DEBUG_MEMORY("Freeing ISO buffer at %p.\n", pdev->sbuf[i].data);
315 kfree(pdev->sbuf[i].data);
316 pdev->sbuf[i].data = NULL;
317 }
318
319 /* The same for frame buffers */
320 if (pdev->fbuf != NULL) {
321 for (i = 0; i < default_fbufs; i++) {
322 if (pdev->fbuf[i].data != NULL) {
323 PWC_DEBUG_MEMORY("Freeing frame buffer %d at %p.\n", i, pdev->fbuf[i].data);
324 vfree(pdev->fbuf[i].data);
325 pdev->fbuf[i].data = NULL;
326 }
327 }
328 kfree(pdev->fbuf);
329 pdev->fbuf = NULL;
330 }
331
332 /* Intermediate decompression buffer & tables */
333 if (pdev->decompress_data != NULL) {
334 PWC_DEBUG_MEMORY("Freeing decompression buffer at %p.\n", pdev->decompress_data);
335 kfree(pdev->decompress_data);
336 pdev->decompress_data = NULL;
337 }
338
339 /* Release image buffers */
340 if (pdev->image_data != NULL) {
341 PWC_DEBUG_MEMORY("Freeing image buffer at %p.\n", pdev->image_data);
342 pwc_rvfree(pdev->image_data, pwc_mbufs * pdev->len_per_image);
343 }
344 pdev->image_data = NULL;
345
346 PWC_DEBUG_MEMORY("Leaving free_buffers().\n");
347}
348
349/* The frame & image buffer mess.
350
351 Yes, this is a mess. Well, it used to be simple, but alas... In this
352 module, 3 buffers schemes are used to get the data from the USB bus to
353 the user program. The first scheme involves the ISO buffers (called thus
354 since they transport ISO data from the USB controller), and not really
355 interesting. Suffices to say the data from this buffer is quickly
356 gathered in an interrupt handler (pwc_isoc_handler) and placed into the
357 frame buffer.
358
359 The frame buffer is the second scheme, and is the central element here.
360 It collects the data from a single frame from the camera (hence, the
361 name). Frames are delimited by the USB camera with a short USB packet,
362 so that's easy to detect. The frame buffers form a list that is filled
363 by the camera+USB controller and drained by the user process through
364 either read() or mmap().
365
366 The image buffer is the third scheme, in which frames are decompressed
367 and converted into planar format. For mmap() there is more than
368 one image buffer available.
369
370 The frame buffers provide the image buffering. In case the user process
371 is a bit slow, this introduces lag and some undesired side-effects.
372 The problem arises when the frame buffer is full. I used to drop the last
373 frame, which makes the data in the queue stale very quickly. But dropping
374   the frame at the head of the queue proved to be a little bit more difficult.
375 I tried a circular linked scheme, but this introduced more problems than
376 it solved.
377
378 Because filling and draining are completely asynchronous processes, this
379 requires some fiddling with pointers and mutexes.
380
381 Eventually, I came up with a system with 2 lists: an 'empty' frame list
382 and a 'full' frame list:
383 * Initially, all frame buffers but one are on the 'empty' list; the one
384 remaining buffer is our initial fill frame.
385 * If a frame is needed for filling, we try to take it from the 'empty'
386 list, unless that list is empty, in which case we take the buffer at
387 the head of the 'full' list.
388 * When our fill buffer has been filled, it is appended to the 'full'
389 list.
390 * If a frame is needed by read() or mmap(), it is taken from the head of
391 the 'full' list, handled, and then appended to the 'empty' list. If no
392 buffer is present on the 'full' list, we wait.
393 The advantage is that the buffer that is currently being decompressed/
394 converted, is on neither list, and thus not in our way (any other scheme
395 I tried had the problem of old data lingering in the queue).
396
397 Whatever strategy you choose, it always remains a tradeoff: with more
398 frame buffers the chances of a missed frame are reduced. On the other
399 hand, on slower machines it introduces lag because the queue will
400 always be full.
401 */
402
403/**
404 \brief Find next frame buffer to fill. Take from empty or full list, whichever comes first.
405 */
406static int pwc_next_fill_frame(struct pwc_device *pdev)
407{
408 int ret;
409 unsigned long flags;
410
411 ret = 0;
412 spin_lock_irqsave(&pdev->ptrlock, flags);
413 if (pdev->fill_frame != NULL) {
414 /* append to 'full' list */
415 if (pdev->full_frames == NULL) {
416 pdev->full_frames = pdev->fill_frame;
417 pdev->full_frames_tail = pdev->full_frames;
418 }
419 else {
420 pdev->full_frames_tail->next = pdev->fill_frame;
421 pdev->full_frames_tail = pdev->fill_frame;
422 }
423 }
424 if (pdev->empty_frames != NULL) {
425 /* We have empty frames available. That's easy */
426 pdev->fill_frame = pdev->empty_frames;
427 pdev->empty_frames = pdev->empty_frames->next;
428 }
429 else {
430 /* Hmm. Take it from the full list */
431 /* sanity check */
432 if (pdev->full_frames == NULL) {
433 PWC_ERROR("Neither empty or full frames available!\n");
434 spin_unlock_irqrestore(&pdev->ptrlock, flags);
435 return -EINVAL;
436 }
437 pdev->fill_frame = pdev->full_frames;
438 pdev->full_frames = pdev->full_frames->next;
439 ret = 1;
440 }
441 pdev->fill_frame->next = NULL;
442 spin_unlock_irqrestore(&pdev->ptrlock, flags);
443 return ret;
444}
445
446
447/**
448 \brief Reset all buffers, pointers and lists, except for the image_used[] buffer.
449
450 If the image_used[] buffer is cleared too, mmap()/VIDIOCSYNC will run into trouble.
451 */
452static void pwc_reset_buffers(struct pwc_device *pdev)
453{
454 int i;
455 unsigned long flags;
456
457 PWC_DEBUG_MEMORY(">> %s __enter__\n", __func__);
458
459 spin_lock_irqsave(&pdev->ptrlock, flags);
460 pdev->full_frames = NULL;
461 pdev->full_frames_tail = NULL;
462 for (i = 0; i < default_fbufs; i++) {
463 pdev->fbuf[i].filled = 0;
464 if (i > 0)
465 pdev->fbuf[i].next = &pdev->fbuf[i - 1];
466 else
467 pdev->fbuf->next = NULL;
468 }
469 pdev->empty_frames = &pdev->fbuf[default_fbufs - 1];
470 pdev->empty_frames_tail = pdev->fbuf;
471 pdev->read_frame = NULL;
472 pdev->fill_frame = pdev->empty_frames;
473 pdev->empty_frames = pdev->empty_frames->next;
474
475 pdev->image_read_pos = 0;
476 pdev->fill_image = 0;
477 spin_unlock_irqrestore(&pdev->ptrlock, flags);
478
479 PWC_DEBUG_MEMORY("<< %s __leaving__\n", __func__);
480}
481
482
483/**
484 \brief Do all the handling for getting one frame: get pointer, decompress, advance pointers.
485 */
486int pwc_handle_frame(struct pwc_device *pdev)
487{ 175{
488 int ret = 0; 176 unsigned long flags = 0;
489 unsigned long flags; 177 struct pwc_frame_buf *buf = NULL;
490 178
491 spin_lock_irqsave(&pdev->ptrlock, flags); 179 spin_lock_irqsave(&pdev->queued_bufs_lock, flags);
492 /* First grab our read_frame; this is removed from all lists, so 180 if (list_empty(&pdev->queued_bufs))
493 we can release the lock after this without problems */ 181 goto leave;
494 if (pdev->read_frame != NULL) { 182
495 /* This can't theoretically happen */ 183 buf = list_entry(pdev->queued_bufs.next, struct pwc_frame_buf, list);
496 PWC_ERROR("Huh? Read frame still in use?\n"); 184 list_del(&buf->list);
497 spin_unlock_irqrestore(&pdev->ptrlock, flags); 185leave:
498 return ret; 186 spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags);
499 } 187 return buf;
500
501
502 if (pdev->full_frames == NULL) {
503 PWC_ERROR("Woops. No frames ready.\n");
504 }
505 else {
506 pdev->read_frame = pdev->full_frames;
507 pdev->full_frames = pdev->full_frames->next;
508 pdev->read_frame->next = NULL;
509 }
510
511 if (pdev->read_frame != NULL) {
512 /* Decompression is a lengthy process, so it's outside of the lock.
513 This gives the isoc_handler the opportunity to fill more frames
514 in the mean time.
515 */
516 spin_unlock_irqrestore(&pdev->ptrlock, flags);
517 ret = pwc_decompress(pdev);
518 spin_lock_irqsave(&pdev->ptrlock, flags);
519
520 /* We're done with read_buffer, tack it to the end of the empty buffer list */
521 if (pdev->empty_frames == NULL) {
522 pdev->empty_frames = pdev->read_frame;
523 pdev->empty_frames_tail = pdev->empty_frames;
524 }
525 else {
526 pdev->empty_frames_tail->next = pdev->read_frame;
527 pdev->empty_frames_tail = pdev->read_frame;
528 }
529 pdev->read_frame = NULL;
530 }
531 spin_unlock_irqrestore(&pdev->ptrlock, flags);
532 return ret;
533}
534
535/**
536 \brief Advance pointers of image buffer (after each user request)
537*/
538void pwc_next_image(struct pwc_device *pdev)
539{
540 pdev->image_used[pdev->fill_image] = 0;
541 pdev->fill_image = (pdev->fill_image + 1) % pwc_mbufs;
542}
543
544/**
545 * Print debug information when a frame is discarded because all of our buffer
546 * is full
547 */
548static void pwc_frame_dumped(struct pwc_device *pdev)
549{
550 pdev->vframes_dumped++;
551 if (pdev->vframe_count < FRAME_LOWMARK)
552 return;
553
554 if (pdev->vframes_dumped < 20)
555 PWC_DEBUG_FLOW("Dumping frame %d\n", pdev->vframe_count);
556 else if (pdev->vframes_dumped == 20)
557 PWC_DEBUG_FLOW("Dumping frame %d (last message)\n",
558 pdev->vframe_count);
559} 188}
560 189
561static void pwc_snapshot_button(struct pwc_device *pdev, int down) 190static void pwc_snapshot_button(struct pwc_device *pdev, int down)
@@ -575,9 +204,9 @@ static void pwc_snapshot_button(struct pwc_device *pdev, int down)
575#endif 204#endif
576} 205}
577 206
578static int pwc_rcv_short_packet(struct pwc_device *pdev, const struct pwc_frame_buf *fbuf) 207static void pwc_frame_complete(struct pwc_device *pdev)
579{ 208{
580 int awake = 0; 209 struct pwc_frame_buf *fbuf = pdev->fill_buf;
581 210
582 /* The ToUCam Fun CMOS sensor causes the firmware to send 2 or 3 bogus 211 /* The ToUCam Fun CMOS sensor causes the firmware to send 2 or 3 bogus
583	   frames on the USB wire after an exposure change. This condition is 212	   frames on the USB wire after an exposure change. This condition is
@@ -589,7 +218,6 @@ static int pwc_rcv_short_packet(struct pwc_device *pdev, const struct pwc_frame_
589 if (ptr[1] == 1 && ptr[0] & 0x10) { 218 if (ptr[1] == 1 && ptr[0] & 0x10) {
590 PWC_TRACE("Hyundai CMOS sensor bug. Dropping frame.\n"); 219 PWC_TRACE("Hyundai CMOS sensor bug. Dropping frame.\n");
591 pdev->drop_frames += 2; 220 pdev->drop_frames += 2;
592 pdev->vframes_error++;
593 } 221 }
594 if ((ptr[0] ^ pdev->vmirror) & 0x01) { 222 if ((ptr[0] ^ pdev->vmirror) & 0x01) {
595 pwc_snapshot_button(pdev, ptr[0] & 0x01); 223 pwc_snapshot_button(pdev, ptr[0] & 0x01);
@@ -612,8 +240,7 @@ static int pwc_rcv_short_packet(struct pwc_device *pdev, const struct pwc_frame_
612 */ 240 */
613 if (fbuf->filled == 4) 241 if (fbuf->filled == 4)
614 pdev->drop_frames++; 242 pdev->drop_frames++;
615 } 243 } else if (pdev->type == 740 || pdev->type == 720) {
616 else if (pdev->type == 740 || pdev->type == 720) {
617 unsigned char *ptr = (unsigned char *)fbuf->data; 244 unsigned char *ptr = (unsigned char *)fbuf->data;
618 if ((ptr[0] ^ pdev->vmirror) & 0x01) { 245 if ((ptr[0] ^ pdev->vmirror) & 0x01) {
619 pwc_snapshot_button(pdev, ptr[0] & 0x01); 246 pwc_snapshot_button(pdev, ptr[0] & 0x01);
@@ -621,33 +248,23 @@ static int pwc_rcv_short_packet(struct pwc_device *pdev, const struct pwc_frame_
621 pdev->vmirror = ptr[0] & 0x03; 248 pdev->vmirror = ptr[0] & 0x03;
622 } 249 }
623 250
624 /* In case we were instructed to drop the frame, do so silently. 251 /* In case we were instructed to drop the frame, do so silently. */
625 The buffer pointers are not updated either (but the counters are reset below). 252 if (pdev->drop_frames > 0) {
626 */
627 if (pdev->drop_frames > 0)
628 pdev->drop_frames--; 253 pdev->drop_frames--;
629 else { 254 } else {
630 /* Check for underflow first */ 255 /* Check for underflow first */
631 if (fbuf->filled < pdev->frame_total_size) { 256 if (fbuf->filled < pdev->frame_total_size) {
632 PWC_DEBUG_FLOW("Frame buffer underflow (%d bytes);" 257 PWC_DEBUG_FLOW("Frame buffer underflow (%d bytes);"
633 " discarded.\n", fbuf->filled); 258 " discarded.\n", fbuf->filled);
634 pdev->vframes_error++; 259 } else {
635 } 260 fbuf->vb.v4l2_buf.field = V4L2_FIELD_NONE;
636 else { 261 fbuf->vb.v4l2_buf.sequence = pdev->vframe_count;
637 /* Send only once per EOF */ 262 vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
638 awake = 1; /* delay wake_ups */ 263 pdev->fill_buf = NULL;
639 264 pdev->vsync = 0;
640 /* Find our next frame to fill. This will always succeed, since we
641 * nick a frame from either empty or full list, but if we had to
642 * take it from the full list, it means a frame got dropped.
643 */
644 if (pwc_next_fill_frame(pdev))
645 pwc_frame_dumped(pdev);
646
647 } 265 }
648 } /* !drop_frames */ 266 } /* !drop_frames */
649 pdev->vframe_count++; 267 pdev->vframe_count++;
650 return awake;
651} 268}
652 269
653/* This gets called for the Isochronous pipe (video). This is done in 270/* This gets called for the Isochronous pipe (video). This is done in
@@ -655,24 +272,20 @@ static int pwc_rcv_short_packet(struct pwc_device *pdev, const struct pwc_frame_
655 */ 272 */
656static void pwc_isoc_handler(struct urb *urb) 273static void pwc_isoc_handler(struct urb *urb)
657{ 274{
658 struct pwc_device *pdev; 275 struct pwc_device *pdev = (struct pwc_device *)urb->context;
659 int i, fst, flen; 276 int i, fst, flen;
660 int awake; 277 unsigned char *iso_buf = NULL;
661 struct pwc_frame_buf *fbuf;
662 unsigned char *fillptr = NULL, *iso_buf = NULL;
663 278
664 awake = 0; 279 if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
665 pdev = (struct pwc_device *)urb->context; 280 urb->status == -ESHUTDOWN) {
666 if (pdev == NULL) {
667 PWC_ERROR("isoc_handler() called with NULL device?!\n");
668 return;
669 }
670
671 if (urb->status == -ENOENT || urb->status == -ECONNRESET) {
672 PWC_DEBUG_OPEN("URB (%p) unlinked %ssynchronuously.\n", urb, urb->status == -ENOENT ? "" : "a"); 281 PWC_DEBUG_OPEN("URB (%p) unlinked %ssynchronuously.\n", urb, urb->status == -ENOENT ? "" : "a");
673 return; 282 return;
674 } 283 }
675 if (urb->status != -EINPROGRESS && urb->status != 0) { 284
285 if (pdev->fill_buf == NULL)
286 pdev->fill_buf = pwc_get_next_fill_buf(pdev);
287
288 if (urb->status != 0) {
676 const char *errmsg; 289 const char *errmsg;
677 290
678 errmsg = "Unknown"; 291 errmsg = "Unknown";
@@ -684,29 +297,21 @@ static void pwc_isoc_handler(struct urb *urb)
684 case -EILSEQ: errmsg = "CRC/Timeout (could be anything)"; break; 297 case -EILSEQ: errmsg = "CRC/Timeout (could be anything)"; break;
685 case -ETIME: errmsg = "Device does not respond"; break; 298 case -ETIME: errmsg = "Device does not respond"; break;
686 } 299 }
687 PWC_DEBUG_FLOW("pwc_isoc_handler() called with status %d [%s].\n", urb->status, errmsg); 300 PWC_ERROR("pwc_isoc_handler() called with status %d [%s].\n",
688 /* Give up after a number of contiguous errors on the USB bus. 301 urb->status, errmsg);
689	   Apparently something is wrong so we simulate an unplug event. 302		/* Give up after a number of contiguous errors */
690 */
691 if (++pdev->visoc_errors > MAX_ISOC_ERRORS) 303 if (++pdev->visoc_errors > MAX_ISOC_ERRORS)
692 { 304 {
693 PWC_INFO("Too many ISOC errors, bailing out.\n"); 305 PWC_ERROR("Too many ISOC errors, bailing out.\n");
694 pdev->error_status = EIO; 306 if (pdev->fill_buf) {
695 awake = 1; 307 vb2_buffer_done(&pdev->fill_buf->vb,
696 wake_up_interruptible(&pdev->frameq); 308 VB2_BUF_STATE_ERROR);
309 pdev->fill_buf = NULL;
310 }
697 } 311 }
698 goto handler_end; // ugly, but practical 312 pdev->vsync = 0; /* Drop the current frame */
699 }
700
701 fbuf = pdev->fill_frame;
702 if (fbuf == NULL) {
703 PWC_ERROR("pwc_isoc_handler without valid fill frame.\n");
704 awake = 1;
705 goto handler_end; 313 goto handler_end;
706 } 314 }
707 else {
708 fillptr = fbuf->data + fbuf->filled;
709 }
710 315
711 /* Reset ISOC error counter. We did get here, after all. */ 316 /* Reset ISOC error counter. We did get here, after all. */
712 pdev->visoc_errors = 0; 317 pdev->visoc_errors = 0;
@@ -720,89 +325,73 @@ static void pwc_isoc_handler(struct urb *urb)
720 fst = urb->iso_frame_desc[i].status; 325 fst = urb->iso_frame_desc[i].status;
721 flen = urb->iso_frame_desc[i].actual_length; 326 flen = urb->iso_frame_desc[i].actual_length;
722 iso_buf = urb->transfer_buffer + urb->iso_frame_desc[i].offset; 327 iso_buf = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
723 if (fst == 0) { 328 if (fst != 0) {
724 if (flen > 0) { /* if valid data... */ 329 PWC_ERROR("Iso frame %d has error %d\n", i, fst);
725 if (pdev->vsync > 0) { /* ...and we are not sync-hunting... */ 330 continue;
726 pdev->vsync = 2; 331 }
727 332 if (flen > 0 && pdev->vsync) {
728 /* ...copy data to frame buffer, if possible */ 333 struct pwc_frame_buf *fbuf = pdev->fill_buf;
729 if (flen + fbuf->filled > pdev->frame_total_size) { 334
730 PWC_DEBUG_FLOW("Frame buffer overflow (flen = %d, frame_total_size = %d).\n", flen, pdev->frame_total_size); 335 if (pdev->vsync == 1) {
731 pdev->vsync = 0; /* Hmm, let's wait for an EOF (end-of-frame) */ 336 do_gettimeofday(&fbuf->vb.v4l2_buf.timestamp);
732 pdev->vframes_error++; 337 pdev->vsync = 2;
733 } 338 }
734 else { 339
735 memmove(fillptr, iso_buf, flen); 340 if (flen + fbuf->filled > pdev->frame_total_size) {
736 fillptr += flen; 341 PWC_ERROR("Frame overflow (%d > %d)\n",
737 } 342 flen + fbuf->filled,
738 } 343 pdev->frame_total_size);
344 pdev->vsync = 0; /* Let's wait for an EOF */
345 } else {
346 memcpy(fbuf->data + fbuf->filled, iso_buf,
347 flen);
739 fbuf->filled += flen; 348 fbuf->filled += flen;
740 } /* ..flen > 0 */ 349 }
741 350 }
742 if (flen < pdev->vlast_packet_size) { 351 if (flen < pdev->vlast_packet_size) {
743 /* Shorter packet... We probably have the end of an image-frame; 352 /* Shorter packet... end of frame */
744 wake up read() process and let select()/poll() do something. 353 if (pdev->vsync == 2)
745 Decompression is done in user time over there. 354 pwc_frame_complete(pdev);
746 */ 355 if (pdev->fill_buf == NULL)
747 if (pdev->vsync == 2) { 356 pdev->fill_buf = pwc_get_next_fill_buf(pdev);
748 if (pwc_rcv_short_packet(pdev, fbuf)) { 357 if (pdev->fill_buf) {
749 awake = 1; 358 pdev->fill_buf->filled = 0;
750 fbuf = pdev->fill_frame;
751 }
752 }
753 fbuf->filled = 0;
754 fillptr = fbuf->data;
755 pdev->vsync = 1; 359 pdev->vsync = 1;
756 } 360 }
757
758 pdev->vlast_packet_size = flen;
759 } /* ..status == 0 */
760 else {
761 /* This is normally not interesting to the user, unless
762 * you are really debugging something, default = 0 */
763 static int iso_error;
764 iso_error++;
765 if (iso_error < 20)
766 PWC_DEBUG_FLOW("Iso frame %d of USB has error %d\n", i, fst);
767 } 361 }
362 pdev->vlast_packet_size = flen;
768 } 363 }
769 364
770handler_end: 365handler_end:
771 if (awake)
772 wake_up_interruptible(&pdev->frameq);
773
774 urb->dev = pdev->udev;
775 i = usb_submit_urb(urb, GFP_ATOMIC); 366 i = usb_submit_urb(urb, GFP_ATOMIC);
776 if (i != 0) 367 if (i != 0)
777 PWC_ERROR("Error (%d) re-submitting urb in pwc_isoc_handler.\n", i); 368 PWC_ERROR("Error (%d) re-submitting urb in pwc_isoc_handler.\n", i);
778} 369}
779 370
780 371static int pwc_isoc_init(struct pwc_device *pdev)
781int pwc_isoc_init(struct pwc_device *pdev)
782{ 372{
783 struct usb_device *udev; 373 struct usb_device *udev;
784 struct urb *urb; 374 struct urb *urb;
785 int i, j, ret; 375 int i, j, ret;
786
787 struct usb_interface *intf; 376 struct usb_interface *intf;
788 struct usb_host_interface *idesc = NULL; 377 struct usb_host_interface *idesc = NULL;
789 378
790 if (pdev == NULL)
791 return -EFAULT;
792 if (pdev->iso_init) 379 if (pdev->iso_init)
793 return 0; 380 return 0;
381
794 pdev->vsync = 0; 382 pdev->vsync = 0;
383 pdev->vlast_packet_size = 0;
384 pdev->fill_buf = NULL;
385 pdev->vframe_count = 0;
386 pdev->visoc_errors = 0;
795 udev = pdev->udev; 387 udev = pdev->udev;
796 388
797 /* Get the current alternate interface, adjust packet size */ 389 /* Get the current alternate interface, adjust packet size */
798 if (!udev->actconfig)
799 return -EFAULT;
800 intf = usb_ifnum_to_if(udev, 0); 390 intf = usb_ifnum_to_if(udev, 0);
801 if (intf) 391 if (intf)
802 idesc = usb_altnum_to_altsetting(intf, pdev->valternate); 392 idesc = usb_altnum_to_altsetting(intf, pdev->valternate);
803
804 if (!idesc) 393 if (!idesc)
805 return -EFAULT; 394 return -EIO;
806 395
807 /* Search video endpoint */ 396 /* Search video endpoint */
808 pdev->vmax_packet_size = -1; 397 pdev->vmax_packet_size = -1;
@@ -825,34 +414,32 @@ int pwc_isoc_init(struct pwc_device *pdev)
825 if (ret < 0) 414 if (ret < 0)
826 return ret; 415 return ret;
827 416
417	/* Allocate and init Isochronous urbs */
828 for (i = 0; i < MAX_ISO_BUFS; i++) { 418 for (i = 0; i < MAX_ISO_BUFS; i++) {
829 urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL); 419 urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL);
830 if (urb == NULL) { 420 if (urb == NULL) {
831 PWC_ERROR("Failed to allocate urb %d\n", i); 421 PWC_ERROR("Failed to allocate urb %d\n", i);
832 ret = -ENOMEM; 422 pdev->iso_init = 1;
833 break; 423 pwc_isoc_cleanup(pdev);
424 return -ENOMEM;
834 } 425 }
835 pdev->sbuf[i].urb = urb; 426 pdev->urbs[i] = urb;
836 PWC_DEBUG_MEMORY("Allocated URB at 0x%p\n", urb); 427 PWC_DEBUG_MEMORY("Allocated URB at 0x%p\n", urb);
837 }
838 if (ret) {
839 /* De-allocate in reverse order */
840 while (i--) {
841 usb_free_urb(pdev->sbuf[i].urb);
842 pdev->sbuf[i].urb = NULL;
843 }
844 return ret;
845 }
846
847 /* init URB structure */
848 for (i = 0; i < MAX_ISO_BUFS; i++) {
849 urb = pdev->sbuf[i].urb;
850 428
851 urb->interval = 1; // devik 429 urb->interval = 1; // devik
852 urb->dev = udev; 430 urb->dev = udev;
853 urb->pipe = usb_rcvisocpipe(udev, pdev->vendpoint); 431 urb->pipe = usb_rcvisocpipe(udev, pdev->vendpoint);
854 urb->transfer_flags = URB_ISO_ASAP; 432 urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
855 urb->transfer_buffer = pdev->sbuf[i].data; 433 urb->transfer_buffer = usb_alloc_coherent(udev,
434 ISO_BUFFER_SIZE,
435 GFP_KERNEL,
436 &urb->transfer_dma);
437 if (urb->transfer_buffer == NULL) {
438 PWC_ERROR("Failed to allocate urb buffer %d\n", i);
439 pdev->iso_init = 1;
440 pwc_isoc_cleanup(pdev);
441 return -ENOMEM;
442 }
856 urb->transfer_buffer_length = ISO_BUFFER_SIZE; 443 urb->transfer_buffer_length = ISO_BUFFER_SIZE;
857 urb->complete = pwc_isoc_handler; 444 urb->complete = pwc_isoc_handler;
858 urb->context = pdev; 445 urb->context = pdev;
@@ -866,14 +453,14 @@ int pwc_isoc_init(struct pwc_device *pdev)
866 453
867 /* link */ 454 /* link */
868 for (i = 0; i < MAX_ISO_BUFS; i++) { 455 for (i = 0; i < MAX_ISO_BUFS; i++) {
869 ret = usb_submit_urb(pdev->sbuf[i].urb, GFP_KERNEL); 456 ret = usb_submit_urb(pdev->urbs[i], GFP_KERNEL);
870 if (ret) { 457 if (ret) {
871 PWC_ERROR("isoc_init() submit_urb %d failed with error %d\n", i, ret); 458 PWC_ERROR("isoc_init() submit_urb %d failed with error %d\n", i, ret);
872 pdev->iso_init = 1; 459 pdev->iso_init = 1;
873 pwc_isoc_cleanup(pdev); 460 pwc_isoc_cleanup(pdev);
874 return ret; 461 return ret;
875 } 462 }
876 PWC_DEBUG_MEMORY("URB 0x%p submitted.\n", pdev->sbuf[i].urb); 463 PWC_DEBUG_MEMORY("URB 0x%p submitted.\n", pdev->urbs[i]);
877 } 464 }
878 465
879 /* All is done... */ 466 /* All is done... */
@@ -888,12 +475,9 @@ static void pwc_iso_stop(struct pwc_device *pdev)
888 475
889 /* Unlinking ISOC buffers one by one */ 476 /* Unlinking ISOC buffers one by one */
890 for (i = 0; i < MAX_ISO_BUFS; i++) { 477 for (i = 0; i < MAX_ISO_BUFS; i++) {
891 struct urb *urb; 478 if (pdev->urbs[i]) {
892 479 PWC_DEBUG_MEMORY("Unlinking URB %p\n", pdev->urbs[i]);
893 urb = pdev->sbuf[i].urb; 480 usb_kill_urb(pdev->urbs[i]);
894 if (urb) {
895 PWC_DEBUG_MEMORY("Unlinking URB %p\n", urb);
896 usb_kill_urb(urb);
897 } 481 }
898 } 482 }
899} 483}
@@ -904,40 +488,51 @@ static void pwc_iso_free(struct pwc_device *pdev)
904 488
905 /* Freeing ISOC buffers one by one */ 489 /* Freeing ISOC buffers one by one */
906 for (i = 0; i < MAX_ISO_BUFS; i++) { 490 for (i = 0; i < MAX_ISO_BUFS; i++) {
907 struct urb *urb; 491 if (pdev->urbs[i]) {
908
909 urb = pdev->sbuf[i].urb;
910 if (urb) {
911 PWC_DEBUG_MEMORY("Freeing URB\n"); 492 PWC_DEBUG_MEMORY("Freeing URB\n");
912 usb_free_urb(urb); 493 if (pdev->urbs[i]->transfer_buffer) {
913 pdev->sbuf[i].urb = NULL; 494 usb_free_coherent(pdev->udev,
495 pdev->urbs[i]->transfer_buffer_length,
496 pdev->urbs[i]->transfer_buffer,
497 pdev->urbs[i]->transfer_dma);
498 }
499 usb_free_urb(pdev->urbs[i]);
500 pdev->urbs[i] = NULL;
914 } 501 }
915 } 502 }
916} 503}
917 504
918void pwc_isoc_cleanup(struct pwc_device *pdev) 505static void pwc_isoc_cleanup(struct pwc_device *pdev)
919{ 506{
920 PWC_DEBUG_OPEN(">> pwc_isoc_cleanup()\n"); 507 PWC_DEBUG_OPEN(">> pwc_isoc_cleanup()\n");
921 if (pdev == NULL) 508
922 return;
923 if (pdev->iso_init == 0) 509 if (pdev->iso_init == 0)
924 return; 510 return;
925 511
926 pwc_iso_stop(pdev); 512 pwc_iso_stop(pdev);
927 pwc_iso_free(pdev); 513 pwc_iso_free(pdev);
928 514 usb_set_interface(pdev->udev, 0, 0);
929 /* Stop camera, but only if we are sure the camera is still there (unplug
930 is signalled by EPIPE)
931 */
932 if (pdev->error_status != EPIPE) {
933 PWC_DEBUG_OPEN("Setting alternate interface 0.\n");
934 usb_set_interface(pdev->udev, 0, 0);
935 }
936 515
937 pdev->iso_init = 0; 516 pdev->iso_init = 0;
938 PWC_DEBUG_OPEN("<< pwc_isoc_cleanup()\n"); 517 PWC_DEBUG_OPEN("<< pwc_isoc_cleanup()\n");
939} 518}
940 519
520/*
521 * Release all queued buffers, no need to take queued_bufs_lock, since all
522 * iso urbs have been killed when we're called so pwc_isoc_handler won't run.
523 */
524static void pwc_cleanup_queued_bufs(struct pwc_device *pdev)
525{
526 while (!list_empty(&pdev->queued_bufs)) {
527 struct pwc_frame_buf *buf;
528
529 buf = list_entry(pdev->queued_bufs.next, struct pwc_frame_buf,
530 list);
531 list_del(&buf->list);
532 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
533 }
534}
535
941/********* 536/*********
942 * sysfs 537 * sysfs
943 *********/ 538 *********/
@@ -1051,98 +646,15 @@ static const char *pwc_sensor_type_to_string(unsigned int sensor_type)
1051 646
1052static int pwc_video_open(struct file *file) 647static int pwc_video_open(struct file *file)
1053{ 648{
1054 int i, ret;
1055 struct video_device *vdev = video_devdata(file); 649 struct video_device *vdev = video_devdata(file);
1056 struct pwc_device *pdev; 650 struct pwc_device *pdev;
1057 651
1058 PWC_DEBUG_OPEN(">> video_open called(vdev = 0x%p).\n", vdev); 652 PWC_DEBUG_OPEN(">> video_open called(vdev = 0x%p).\n", vdev);
1059 653
1060 pdev = video_get_drvdata(vdev); 654 pdev = video_get_drvdata(vdev);
1061 BUG_ON(!pdev); 655 if (!pdev->udev)
1062 if (pdev->vopen) { 656 return -ENODEV;
1063 PWC_DEBUG_OPEN("I'm busy, someone is using the device.\n");
1064 return -EBUSY;
1065 }
1066
1067 pwc_construct(pdev); /* set min/max sizes correct */
1068 if (!pdev->usb_init) {
1069 PWC_DEBUG_OPEN("Doing first time initialization.\n");
1070 pdev->usb_init = 1;
1071
1072 /* Query sensor type */
1073 ret = pwc_get_cmos_sensor(pdev, &i);
1074 if (ret >= 0)
1075 {
1076 PWC_DEBUG_OPEN("This %s camera is equipped with a %s (%d).\n",
1077 pdev->vdev.name,
1078 pwc_sensor_type_to_string(i), i);
1079 }
1080 }
1081
1082 /* Turn on camera */
1083 if (power_save) {
1084 i = pwc_camera_power(pdev, 1);
1085 if (i < 0)
1086 PWC_DEBUG_OPEN("Failed to restore power to the camera! (%d)\n", i);
1087 }
1088 /* Set LED on/off time */
1089 if (pwc_set_leds(pdev, led_on, led_off) < 0)
1090 PWC_DEBUG_OPEN("Failed to set LED on/off time.\n");
1091
1092
1093 /* So far, so good. Allocate memory. */
1094 i = pwc_allocate_buffers(pdev);
1095 if (i < 0) {
1096 PWC_DEBUG_OPEN("Failed to allocate buffers memory.\n");
1097 pwc_free_buffers(pdev);
1098 return i;
1099 }
1100
1101 /* Reset buffers & parameters */
1102 pwc_reset_buffers(pdev);
1103 for (i = 0; i < pwc_mbufs; i++)
1104 pdev->image_used[i] = 0;
1105 pdev->vframe_count = 0;
1106 pdev->vframes_dumped = 0;
1107 pdev->vframes_error = 0;
1108 pdev->visoc_errors = 0;
1109 pdev->error_status = 0;
1110 pwc_construct(pdev); /* set min/max sizes correct */
1111
1112 /* Set some defaults */
1113 pdev->vsnapshot = 0;
1114
1115 /* Set video size, first try the last used video size
1116 (or the default one); if that fails try QCIF/10 or QSIF/10;
1117 it that fails too, give up.
1118 */
1119 i = pwc_set_video_mode(pdev, pwc_image_sizes[pdev->vsize].x, pwc_image_sizes[pdev->vsize].y, pdev->vframes, pdev->vcompression, 0);
1120 if (i) {
1121 unsigned int default_resolution;
1122 PWC_DEBUG_OPEN("First attempt at set_video_mode failed.\n");
1123 if (pdev->type>= 730)
1124 default_resolution = PSZ_QSIF;
1125 else
1126 default_resolution = PSZ_QCIF;
1127
1128 i = pwc_set_video_mode(pdev,
1129 pwc_image_sizes[default_resolution].x,
1130 pwc_image_sizes[default_resolution].y,
1131 10,
1132 pdev->vcompression,
1133 0);
1134 }
1135 if (i) {
1136 PWC_DEBUG_OPEN("Second attempt at set_video_mode failed.\n");
1137 pwc_free_buffers(pdev);
1138 return i;
1139 }
1140
1141 /* Initialize the webcam to sane value */
1142 pwc_set_brightness(pdev, 0x7fff);
1143 pwc_set_agc(pdev, 1, 0);
1144 657
1145 pdev->vopen++;
1146 file->private_data = vdev; 658 file->private_data = vdev;
1147 PWC_DEBUG_OPEN("<< video_open() returns 0.\n"); 659 PWC_DEBUG_OPEN("<< video_open() returns 0.\n");
1148 return 0; 660 return 0;
@@ -1158,239 +670,211 @@ static void pwc_video_release(struct video_device *vfd)
1158 if (device_hint[hint].pdev == pdev) 670 if (device_hint[hint].pdev == pdev)
1159 device_hint[hint].pdev = NULL; 671 device_hint[hint].pdev = NULL;
1160 672
673 /* Free intermediate decompression buffer & tables */
674 if (pdev->decompress_data != NULL) {
675 PWC_DEBUG_MEMORY("Freeing decompression buffer at %p.\n",
676 pdev->decompress_data);
677 kfree(pdev->decompress_data);
678 pdev->decompress_data = NULL;
679 }
680
681 v4l2_ctrl_handler_free(&pdev->ctrl_handler);
682
1161 kfree(pdev); 683 kfree(pdev);
1162} 684}
1163 685
1164/* Note that all cleanup is done in the reverse order as in _open */
1165static int pwc_video_close(struct file *file) 686static int pwc_video_close(struct file *file)
1166{ 687{
1167 struct video_device *vdev = file->private_data; 688 struct video_device *vdev = file->private_data;
1168 struct pwc_device *pdev; 689 struct pwc_device *pdev;
1169 int i;
1170 690
1171 PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev); 691 PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev);
1172 692
1173 pdev = video_get_drvdata(vdev); 693 pdev = video_get_drvdata(vdev);
1174 if (pdev->vopen == 0) 694 if (pdev->capt_file == file) {
1175 PWC_DEBUG_MODULE("video_close() called on closed device?\n"); 695 vb2_queue_release(&pdev->vb_queue);
1176 696 pdev->capt_file = NULL;
1177 /* Dump statistics, but only if a reasonable amount of frames were
1178 processed (to prevent endless log-entries in case of snap-shot
1179 programs)
1180 */
1181 if (pdev->vframe_count > 20)
1182 PWC_DEBUG_MODULE("Closing video device: %d frames received, dumped %d frames, %d frames with errors.\n", pdev->vframe_count, pdev->vframes_dumped, pdev->vframes_error);
1183
1184 if (DEVICE_USE_CODEC1(pdev->type))
1185 pwc_dec1_exit();
1186 else
1187 pwc_dec23_exit();
1188
1189 pwc_isoc_cleanup(pdev);
1190 pwc_free_buffers(pdev);
1191
1192 /* Turn off LEDS and power down camera, but only when not unplugged */
1193 if (!pdev->unplugged) {
1194 /* Turn LEDs off */
1195 if (pwc_set_leds(pdev, 0, 0) < 0)
1196 PWC_DEBUG_MODULE("Failed to set LED on/off time.\n");
1197 if (power_save) {
1198 i = pwc_camera_power(pdev, 0);
1199 if (i < 0)
1200 PWC_ERROR("Failed to power down camera (%d)\n", i);
1201 }
1202 pdev->vopen--;
1203 PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen);
1204 } 697 }
1205 698
699 PWC_DEBUG_OPEN("<< video_close()\n");
1206 return 0; 700 return 0;
1207} 701}
1208 702
1209/*
1210 * FIXME: what about two parallel reads ????
1211 * ANSWER: Not supported. You can't open the device more than once,
1212 despite what the V4L1 interface says. First, I don't see
1213 the need, second there's no mechanism of alerting the
1214 2nd/3rd/... process of events like changing image size.
1215 And I don't see the point of blocking that for the
1216 2nd/3rd/... process.
1217 In multi-threaded environments reading parallel from any
1218 device is tricky anyhow.
1219 */
1220
1221static ssize_t pwc_video_read(struct file *file, char __user *buf, 703static ssize_t pwc_video_read(struct file *file, char __user *buf,
1222 size_t count, loff_t *ppos) 704 size_t count, loff_t *ppos)
1223{ 705{
1224 struct video_device *vdev = file->private_data; 706 struct video_device *vdev = file->private_data;
1225 struct pwc_device *pdev; 707 struct pwc_device *pdev = video_get_drvdata(vdev);
1226 int noblock = file->f_flags & O_NONBLOCK;
1227 DECLARE_WAITQUEUE(wait, current);
1228 int bytes_to_read, rv = 0;
1229 void *image_buffer_addr;
1230
1231 PWC_DEBUG_READ("pwc_video_read(vdev=0x%p, buf=%p, count=%zd) called.\n",
1232 vdev, buf, count);
1233 if (vdev == NULL)
1234 return -EFAULT;
1235 pdev = video_get_drvdata(vdev);
1236 if (pdev == NULL)
1237 return -EFAULT;
1238 708
1239 if (pdev->error_status) { 709 if (!pdev->udev)
1240 rv = -pdev->error_status; /* Something happened, report what. */ 710 return -ENODEV;
1241 goto err_out;
1242 }
1243 711
1244 /* Start the stream (if not already started) */ 712 if (pdev->capt_file != NULL &&
1245 rv = pwc_isoc_init(pdev); 713 pdev->capt_file != file)
1246 if (rv) 714 return -EBUSY;
1247 goto err_out;
1248
1249 /* In case we're doing partial reads, we don't have to wait for a frame */
1250 if (pdev->image_read_pos == 0) {
1251 /* Do wait queueing according to the (doc)book */
1252 add_wait_queue(&pdev->frameq, &wait);
1253 while (pdev->full_frames == NULL) {
1254 /* Check for unplugged/etc. here */
1255 if (pdev->error_status) {
1256 remove_wait_queue(&pdev->frameq, &wait);
1257 set_current_state(TASK_RUNNING);
1258 rv = -pdev->error_status ;
1259 goto err_out;
1260 }
1261 if (noblock) {
1262 remove_wait_queue(&pdev->frameq, &wait);
1263 set_current_state(TASK_RUNNING);
1264 rv = -EWOULDBLOCK;
1265 goto err_out;
1266 }
1267 if (signal_pending(current)) {
1268 remove_wait_queue(&pdev->frameq, &wait);
1269 set_current_state(TASK_RUNNING);
1270 rv = -ERESTARTSYS;
1271 goto err_out;
1272 }
1273 mutex_unlock(&pdev->modlock);
1274 schedule();
1275 set_current_state(TASK_INTERRUPTIBLE);
1276 mutex_lock(&pdev->modlock);
1277 }
1278 remove_wait_queue(&pdev->frameq, &wait);
1279 set_current_state(TASK_RUNNING);
1280 715
1281 /* Decompress and release frame */ 716 pdev->capt_file = file;
1282 if (pwc_handle_frame(pdev)) {
1283 rv = -EFAULT;
1284 goto err_out;
1285 }
1286 }
1287 717
1288 PWC_DEBUG_READ("Copying data to user space.\n"); 718 return vb2_read(&pdev->vb_queue, buf, count, ppos,
1289 if (pdev->pixfmt != V4L2_PIX_FMT_YUV420) 719 file->f_flags & O_NONBLOCK);
1290 bytes_to_read = pdev->frame_size + sizeof(struct pwc_raw_frame);
1291 else
1292 bytes_to_read = pdev->view.size;
1293
1294 /* copy bytes to user space; we allow for partial reads */
1295 if (count + pdev->image_read_pos > bytes_to_read)
1296 count = bytes_to_read - pdev->image_read_pos;
1297 image_buffer_addr = pdev->image_data;
1298 image_buffer_addr += pdev->images[pdev->fill_image].offset;
1299 image_buffer_addr += pdev->image_read_pos;
1300 if (copy_to_user(buf, image_buffer_addr, count)) {
1301 rv = -EFAULT;
1302 goto err_out;
1303 }
1304 pdev->image_read_pos += count;
1305 if (pdev->image_read_pos >= bytes_to_read) { /* All data has been read */
1306 pdev->image_read_pos = 0;
1307 pwc_next_image(pdev);
1308 }
1309 return count;
1310err_out:
1311 return rv;
1312} 720}
1313 721
1314static unsigned int pwc_video_poll(struct file *file, poll_table *wait) 722static unsigned int pwc_video_poll(struct file *file, poll_table *wait)
1315{ 723{
1316 struct video_device *vdev = file->private_data; 724 struct video_device *vdev = file->private_data;
1317 struct pwc_device *pdev; 725 struct pwc_device *pdev = video_get_drvdata(vdev);
1318 int ret;
1319 726
1320 if (vdev == NULL) 727 if (!pdev->udev)
1321 return -EFAULT; 728 return POLL_ERR;
1322 pdev = video_get_drvdata(vdev);
1323 if (pdev == NULL)
1324 return -EFAULT;
1325 729
1326 /* Start the stream (if not already started) */ 730 return vb2_poll(&pdev->vb_queue, file, wait);
1327 ret = pwc_isoc_init(pdev); 731}
1328 if (ret) 732
1329 return ret; 733static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma)
734{
735 struct video_device *vdev = file->private_data;
736 struct pwc_device *pdev = video_get_drvdata(vdev);
737
738 if (pdev->capt_file != file)
739 return -EBUSY;
740
741 return vb2_mmap(&pdev->vb_queue, vma);
742}
743
744/***************************************************************************/
745/* Videobuf2 operations */
746
747static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
748 unsigned int *nplanes, unsigned long sizes[],
749 void *alloc_ctxs[])
750{
751 struct pwc_device *pdev = vb2_get_drv_priv(vq);
752
753 if (*nbuffers < MIN_FRAMES)
754 *nbuffers = MIN_FRAMES;
755 else if (*nbuffers > MAX_FRAMES)
756 *nbuffers = MAX_FRAMES;
757
758 *nplanes = 1;
1330 759
1331 poll_wait(file, &pdev->frameq, wait); 760 sizes[0] = PAGE_ALIGN((pdev->abs_max.x * pdev->abs_max.y * 3) / 2);
1332 if (pdev->error_status)
1333 return POLLERR;
1334 if (pdev->full_frames != NULL) /* we have frames waiting */
1335 return (POLLIN | POLLRDNORM);
1336 761
1337 return 0; 762 return 0;
1338} 763}
1339 764
1340static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma) 765static int buffer_init(struct vb2_buffer *vb)
1341{ 766{
1342 struct video_device *vdev = file->private_data; 767 struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
1343 struct pwc_device *pdev;
1344 unsigned long start;
1345 unsigned long size;
1346 unsigned long page, pos = 0;
1347 int index;
1348 768
1349 PWC_DEBUG_MEMORY(">> %s\n", __func__); 769 /* need vmalloc since frame buffer > 128K */
1350 pdev = video_get_drvdata(vdev); 770 buf->data = vzalloc(PWC_FRAME_SIZE);
1351 size = vma->vm_end - vma->vm_start; 771 if (buf->data == NULL)
1352 start = vma->vm_start; 772 return -ENOMEM;
1353 773
1354 /* Find the idx buffer for this mapping */ 774 return 0;
1355 for (index = 0; index < pwc_mbufs; index++) { 775}
1356 pos = pdev->images[index].offset; 776
1357 if ((pos>>PAGE_SHIFT) == vma->vm_pgoff) 777static int buffer_prepare(struct vb2_buffer *vb)
1358 break; 778{
779 struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
780
781 /* Don't allow queing new buffers after device disconnection */
782 if (!pdev->udev)
783 return -ENODEV;
784
785 return 0;
786}
787
788static int buffer_finish(struct vb2_buffer *vb)
789{
790 struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
791 struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
792
793 /*
794 * Application has called dqbuf and is getting back a buffer we've
795 * filled, take the pwc data we've stored in buf->data and decompress
796 * it into a usable format, storing the result in the vb2_buffer
797 */
798 return pwc_decompress(pdev, buf);
799}
800
801static void buffer_cleanup(struct vb2_buffer *vb)
802{
803 struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
804
805 vfree(buf->data);
806}
807
808static void buffer_queue(struct vb2_buffer *vb)
809{
810 struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
811 struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
812 unsigned long flags = 0;
813
814 spin_lock_irqsave(&pdev->queued_bufs_lock, flags);
815 list_add_tail(&buf->list, &pdev->queued_bufs);
816 spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags);
817}
818
819static int start_streaming(struct vb2_queue *vq)
820{
821 struct pwc_device *pdev = vb2_get_drv_priv(vq);
822
823 if (!pdev->udev)
824 return -ENODEV;
825
826 /* Turn on camera and set LEDS on */
827 pwc_camera_power(pdev, 1);
828 if (pdev->power_save) {
829 /* Restore video mode */
830 pwc_set_video_mode(pdev, pdev->view.x, pdev->view.y,
831 pdev->vframes, pdev->vcompression,
832 pdev->vsnapshot);
1359 } 833 }
1360 if (index == MAX_IMAGES) 834 pwc_set_leds(pdev, led_on, led_off);
1361 return -EINVAL; 835
1362 if (index == 0) { 836 return pwc_isoc_init(pdev);
1363 /* 837}
1364 * Special case for v4l1. In v4l1, we map only one big buffer, 838
1365 * but in v4l2 each buffer is mapped 839static int stop_streaming(struct vb2_queue *vq)
1366 */ 840{
1367 unsigned long total_size; 841 struct pwc_device *pdev = vb2_get_drv_priv(vq);
1368 total_size = pwc_mbufs * pdev->len_per_image; 842
1369 if (size != pdev->len_per_image && size != total_size) { 843 if (pdev->udev) {
1370 PWC_ERROR("Wrong size (%lu) needed to be len_per_image=%d or total_size=%lu\n", 844 pwc_set_leds(pdev, 0, 0);
1371 size, pdev->len_per_image, total_size); 845 pwc_camera_power(pdev, 0);
1372 return -EINVAL; 846 pwc_isoc_cleanup(pdev);
1373 }
1374 } else if (size > pdev->len_per_image)
1375 return -EINVAL;
1376
1377 vma->vm_flags |= VM_IO; /* from 2.6.9-acX */
1378
1379 pos += (unsigned long)pdev->image_data;
1380 while (size > 0) {
1381 page = vmalloc_to_pfn((void *)pos);
1382 if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
1383 return -EAGAIN;
1384 start += PAGE_SIZE;
1385 pos += PAGE_SIZE;
1386 if (size > PAGE_SIZE)
1387 size -= PAGE_SIZE;
1388 else
1389 size = 0;
1390 } 847 }
848 pwc_cleanup_queued_bufs(pdev);
849
1391 return 0; 850 return 0;
1392} 851}
1393 852
853static void pwc_lock(struct vb2_queue *vq)
854{
855 struct pwc_device *pdev = vb2_get_drv_priv(vq);
856 mutex_lock(&pdev->modlock);
857}
858
859static void pwc_unlock(struct vb2_queue *vq)
860{
861 struct pwc_device *pdev = vb2_get_drv_priv(vq);
862 mutex_unlock(&pdev->modlock);
863}
864
865static struct vb2_ops pwc_vb_queue_ops = {
866 .queue_setup = queue_setup,
867 .buf_init = buffer_init,
868 .buf_prepare = buffer_prepare,
869 .buf_finish = buffer_finish,
870 .buf_cleanup = buffer_cleanup,
871 .buf_queue = buffer_queue,
872 .start_streaming = start_streaming,
873 .stop_streaming = stop_streaming,
874 .wait_prepare = pwc_unlock,
875 .wait_finish = pwc_lock,
876};
877
1394/***************************************************************************/ 878/***************************************************************************/
1395/* USB functions */ 879/* USB functions */
1396 880
@@ -1406,6 +890,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1406 int hint, rc; 890 int hint, rc;
1407 int features = 0; 891 int features = 0;
1408 int video_nr = -1; /* default: use next available device */ 892 int video_nr = -1; /* default: use next available device */
893 int my_power_save = power_save;
1409 char serial_number[30], *name; 894 char serial_number[30], *name;
1410 895
1411 vendor_id = le16_to_cpu(udev->descriptor.idVendor); 896 vendor_id = le16_to_cpu(udev->descriptor.idVendor);
@@ -1513,6 +998,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1513 PWC_INFO("Logitech QuickCam 4000 Pro USB webcam detected.\n"); 998 PWC_INFO("Logitech QuickCam 4000 Pro USB webcam detected.\n");
1514 name = "Logitech QuickCam Pro 4000"; 999 name = "Logitech QuickCam Pro 4000";
1515 type_id = 740; /* CCD sensor */ 1000 type_id = 740; /* CCD sensor */
1001 if (my_power_save == -1)
1002 my_power_save = 1;
1516 break; 1003 break;
1517 case 0x08b3: 1004 case 0x08b3:
1518 PWC_INFO("Logitech QuickCam Zoom USB webcam detected.\n"); 1005 PWC_INFO("Logitech QuickCam Zoom USB webcam detected.\n");
@@ -1523,12 +1010,15 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1523 PWC_INFO("Logitech QuickCam Zoom (new model) USB webcam detected.\n"); 1010 PWC_INFO("Logitech QuickCam Zoom (new model) USB webcam detected.\n");
1524 name = "Logitech QuickCam Zoom"; 1011 name = "Logitech QuickCam Zoom";
1525 type_id = 740; /* CCD sensor */ 1012 type_id = 740; /* CCD sensor */
1526 power_save = 1; 1013 if (my_power_save == -1)
1014 my_power_save = 1;
1527 break; 1015 break;
1528 case 0x08b5: 1016 case 0x08b5:
1529 PWC_INFO("Logitech QuickCam Orbit/Sphere USB webcam detected.\n"); 1017 PWC_INFO("Logitech QuickCam Orbit/Sphere USB webcam detected.\n");
1530 name = "Logitech QuickCam Orbit"; 1018 name = "Logitech QuickCam Orbit";
1531 type_id = 740; /* CCD sensor */ 1019 type_id = 740; /* CCD sensor */
1020 if (my_power_save == -1)
1021 my_power_save = 1;
1532 features |= FEATURE_MOTOR_PANTILT; 1022 features |= FEATURE_MOTOR_PANTILT;
1533 break; 1023 break;
1534 case 0x08b6: 1024 case 0x08b6:
@@ -1583,6 +1073,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1583 PWC_INFO("Creative Labs Webcam 5 detected.\n"); 1073 PWC_INFO("Creative Labs Webcam 5 detected.\n");
1584 name = "Creative Labs Webcam 5"; 1074 name = "Creative Labs Webcam 5";
1585 type_id = 730; 1075 type_id = 730;
1076 if (my_power_save == -1)
1077 my_power_save = 1;
1586 break; 1078 break;
1587 case 0x4011: 1079 case 0x4011:
1588 PWC_INFO("Creative Labs Webcam Pro Ex detected.\n"); 1080 PWC_INFO("Creative Labs Webcam Pro Ex detected.\n");
@@ -1640,6 +1132,9 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1640 else 1132 else
1641 return -ENODEV; /* Not any of the know types; but the list keeps growing. */ 1133 return -ENODEV; /* Not any of the know types; but the list keeps growing. */
1642 1134
1135 if (my_power_save == -1)
1136 my_power_save = 0;
1137
1643 memset(serial_number, 0, 30); 1138 memset(serial_number, 0, 30);
1644 usb_string(udev, udev->descriptor.iSerialNumber, serial_number, 29); 1139 usb_string(udev, udev->descriptor.iSerialNumber, serial_number, 29);
1645 PWC_DEBUG_PROBE("Device serial number is %s\n", serial_number); 1140 PWC_DEBUG_PROBE("Device serial number is %s\n", serial_number);
@@ -1654,7 +1149,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1654 return -ENOMEM; 1149 return -ENOMEM;
1655 } 1150 }
1656 pdev->type = type_id; 1151 pdev->type = type_id;
1657 pdev->vsize = default_size;
1658 pdev->vframes = default_fps; 1152 pdev->vframes = default_fps;
1659 strcpy(pdev->serial, serial_number); 1153 strcpy(pdev->serial, serial_number);
1660 pdev->features = features; 1154 pdev->features = features;
@@ -1668,13 +1162,26 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1668 pdev->angle_range.tilt_min = -3000; 1162 pdev->angle_range.tilt_min = -3000;
1669 pdev->angle_range.tilt_max = 2500; 1163 pdev->angle_range.tilt_max = 2500;
1670 } 1164 }
1165 pwc_construct(pdev); /* set min/max sizes correct */
1671 1166
1672 mutex_init(&pdev->modlock); 1167 mutex_init(&pdev->modlock);
1673 spin_lock_init(&pdev->ptrlock); 1168 mutex_init(&pdev->udevlock);
1169 spin_lock_init(&pdev->queued_bufs_lock);
1170 INIT_LIST_HEAD(&pdev->queued_bufs);
1674 1171
1675 pdev->udev = udev; 1172 pdev->udev = udev;
1676 init_waitqueue_head(&pdev->frameq);
1677 pdev->vcompression = pwc_preferred_compression; 1173 pdev->vcompression = pwc_preferred_compression;
1174 pdev->power_save = my_power_save;
1175
1176 /* Init videobuf2 queue structure */
1177 memset(&pdev->vb_queue, 0, sizeof(pdev->vb_queue));
1178 pdev->vb_queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1179 pdev->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
1180 pdev->vb_queue.drv_priv = pdev;
1181 pdev->vb_queue.buf_struct_size = sizeof(struct pwc_frame_buf);
1182 pdev->vb_queue.ops = &pwc_vb_queue_ops;
1183 pdev->vb_queue.mem_ops = &vb2_vmalloc_memops;
1184 vb2_queue_init(&pdev->vb_queue);
1678 1185
1679 /* Init video_device structure */ 1186 /* Init video_device structure */
1680 memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template)); 1187 memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template));
@@ -1707,14 +1214,40 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1707 PWC_DEBUG_PROBE("probe() function returning struct at 0x%p.\n", pdev); 1214 PWC_DEBUG_PROBE("probe() function returning struct at 0x%p.\n", pdev);
1708 usb_set_intfdata(intf, pdev); 1215 usb_set_intfdata(intf, pdev);
1709 1216
1217#ifdef CONFIG_USB_PWC_DEBUG
1218 /* Query sensor type */
1219 if (pwc_get_cmos_sensor(pdev, &rc) >= 0) {
1220 PWC_DEBUG_OPEN("This %s camera is equipped with a %s (%d).\n",
1221 pdev->vdev.name,
1222 pwc_sensor_type_to_string(rc), rc);
1223 }
1224#endif
1225
1710 /* Set the leds off */ 1226 /* Set the leds off */
1711 pwc_set_leds(pdev, 0, 0); 1227 pwc_set_leds(pdev, 0, 0);
1228
1229 /* Setup intial videomode */
1230 rc = pwc_set_video_mode(pdev, pdev->view_max.x, pdev->view_max.y,
1231 pdev->vframes, pdev->vcompression, 0);
1232 if (rc)
1233 goto err_free_mem;
1234
1235 /* Register controls (and read default values from camera */
1236 rc = pwc_init_controls(pdev);
1237 if (rc) {
1238 PWC_ERROR("Failed to register v4l2 controls (%d).\n", rc);
1239 goto err_free_mem;
1240 }
1241
1242 pdev->vdev.ctrl_handler = &pdev->ctrl_handler;
1243
1244 /* And powerdown the camera until streaming starts */
1712 pwc_camera_power(pdev, 0); 1245 pwc_camera_power(pdev, 0);
1713 1246
1714 rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr); 1247 rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr);
1715 if (rc < 0) { 1248 if (rc < 0) {
1716 PWC_ERROR("Failed to register as video device (%d).\n", rc); 1249 PWC_ERROR("Failed to register as video device (%d).\n", rc);
1717 goto err_free_mem; 1250 goto err_free_controls;
1718 } 1251 }
1719 rc = pwc_create_sysfs_files(pdev); 1252 rc = pwc_create_sysfs_files(pdev);
1720 if (rc) 1253 if (rc)
@@ -1757,7 +1290,10 @@ err_video_unreg:
1757 if (hint < MAX_DEV_HINTS) 1290 if (hint < MAX_DEV_HINTS)
1758 device_hint[hint].pdev = NULL; 1291 device_hint[hint].pdev = NULL;
1759 video_unregister_device(&pdev->vdev); 1292 video_unregister_device(&pdev->vdev);
1293err_free_controls:
1294 v4l2_ctrl_handler_free(&pdev->ctrl_handler);
1760err_free_mem: 1295err_free_mem:
1296 usb_set_intfdata(intf, NULL);
1761 kfree(pdev); 1297 kfree(pdev);
1762 return rc; 1298 return rc;
1763} 1299}
@@ -1767,33 +1303,17 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
1767{ 1303{
1768 struct pwc_device *pdev = usb_get_intfdata(intf); 1304 struct pwc_device *pdev = usb_get_intfdata(intf);
1769 1305
1306 mutex_lock(&pdev->udevlock);
1770 mutex_lock(&pdev->modlock); 1307 mutex_lock(&pdev->modlock);
1771 usb_set_intfdata (intf, NULL);
1772 if (pdev == NULL) {
1773 PWC_ERROR("pwc_disconnect() Called without private pointer.\n");
1774 goto disconnect_out;
1775 }
1776 if (pdev->udev == NULL) {
1777 PWC_ERROR("pwc_disconnect() already called for %p\n", pdev);
1778 goto disconnect_out;
1779 }
1780 if (pdev->udev != interface_to_usbdev(intf)) {
1781 PWC_ERROR("pwc_disconnect() Woops: pointer mismatch udev/pdev.\n");
1782 goto disconnect_out;
1783 }
1784
1785 /* We got unplugged; this is signalled by an EPIPE error code */
1786 pdev->error_status = EPIPE;
1787 pdev->unplugged = 1;
1788
1789 /* Alert waiting processes */
1790 wake_up_interruptible(&pdev->frameq);
1791 1308
1309 usb_set_intfdata(intf, NULL);
1792 /* No need to keep the urbs around after disconnection */ 1310 /* No need to keep the urbs around after disconnection */
1793 pwc_isoc_cleanup(pdev); 1311 pwc_isoc_cleanup(pdev);
1312 pwc_cleanup_queued_bufs(pdev);
1313 pdev->udev = NULL;
1794 1314
1795disconnect_out:
1796 mutex_unlock(&pdev->modlock); 1315 mutex_unlock(&pdev->modlock);
1316 mutex_unlock(&pdev->udevlock);
1797 1317
1798 pwc_remove_sysfs_files(pdev); 1318 pwc_remove_sysfs_files(pdev);
1799 video_unregister_device(&pdev->vdev); 1319 video_unregister_device(&pdev->vdev);
@@ -1809,36 +1329,27 @@ disconnect_out:
1809 * Initialization code & module stuff 1329 * Initialization code & module stuff
1810 */ 1330 */
1811 1331
1812static char *size;
1813static int fps; 1332static int fps;
1814static int fbufs;
1815static int mbufs;
1816static int compression = -1; 1333static int compression = -1;
1817static int leds[2] = { -1, -1 }; 1334static int leds[2] = { -1, -1 };
1818static unsigned int leds_nargs; 1335static unsigned int leds_nargs;
1819static char *dev_hint[MAX_DEV_HINTS]; 1336static char *dev_hint[MAX_DEV_HINTS];
1820static unsigned int dev_hint_nargs; 1337static unsigned int dev_hint_nargs;
1821 1338
1822module_param(size, charp, 0444);
1823module_param(fps, int, 0444); 1339module_param(fps, int, 0444);
1824module_param(fbufs, int, 0444);
1825module_param(mbufs, int, 0444);
1826#ifdef CONFIG_USB_PWC_DEBUG 1340#ifdef CONFIG_USB_PWC_DEBUG
1827module_param_named(trace, pwc_trace, int, 0644); 1341module_param_named(trace, pwc_trace, int, 0644);
1828#endif 1342#endif
1829module_param(power_save, int, 0444); 1343module_param(power_save, int, 0644);
1830module_param(compression, int, 0444); 1344module_param(compression, int, 0444);
1831module_param_array(leds, int, &leds_nargs, 0444); 1345module_param_array(leds, int, &leds_nargs, 0444);
1832module_param_array(dev_hint, charp, &dev_hint_nargs, 0444); 1346module_param_array(dev_hint, charp, &dev_hint_nargs, 0444);
1833 1347
1834MODULE_PARM_DESC(size, "Initial image size. One of sqcif, qsif, qcif, sif, cif, vga");
1835MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30"); 1348MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30");
1836MODULE_PARM_DESC(fbufs, "Number of internal frame buffers to reserve");
1837MODULE_PARM_DESC(mbufs, "Number of external (mmap()ed) image buffers");
1838#ifdef CONFIG_USB_PWC_DEBUG 1349#ifdef CONFIG_USB_PWC_DEBUG
1839MODULE_PARM_DESC(trace, "For debugging purposes"); 1350MODULE_PARM_DESC(trace, "For debugging purposes");
1840#endif 1351#endif
1841MODULE_PARM_DESC(power_save, "Turn power save feature in camera on or off"); 1352MODULE_PARM_DESC(power_save, "Turn power saving for new cameras on or off");
1842MODULE_PARM_DESC(compression, "Preferred compression quality. Range 0 (uncompressed) to 3 (high compression)"); 1353MODULE_PARM_DESC(compression, "Preferred compression quality. Range 0 (uncompressed) to 3 (high compression)");
1843MODULE_PARM_DESC(leds, "LED on,off time in milliseconds"); 1354MODULE_PARM_DESC(leds, "LED on,off time in milliseconds");
1844MODULE_PARM_DESC(dev_hint, "Device node hints"); 1355MODULE_PARM_DESC(dev_hint, "Device node hints");
@@ -1851,14 +1362,19 @@ MODULE_VERSION( PWC_VERSION );
1851 1362
1852static int __init usb_pwc_init(void) 1363static int __init usb_pwc_init(void)
1853{ 1364{
1854 int i, sz; 1365 int i;
1855 char *sizenames[PSZ_MAX] = { "sqcif", "qsif", "qcif", "sif", "cif", "vga" };
1856 1366
1367#ifdef CONFIG_USB_PWC_DEBUG
1857 PWC_INFO("Philips webcam module version " PWC_VERSION " loaded.\n"); 1368 PWC_INFO("Philips webcam module version " PWC_VERSION " loaded.\n");
1858 PWC_INFO("Supports Philips PCA645/646, PCVC675/680/690, PCVC720[40]/730/740/750 & PCVC830/840.\n"); 1369 PWC_INFO("Supports Philips PCA645/646, PCVC675/680/690, PCVC720[40]/730/740/750 & PCVC830/840.\n");
1859 PWC_INFO("Also supports the Askey VC010, various Logitech Quickcams, Samsung MPC-C10 and MPC-C30,\n"); 1370 PWC_INFO("Also supports the Askey VC010, various Logitech Quickcams, Samsung MPC-C10 and MPC-C30,\n");
1860 PWC_INFO("the Creative WebCam 5 & Pro Ex, SOTEC Afina Eye and Visionite VCS-UC300 and VCS-UM100.\n"); 1371 PWC_INFO("the Creative WebCam 5 & Pro Ex, SOTEC Afina Eye and Visionite VCS-UC300 and VCS-UM100.\n");
1861 1372
1373 if (pwc_trace >= 0) {
1374 PWC_DEBUG_MODULE("Trace options: 0x%04x\n", pwc_trace);
1375 }
1376#endif
1377
1862 if (fps) { 1378 if (fps) {
1863 if (fps < 4 || fps > 30) { 1379 if (fps < 4 || fps > 30) {
1864 PWC_ERROR("Framerate out of bounds (4-30).\n"); 1380 PWC_ERROR("Framerate out of bounds (4-30).\n");
@@ -1868,41 +1384,6 @@ static int __init usb_pwc_init(void)
1868 PWC_DEBUG_MODULE("Default framerate set to %d.\n", default_fps); 1384 PWC_DEBUG_MODULE("Default framerate set to %d.\n", default_fps);
1869 } 1385 }
1870 1386
1871 if (size) {
1872 /* string; try matching with array */
1873 for (sz = 0; sz < PSZ_MAX; sz++) {
1874 if (!strcmp(sizenames[sz], size)) { /* Found! */
1875 default_size = sz;
1876 break;
1877 }
1878 }
1879 if (sz == PSZ_MAX) {
1880 PWC_ERROR("Size not recognized; try size=[sqcif | qsif | qcif | sif | cif | vga].\n");
1881 return -EINVAL;
1882 }
1883 PWC_DEBUG_MODULE("Default image size set to %s [%dx%d].\n", sizenames[default_size], pwc_image_sizes[default_size].x, pwc_image_sizes[default_size].y);
1884 }
1885 if (mbufs) {
1886 if (mbufs < 1 || mbufs > MAX_IMAGES) {
1887 PWC_ERROR("Illegal number of mmap() buffers; use a number between 1 and %d.\n", MAX_IMAGES);
1888 return -EINVAL;
1889 }
1890 pwc_mbufs = mbufs;
1891 PWC_DEBUG_MODULE("Number of image buffers set to %d.\n", pwc_mbufs);
1892 }
1893 if (fbufs) {
1894 if (fbufs < 2 || fbufs > MAX_FRAMES) {
1895 PWC_ERROR("Illegal number of frame buffers; use a number between 2 and %d.\n", MAX_FRAMES);
1896 return -EINVAL;
1897 }
1898 default_fbufs = fbufs;
1899 PWC_DEBUG_MODULE("Number of frame buffers set to %d.\n", default_fbufs);
1900 }
1901#ifdef CONFIG_USB_PWC_DEBUG
1902 if (pwc_trace >= 0) {
1903 PWC_DEBUG_MODULE("Trace options: 0x%04x\n", pwc_trace);
1904 }
1905#endif
1906 if (compression >= 0) { 1387 if (compression >= 0) {
1907 if (compression > 3) { 1388 if (compression > 3) {
1908 PWC_ERROR("Invalid compression setting; use a number between 0 (uncompressed) and 3 (high).\n"); 1389 PWC_ERROR("Invalid compression setting; use a number between 0 (uncompressed) and 3 (high).\n");
@@ -1911,8 +1392,6 @@ static int __init usb_pwc_init(void)
1911 pwc_preferred_compression = compression; 1392 pwc_preferred_compression = compression;
1912 PWC_DEBUG_MODULE("Preferred compression set to %d.\n", pwc_preferred_compression); 1393 PWC_DEBUG_MODULE("Preferred compression set to %d.\n", pwc_preferred_compression);
1913 } 1394 }
1914 if (power_save)
1915 PWC_DEBUG_MODULE("Enabling power save on open/close.\n");
1916 if (leds[0] >= 0) 1395 if (leds[0] >= 0)
1917 led_on = leds[0]; 1396 led_on = leds[0];
1918 if (leds[1] >= 0) 1397 if (leds[1] >= 0)
diff --git a/drivers/media/video/pwc/pwc-ioctl.h b/drivers/media/video/pwc/pwc-ioctl.h
deleted file mode 100644
index 8c0cae7b3daf..000000000000
--- a/drivers/media/video/pwc/pwc-ioctl.h
+++ /dev/null
@@ -1,323 +0,0 @@
1#ifndef PWC_IOCTL_H
2#define PWC_IOCTL_H
3
4/* (C) 2001-2004 Nemosoft Unv.
5 (C) 2004-2006 Luc Saillard (luc@saillard.org)
6
7 NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
8 driver and thus may have bugs that are not present in the original version.
9 Please send bug reports and support requests to <luc@saillard.org>.
10 The decompression routines have been implemented by reverse-engineering the
11 Nemosoft binary pwcx module. Caveat emptor.
12
13 This program is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2 of the License, or
16 (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26*/
27
28/* This is pwc-ioctl.h belonging to PWC 10.0.10
29 It contains structures and defines to communicate from user space
30 directly to the driver.
31 */
32
33/*
34 Changes
35 2001/08/03 Alvarado Added ioctl constants to access methods for
36 changing white balance and red/blue gains
37 2002/12/15 G. H. Fernandez-Toribio VIDIOCGREALSIZE
38 2003/12/13 Nemosft Unv. Some modifications to make interfacing to
39 PWCX easier
40 */
41
42/* These are private ioctl() commands, specific for the Philips webcams.
43 They contain functions not found in other webcams, and settings not
44 specified in the Video4Linux API.
45
46 The #define names are built up like follows:
47 VIDIOC VIDeo IOCtl prefix
48 PWC Philps WebCam
49 G optional: Get
50 S optional: Set
51 ... the function
52 */
53
54#include <linux/types.h>
55#include <linux/version.h>
56
57 /* Enumeration of image sizes */
58#define PSZ_SQCIF 0x00
59#define PSZ_QSIF 0x01
60#define PSZ_QCIF 0x02
61#define PSZ_SIF 0x03
62#define PSZ_CIF 0x04
63#define PSZ_VGA 0x05
64#define PSZ_MAX 6
65
66
67/* The frame rate is encoded in the video_window.flags parameter using
68 the upper 16 bits, since some flags are defined nowadays. The following
69 defines provide a mask and shift to filter out this value.
70 This value can also be passing using the private flag when using v4l2 and
71 VIDIOC_S_FMT ioctl.
72
73 In 'Snapshot' mode the camera freezes its automatic exposure and colour
74 balance controls.
75 */
76#define PWC_FPS_SHIFT 16
77#define PWC_FPS_MASK 0x00FF0000
78#define PWC_FPS_FRMASK 0x003F0000
79#define PWC_FPS_SNAPSHOT 0x00400000
80#define PWC_QLT_MASK 0x03000000
81#define PWC_QLT_SHIFT 24
82
83
84/* structure for transferring x & y coordinates */
85struct pwc_coord
86{
87 int x, y; /* guess what */
88 int size; /* size, or offset */
89};
90
91
92/* Used with VIDIOCPWCPROBE */
93struct pwc_probe
94{
95 char name[32];
96 int type;
97};
98
99struct pwc_serial
100{
101 char serial[30]; /* String with serial number. Contains terminating 0 */
102};
103
104/* pwc_whitebalance.mode values */
105#define PWC_WB_INDOOR 0
106#define PWC_WB_OUTDOOR 1
107#define PWC_WB_FL 2
108#define PWC_WB_MANUAL 3
109#define PWC_WB_AUTO 4
110
111/* Used with VIDIOCPWC[SG]AWB (Auto White Balance).
112 Set mode to one of the PWC_WB_* values above.
113 *red and *blue are the respective gains of these colour components inside
114 the camera; range 0..65535
115 When 'mode' == PWC_WB_MANUAL, 'manual_red' and 'manual_blue' are set or read;
116 otherwise undefined.
117 'read_red' and 'read_blue' are read-only.
118*/
119struct pwc_whitebalance
120{
121 int mode;
122 int manual_red, manual_blue; /* R/W */
123 int read_red, read_blue; /* R/O */
124};
125
126/*
127 'control_speed' and 'control_delay' are used in automatic whitebalance mode,
128 and tell the camera how fast it should react to changes in lighting, and
129 with how much delay. Valid values are 0..65535.
130*/
131struct pwc_wb_speed
132{
133 int control_speed;
134 int control_delay;
135
136};
137
138/* Used with VIDIOCPWC[SG]LED */
139struct pwc_leds
140{
141 int led_on; /* Led on-time; range = 0..25000 */
142 int led_off; /* Led off-time; range = 0..25000 */
143};
144
145/* Image size (used with GREALSIZE) */
146struct pwc_imagesize
147{
148 int width;
149 int height;
150};
151
152/* Defines and structures for Motorized Pan & Tilt */
153#define PWC_MPT_PAN 0x01
154#define PWC_MPT_TILT 0x02
155#define PWC_MPT_TIMEOUT 0x04 /* for status */
156
157/* Set angles; when absolute != 0, the angle is absolute and the
158 driver calculates the relative offset for you. This can only
159 be used with VIDIOCPWCSANGLE; VIDIOCPWCGANGLE always returns
160 absolute angles.
161 */
162struct pwc_mpt_angles
163{
164 int absolute; /* write-only */
165 int pan; /* degrees * 100 */
166 int tilt; /* degress * 100 */
167};
168
169/* Range of angles of the camera, both horizontally and vertically.
170 */
171struct pwc_mpt_range
172{
173 int pan_min, pan_max; /* degrees * 100 */
174 int tilt_min, tilt_max;
175};
176
177struct pwc_mpt_status
178{
179 int status;
180 int time_pan;
181 int time_tilt;
182};
183
184
185/* This is used for out-of-kernel decompression. With it, you can get
186 all the necessary information to initialize and use the decompressor
187 routines in standalone applications.
188 */
189struct pwc_video_command
190{
191 int type; /* camera type (645, 675, 730, etc.) */
192 int release; /* release number */
193
194 int size; /* one of PSZ_* */
195 int alternate;
196 int command_len; /* length of USB video command */
197 unsigned char command_buf[13]; /* Actual USB video command */
198 int bandlength; /* >0 = compressed */
199 int frame_size; /* Size of one (un)compressed frame */
200};
201
202/* Flags for PWCX subroutines. Not all modules honour all flags. */
203#define PWCX_FLAG_PLANAR 0x0001
204#define PWCX_FLAG_BAYER 0x0008
205
206
207/* IOCTL definitions */
208
209 /* Restore user settings */
210#define VIDIOCPWCRUSER _IO('v', 192)
211 /* Save user settings */
212#define VIDIOCPWCSUSER _IO('v', 193)
213 /* Restore factory settings */
214#define VIDIOCPWCFACTORY _IO('v', 194)
215
216 /* You can manipulate the compression factor. A compression preference of 0
217 means use uncompressed modes when available; 1 is low compression, 2 is
218 medium and 3 is high compression preferred. Of course, the higher the
219 compression, the lower the bandwidth used but more chance of artefacts
220 in the image. The driver automatically chooses a higher compression when
221 the preferred mode is not available.
222 */
223 /* Set preferred compression quality (0 = uncompressed, 3 = highest compression) */
224#define VIDIOCPWCSCQUAL _IOW('v', 195, int)
225 /* Get preferred compression quality */
226#define VIDIOCPWCGCQUAL _IOR('v', 195, int)
227
228
229/* Retrieve serial number of camera */
230#define VIDIOCPWCGSERIAL _IOR('v', 198, struct pwc_serial)
231
232 /* This is a probe function; since so many devices are supported, it
233 becomes difficult to include all the names in programs that want to
234 check for the enhanced Philips stuff. So in stead, try this PROBE;
235 it returns a structure with the original name, and the corresponding
236 Philips type.
237 To use, fill the structure with zeroes, call PROBE and if that succeeds,
238 compare the name with that returned from VIDIOCGCAP; they should be the
239 same. If so, you can be assured it is a Philips (OEM) cam and the type
240 is valid.
241 */
242#define VIDIOCPWCPROBE _IOR('v', 199, struct pwc_probe)
243
244 /* Set AGC (Automatic Gain Control); int < 0 = auto, 0..65535 = fixed */
245#define VIDIOCPWCSAGC _IOW('v', 200, int)
246 /* Get AGC; int < 0 = auto; >= 0 = fixed, range 0..65535 */
247#define VIDIOCPWCGAGC _IOR('v', 200, int)
248 /* Set shutter speed; int < 0 = auto; >= 0 = fixed, range 0..65535 */
249#define VIDIOCPWCSSHUTTER _IOW('v', 201, int)
250
251 /* Color compensation (Auto White Balance) */
252#define VIDIOCPWCSAWB _IOW('v', 202, struct pwc_whitebalance)
253#define VIDIOCPWCGAWB _IOR('v', 202, struct pwc_whitebalance)
254
255 /* Auto WB speed */
256#define VIDIOCPWCSAWBSPEED _IOW('v', 203, struct pwc_wb_speed)
257#define VIDIOCPWCGAWBSPEED _IOR('v', 203, struct pwc_wb_speed)
258
259 /* LEDs on/off/blink; int range 0..65535 */
260#define VIDIOCPWCSLED _IOW('v', 205, struct pwc_leds)
261#define VIDIOCPWCGLED _IOR('v', 205, struct pwc_leds)
262
263 /* Contour (sharpness); int < 0 = auto, 0..65536 = fixed */
264#define VIDIOCPWCSCONTOUR _IOW('v', 206, int)
265#define VIDIOCPWCGCONTOUR _IOR('v', 206, int)
266
267 /* Backlight compensation; 0 = off, otherwise on */
268#define VIDIOCPWCSBACKLIGHT _IOW('v', 207, int)
269#define VIDIOCPWCGBACKLIGHT _IOR('v', 207, int)
270
271 /* Flickerless mode; = 0 off, otherwise on */
272#define VIDIOCPWCSFLICKER _IOW('v', 208, int)
273#define VIDIOCPWCGFLICKER _IOR('v', 208, int)
274
275 /* Dynamic noise reduction; 0 off, 3 = high noise reduction */
276#define VIDIOCPWCSDYNNOISE _IOW('v', 209, int)
277#define VIDIOCPWCGDYNNOISE _IOR('v', 209, int)
278
279 /* Real image size as used by the camera; tells you whether or not there's a gray border around the image */
280#define VIDIOCPWCGREALSIZE _IOR('v', 210, struct pwc_imagesize)
281
282 /* Motorized pan & tilt functions */
283#define VIDIOCPWCMPTRESET _IOW('v', 211, int)
284#define VIDIOCPWCMPTGRANGE _IOR('v', 211, struct pwc_mpt_range)
285#define VIDIOCPWCMPTSANGLE _IOW('v', 212, struct pwc_mpt_angles)
286#define VIDIOCPWCMPTGANGLE _IOR('v', 212, struct pwc_mpt_angles)
287#define VIDIOCPWCMPTSTATUS _IOR('v', 213, struct pwc_mpt_status)
288
289 /* Get the USB set-video command; needed for initializing libpwcx */
290#define VIDIOCPWCGVIDCMD _IOR('v', 215, struct pwc_video_command)
291struct pwc_table_init_buffer {
292 int len;
293 char *buffer;
294
295};
296#define VIDIOCPWCGVIDTABLE _IOR('v', 216, struct pwc_table_init_buffer)
297
298/*
299 * This is private command used when communicating with v4l2.
300 * In the future all private ioctl will be remove/replace to
301 * use interface offer by v4l2.
302 */
303
304#define V4L2_CID_PRIVATE_SAVE_USER (V4L2_CID_PRIVATE_BASE + 0)
305#define V4L2_CID_PRIVATE_RESTORE_USER (V4L2_CID_PRIVATE_BASE + 1)
306#define V4L2_CID_PRIVATE_RESTORE_FACTORY (V4L2_CID_PRIVATE_BASE + 2)
307#define V4L2_CID_PRIVATE_COLOUR_MODE (V4L2_CID_PRIVATE_BASE + 3)
308#define V4L2_CID_PRIVATE_AUTOCONTOUR (V4L2_CID_PRIVATE_BASE + 4)
309#define V4L2_CID_PRIVATE_CONTOUR (V4L2_CID_PRIVATE_BASE + 5)
310#define V4L2_CID_PRIVATE_BACKLIGHT (V4L2_CID_PRIVATE_BASE + 6)
311#define V4L2_CID_PRIVATE_FLICKERLESS (V4L2_CID_PRIVATE_BASE + 7)
312#define V4L2_CID_PRIVATE_NOISE_REDUCTION (V4L2_CID_PRIVATE_BASE + 8)
313
314struct pwc_raw_frame {
315 __le16 type; /* type of the webcam */
316 __le16 vbandlength; /* Size of 4lines compressed (used by the decompressor) */
317 __u8 cmd[4]; /* the four byte of the command (in case of nala,
318 only the first 3 bytes is filled) */
319 __u8 rawframe[0]; /* frame_size = H/4*vbandlength */
320} __attribute__ ((packed));
321
322
323#endif
diff --git a/drivers/media/video/pwc/pwc-kiara.c b/drivers/media/video/pwc/pwc-kiara.c
index f4ae83c0cf2b..e5f4fd817125 100644
--- a/drivers/media/video/pwc/pwc-kiara.c
+++ b/drivers/media/video/pwc/pwc-kiara.c
@@ -40,7 +40,6 @@
40 40
41 41
42#include "pwc-kiara.h" 42#include "pwc-kiara.h"
43#include "pwc-uncompress.h"
44 43
45const unsigned int Kiara_fps_vector[PWC_FPS_MAX_KIARA] = { 5, 10, 15, 20, 25, 30 }; 44const unsigned int Kiara_fps_vector[PWC_FPS_MAX_KIARA] = { 5, 10, 15, 20, 25, 30 };
46 45
diff --git a/drivers/media/video/pwc/pwc-misc.c b/drivers/media/video/pwc/pwc-misc.c
index 6af5bb538358..0b031336eab8 100644
--- a/drivers/media/video/pwc/pwc-misc.c
+++ b/drivers/media/video/pwc/pwc-misc.c
@@ -126,8 +126,4 @@ void pwc_construct(struct pwc_device *pdev)
126 pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */ 126 pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */
127 pdev->view_min.size = pdev->view_min.x * pdev->view_min.y; 127 pdev->view_min.size = pdev->view_min.x * pdev->view_min.y;
128 pdev->view_max.size = pdev->view_max.x * pdev->view_max.y; 128 pdev->view_max.size = pdev->view_max.x * pdev->view_max.y;
129 /* length of image, in YUV format; always allocate enough memory. */
130 pdev->len_per_image = PAGE_ALIGN((pdev->abs_max.x * pdev->abs_max.y * 3) / 2);
131} 129}
132
133
diff --git a/drivers/media/video/pwc/pwc-uncompress.c b/drivers/media/video/pwc/pwc-uncompress.c
index 3b73f295f032..51265092bd31 100644
--- a/drivers/media/video/pwc/pwc-uncompress.c
+++ b/drivers/media/video/pwc/pwc-uncompress.c
@@ -30,26 +30,17 @@
30#include <asm/types.h> 30#include <asm/types.h>
31 31
32#include "pwc.h" 32#include "pwc.h"
33#include "pwc-uncompress.h"
34#include "pwc-dec1.h" 33#include "pwc-dec1.h"
35#include "pwc-dec23.h" 34#include "pwc-dec23.h"
36 35
37int pwc_decompress(struct pwc_device *pdev) 36int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf)
38{ 37{
39 struct pwc_frame_buf *fbuf;
40 int n, line, col, stride; 38 int n, line, col, stride;
41 void *yuv, *image; 39 void *yuv, *image;
42 u16 *src; 40 u16 *src;
43 u16 *dsty, *dstu, *dstv; 41 u16 *dsty, *dstu, *dstv;
44 42
45 if (pdev == NULL) 43 image = vb2_plane_vaddr(&fbuf->vb, 0);
46 return -EFAULT;
47
48 fbuf = pdev->read_frame;
49 if (fbuf == NULL)
50 return -EFAULT;
51 image = pdev->image_data;
52 image += pdev->images[pdev->fill_image].offset;
53 44
54 yuv = fbuf->data + pdev->frame_header_size; /* Skip header */ 45 yuv = fbuf->data + pdev->frame_header_size; /* Skip header */
55 46
@@ -64,9 +55,13 @@ int pwc_decompress(struct pwc_device *pdev)
64 * determine this using the type of the webcam */ 55 * determine this using the type of the webcam */
65 memcpy(raw_frame->cmd, pdev->cmd_buf, 4); 56 memcpy(raw_frame->cmd, pdev->cmd_buf, 4);
66 memcpy(raw_frame+1, yuv, pdev->frame_size); 57 memcpy(raw_frame+1, yuv, pdev->frame_size);
58 vb2_set_plane_payload(&fbuf->vb, 0,
59 pdev->frame_size + sizeof(struct pwc_raw_frame));
67 return 0; 60 return 0;
68 } 61 }
69 62
63 vb2_set_plane_payload(&fbuf->vb, 0, pdev->view.size);
64
70 if (pdev->vbandlength == 0) { 65 if (pdev->vbandlength == 0) {
71 /* Uncompressed mode. 66 /* Uncompressed mode.
72 * We copy the data into the output buffer, using the viewport 67 * We copy the data into the output buffer, using the viewport
diff --git a/drivers/media/video/pwc/pwc-uncompress.h b/drivers/media/video/pwc/pwc-uncompress.h
deleted file mode 100644
index 43028e74e9e0..000000000000
--- a/drivers/media/video/pwc/pwc-uncompress.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/* (C) 1999-2003 Nemosoft Unv.
2 (C) 2004-2006 Luc Saillard (luc@saillard.org)
3
4 NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
5 driver and thus may have bugs that are not present in the original version.
6 Please send bug reports and support requests to <luc@saillard.org>.
7 The decompression routines have been implemented by reverse-engineering the
8 Nemosoft binary pwcx module. Caveat emptor.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23*/
24
25/* This file is the bridge between the kernel module and the plugin; it
26 describes the structures and datatypes used in both modules. Any
27 significant change should be reflected by increasing the
28 pwc_decompressor_version major number.
29 */
30#ifndef PWC_UNCOMPRESS_H
31#define PWC_UNCOMPRESS_H
32
33
34#include <media/pwc-ioctl.h>
35
36/* from pwc-dec.h */
37#define PWCX_FLAG_PLANAR 0x0001
38/* */
39
40#endif
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index f85c51249c7b..e9a0e94b9995 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -2,6 +2,7 @@
2 USB and Video4Linux interface part. 2 USB and Video4Linux interface part.
3 (C) 1999-2004 Nemosoft Unv. 3 (C) 1999-2004 Nemosoft Unv.
4 (C) 2004-2006 Luc Saillard (luc@saillard.org) 4 (C) 2004-2006 Luc Saillard (luc@saillard.org)
5 (C) 2011 Hans de Goede <hdegoede@redhat.com>
5 6
6 NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx 7 NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
7 driver and thus may have bugs that are not present in the original version. 8 driver and thus may have bugs that are not present in the original version.
@@ -31,184 +32,330 @@
31#include <linux/module.h> 32#include <linux/module.h>
32#include <linux/poll.h> 33#include <linux/poll.h>
33#include <linux/vmalloc.h> 34#include <linux/vmalloc.h>
35#include <linux/jiffies.h>
34#include <asm/io.h> 36#include <asm/io.h>
35 37
36#include "pwc.h" 38#include "pwc.h"
37 39
38static struct v4l2_queryctrl pwc_controls[] = { 40#define PWC_CID_CUSTOM(ctrl) ((V4L2_CID_USER_BASE | 0xf000) + custom_ ## ctrl)
39 { 41
40 .id = V4L2_CID_BRIGHTNESS, 42static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl);
41 .type = V4L2_CTRL_TYPE_INTEGER, 43static int pwc_s_ctrl(struct v4l2_ctrl *ctrl);
42 .name = "Brightness", 44
43 .minimum = 0, 45static const struct v4l2_ctrl_ops pwc_ctrl_ops = {
44 .maximum = 128, 46 .g_volatile_ctrl = pwc_g_volatile_ctrl,
45 .step = 1, 47 .s_ctrl = pwc_s_ctrl,
46 .default_value = 64, 48};
47 }, 49
48 { 50enum { awb_indoor, awb_outdoor, awb_fl, awb_manual, awb_auto };
49 .id = V4L2_CID_CONTRAST, 51enum { custom_autocontour, custom_contour, custom_noise_reduction,
50 .type = V4L2_CTRL_TYPE_INTEGER, 52 custom_save_user, custom_restore_user, custom_restore_factory };
51 .name = "Contrast", 53
52 .minimum = 0, 54const char * const pwc_auto_whitebal_qmenu[] = {
53 .maximum = 64, 55 "Indoor (Incandescant Lighting) Mode",
54 .step = 1, 56 "Outdoor (Sunlight) Mode",
55 .default_value = 0, 57 "Indoor (Fluorescent Lighting) Mode",
56 }, 58 "Manual Mode",
57 { 59 "Auto Mode",
58 .id = V4L2_CID_SATURATION, 60 NULL
59 .type = V4L2_CTRL_TYPE_INTEGER, 61};
60 .name = "Saturation", 62
61 .minimum = -100, 63static const struct v4l2_ctrl_config pwc_auto_white_balance_cfg = {
62 .maximum = 100, 64 .ops = &pwc_ctrl_ops,
63 .step = 1, 65 .id = V4L2_CID_AUTO_WHITE_BALANCE,
64 .default_value = 0, 66 .type = V4L2_CTRL_TYPE_MENU,
65 }, 67 .max = awb_auto,
66 { 68 .qmenu = pwc_auto_whitebal_qmenu,
67 .id = V4L2_CID_GAMMA, 69};
68 .type = V4L2_CTRL_TYPE_INTEGER, 70
69 .name = "Gamma", 71static const struct v4l2_ctrl_config pwc_autocontour_cfg = {
70 .minimum = 0, 72 .ops = &pwc_ctrl_ops,
71 .maximum = 32, 73 .id = PWC_CID_CUSTOM(autocontour),
72 .step = 1, 74 .type = V4L2_CTRL_TYPE_BOOLEAN,
73 .default_value = 0, 75 .name = "Auto contour",
74 }, 76 .min = 0,
75 { 77 .max = 1,
76 .id = V4L2_CID_RED_BALANCE, 78 .step = 1,
77 .type = V4L2_CTRL_TYPE_INTEGER, 79};
78 .name = "Red Gain", 80
79 .minimum = 0, 81static const struct v4l2_ctrl_config pwc_contour_cfg = {
80 .maximum = 256, 82 .ops = &pwc_ctrl_ops,
81 .step = 1, 83 .id = PWC_CID_CUSTOM(contour),
82 .default_value = 0, 84 .type = V4L2_CTRL_TYPE_INTEGER,
83 }, 85 .name = "Contour",
84 { 86 .min = 0,
85 .id = V4L2_CID_BLUE_BALANCE, 87 .max = 63,
86 .type = V4L2_CTRL_TYPE_INTEGER, 88 .step = 1,
87 .name = "Blue Gain", 89};
88 .minimum = 0, 90
89 .maximum = 256, 91static const struct v4l2_ctrl_config pwc_backlight_cfg = {
90 .step = 1, 92 .ops = &pwc_ctrl_ops,
91 .default_value = 0, 93 .id = V4L2_CID_BACKLIGHT_COMPENSATION,
92 }, 94 .type = V4L2_CTRL_TYPE_BOOLEAN,
93 { 95 .min = 0,
94 .id = V4L2_CID_AUTO_WHITE_BALANCE, 96 .max = 1,
95 .type = V4L2_CTRL_TYPE_BOOLEAN, 97 .step = 1,
96 .name = "Auto White Balance", 98};
97 .minimum = 0, 99
98 .maximum = 1, 100static const struct v4l2_ctrl_config pwc_flicker_cfg = {
99 .step = 1, 101 .ops = &pwc_ctrl_ops,
100 .default_value = 0, 102 .id = V4L2_CID_BAND_STOP_FILTER,
101 }, 103 .type = V4L2_CTRL_TYPE_BOOLEAN,
102 { 104 .min = 0,
103 .id = V4L2_CID_EXPOSURE, 105 .max = 1,
104 .type = V4L2_CTRL_TYPE_INTEGER, 106 .step = 1,
105 .name = "Shutter Speed (Exposure)", 107};
106 .minimum = 0, 108
107 .maximum = 256, 109static const struct v4l2_ctrl_config pwc_noise_reduction_cfg = {
108 .step = 1, 110 .ops = &pwc_ctrl_ops,
109 .default_value = 200, 111 .id = PWC_CID_CUSTOM(noise_reduction),
110 }, 112 .type = V4L2_CTRL_TYPE_INTEGER,
111 { 113 .name = "Dynamic Noise Reduction",
112 .id = V4L2_CID_AUTOGAIN, 114 .min = 0,
113 .type = V4L2_CTRL_TYPE_BOOLEAN, 115 .max = 3,
114 .name = "Auto Gain Enabled", 116 .step = 1,
115 .minimum = 0, 117};
116 .maximum = 1, 118
117 .step = 1, 119static const struct v4l2_ctrl_config pwc_save_user_cfg = {
118 .default_value = 1, 120 .ops = &pwc_ctrl_ops,
119 }, 121 .id = PWC_CID_CUSTOM(save_user),
120 { 122 .type = V4L2_CTRL_TYPE_BUTTON,
121 .id = V4L2_CID_GAIN, 123 .name = "Save User Settings",
122 .type = V4L2_CTRL_TYPE_INTEGER,
123 .name = "Gain Level",
124 .minimum = 0,
125 .maximum = 256,
126 .step = 1,
127 .default_value = 0,
128 },
129 {
130 .id = V4L2_CID_PRIVATE_SAVE_USER,
131 .type = V4L2_CTRL_TYPE_BUTTON,
132 .name = "Save User Settings",
133 .minimum = 0,
134 .maximum = 0,
135 .step = 0,
136 .default_value = 0,
137 },
138 {
139 .id = V4L2_CID_PRIVATE_RESTORE_USER,
140 .type = V4L2_CTRL_TYPE_BUTTON,
141 .name = "Restore User Settings",
142 .minimum = 0,
143 .maximum = 0,
144 .step = 0,
145 .default_value = 0,
146 },
147 {
148 .id = V4L2_CID_PRIVATE_RESTORE_FACTORY,
149 .type = V4L2_CTRL_TYPE_BUTTON,
150 .name = "Restore Factory Settings",
151 .minimum = 0,
152 .maximum = 0,
153 .step = 0,
154 .default_value = 0,
155 },
156 {
157 .id = V4L2_CID_PRIVATE_COLOUR_MODE,
158 .type = V4L2_CTRL_TYPE_BOOLEAN,
159 .name = "Colour mode",
160 .minimum = 0,
161 .maximum = 1,
162 .step = 1,
163 .default_value = 0,
164 },
165 {
166 .id = V4L2_CID_PRIVATE_AUTOCONTOUR,
167 .type = V4L2_CTRL_TYPE_BOOLEAN,
168 .name = "Auto contour",
169 .minimum = 0,
170 .maximum = 1,
171 .step = 1,
172 .default_value = 0,
173 },
174 {
175 .id = V4L2_CID_PRIVATE_CONTOUR,
176 .type = V4L2_CTRL_TYPE_INTEGER,
177 .name = "Contour",
178 .minimum = 0,
179 .maximum = 63,
180 .step = 1,
181 .default_value = 0,
182 },
183 {
184 .id = V4L2_CID_PRIVATE_BACKLIGHT,
185 .type = V4L2_CTRL_TYPE_BOOLEAN,
186 .name = "Backlight compensation",
187 .minimum = 0,
188 .maximum = 1,
189 .step = 1,
190 .default_value = 0,
191 },
192 {
193 .id = V4L2_CID_PRIVATE_FLICKERLESS,
194 .type = V4L2_CTRL_TYPE_BOOLEAN,
195 .name = "Flickerless",
196 .minimum = 0,
197 .maximum = 1,
198 .step = 1,
199 .default_value = 0,
200 },
201 {
202 .id = V4L2_CID_PRIVATE_NOISE_REDUCTION,
203 .type = V4L2_CTRL_TYPE_INTEGER,
204 .name = "Noise reduction",
205 .minimum = 0,
206 .maximum = 3,
207 .step = 1,
208 .default_value = 0,
209 },
210}; 124};
211 125
126static const struct v4l2_ctrl_config pwc_restore_user_cfg = {
127 .ops = &pwc_ctrl_ops,
128 .id = PWC_CID_CUSTOM(restore_user),
129 .type = V4L2_CTRL_TYPE_BUTTON,
130 .name = "Restore User Settings",
131};
132
133static const struct v4l2_ctrl_config pwc_restore_factory_cfg = {
134 .ops = &pwc_ctrl_ops,
135 .id = PWC_CID_CUSTOM(restore_factory),
136 .type = V4L2_CTRL_TYPE_BUTTON,
137 .name = "Restore Factory Settings",
138};
139
140int pwc_init_controls(struct pwc_device *pdev)
141{
142 struct v4l2_ctrl_handler *hdl;
143 struct v4l2_ctrl_config cfg;
144 int r, def;
145
146 hdl = &pdev->ctrl_handler;
147 r = v4l2_ctrl_handler_init(hdl, 20);
148 if (r)
149 return r;
150
151 /* Brightness, contrast, saturation, gamma */
152 r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, BRIGHTNESS_FORMATTER, &def);
153 if (r || def > 127)
154 def = 63;
155 pdev->brightness = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
156 V4L2_CID_BRIGHTNESS, 0, 127, 1, def);
157
158 r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, CONTRAST_FORMATTER, &def);
159 if (r || def > 63)
160 def = 31;
161 pdev->contrast = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
162 V4L2_CID_CONTRAST, 0, 63, 1, def);
163
164 if (pdev->type >= 675) {
165 if (pdev->type < 730)
166 pdev->saturation_fmt = SATURATION_MODE_FORMATTER2;
167 else
168 pdev->saturation_fmt = SATURATION_MODE_FORMATTER1;
169 r = pwc_get_s8_ctrl(pdev, GET_CHROM_CTL, pdev->saturation_fmt,
170 &def);
171 if (r || def < -100 || def > 100)
172 def = 0;
173 pdev->saturation = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
174 V4L2_CID_SATURATION, -100, 100, 1, def);
175 }
176
177 r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, GAMMA_FORMATTER, &def);
178 if (r || def > 31)
179 def = 15;
180 pdev->gamma = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
181 V4L2_CID_GAMMA, 0, 31, 1, def);
182
183 /* auto white balance, red gain, blue gain */
184 r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL, WB_MODE_FORMATTER, &def);
185 if (r || def > awb_auto)
186 def = awb_auto;
187 cfg = pwc_auto_white_balance_cfg;
188 cfg.name = v4l2_ctrl_get_name(cfg.id);
189 cfg.def = def;
190 pdev->auto_white_balance = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
191 /* check auto controls to avoid NULL deref in v4l2_ctrl_auto_cluster */
192 if (!pdev->auto_white_balance)
193 return hdl->error;
194
195 r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL,
196 PRESET_MANUAL_RED_GAIN_FORMATTER, &def);
197 if (r)
198 def = 127;
199 pdev->red_balance = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
200 V4L2_CID_RED_BALANCE, 0, 255, 1, def);
201
202 r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL,
203 PRESET_MANUAL_BLUE_GAIN_FORMATTER, &def);
204 if (r)
205 def = 127;
206 pdev->blue_balance = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
207 V4L2_CID_BLUE_BALANCE, 0, 255, 1, def);
208
209 v4l2_ctrl_auto_cluster(3, &pdev->auto_white_balance, awb_manual,
210 pdev->auto_white_balance->cur.val == awb_auto);
211
212 /* autogain, gain */
213 r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, AGC_MODE_FORMATTER, &def);
214 if (r || (def != 0 && def != 0xff))
215 def = 0;
216 /* Note a register value if 0 means auto gain is on */
217 pdev->autogain = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
218 V4L2_CID_AUTOGAIN, 0, 1, 1, def == 0);
219 if (!pdev->autogain)
220 return hdl->error;
221
222 r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, PRESET_AGC_FORMATTER, &def);
223 if (r || def > 63)
224 def = 31;
225 pdev->gain = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
226 V4L2_CID_GAIN, 0, 63, 1, def);
227
228 /* auto exposure, exposure */
229 if (DEVICE_USE_CODEC2(pdev->type)) {
230 r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, SHUTTER_MODE_FORMATTER,
231 &def);
232 if (r || (def != 0 && def != 0xff))
233 def = 0;
234 /*
235 * def = 0 auto, def = ff manual
236 * menu idx 0 = auto, idx 1 = manual
237 */
238 pdev->exposure_auto = v4l2_ctrl_new_std_menu(hdl,
239 &pwc_ctrl_ops,
240 V4L2_CID_EXPOSURE_AUTO,
241 1, 0, def != 0);
242 if (!pdev->exposure_auto)
243 return hdl->error;
244
245 /* GET_LUM_CTL, PRESET_SHUTTER_FORMATTER is unreliable */
246 r = pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
247 READ_SHUTTER_FORMATTER, &def);
248 if (r || def > 655)
249 def = 655;
250 pdev->exposure = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
251 V4L2_CID_EXPOSURE, 0, 655, 1, def);
252 /* CODEC2: separate auto gain & auto exposure */
253 v4l2_ctrl_auto_cluster(2, &pdev->autogain, 0, true);
254 v4l2_ctrl_auto_cluster(2, &pdev->exposure_auto,
255 V4L2_EXPOSURE_MANUAL, true);
256 } else if (DEVICE_USE_CODEC3(pdev->type)) {
257 /* GET_LUM_CTL, PRESET_SHUTTER_FORMATTER is unreliable */
258 r = pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
259 READ_SHUTTER_FORMATTER, &def);
260 if (r || def > 255)
261 def = 255;
262 pdev->exposure = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
263 V4L2_CID_EXPOSURE, 0, 255, 1, def);
264 /* CODEC3: both gain and exposure controlled by autogain */
265 pdev->autogain_expo_cluster[0] = pdev->autogain;
266 pdev->autogain_expo_cluster[1] = pdev->gain;
267 pdev->autogain_expo_cluster[2] = pdev->exposure;
268 v4l2_ctrl_auto_cluster(3, pdev->autogain_expo_cluster,
269 0, true);
270 }
271
272 /* color / bw setting */
273 r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL, COLOUR_MODE_FORMATTER,
274 &def);
275 if (r || (def != 0 && def != 0xff))
276 def = 0xff;
277 /* def = 0 bw, def = ff color, menu idx 0 = color, idx 1 = bw */
278 pdev->colorfx = v4l2_ctrl_new_std_menu(hdl, &pwc_ctrl_ops,
279 V4L2_CID_COLORFX, 1, 0, def == 0);
280
281 /* autocontour, contour */
282 r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, AUTO_CONTOUR_FORMATTER, &def);
283 if (r || (def != 0 && def != 0xff))
284 def = 0;
285 cfg = pwc_autocontour_cfg;
286 cfg.def = def == 0;
287 pdev->autocontour = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
288 if (!pdev->autocontour)
289 return hdl->error;
290
291 r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, PRESET_CONTOUR_FORMATTER, &def);
292 if (r || def > 63)
293 def = 31;
294 cfg = pwc_contour_cfg;
295 cfg.def = def;
296 pdev->contour = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
297
298 v4l2_ctrl_auto_cluster(2, &pdev->autocontour, 0, false);
299
300 /* backlight */
301 r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL,
302 BACK_LIGHT_COMPENSATION_FORMATTER, &def);
303 if (r || (def != 0 && def != 0xff))
304 def = 0;
305 cfg = pwc_backlight_cfg;
306 cfg.name = v4l2_ctrl_get_name(cfg.id);
307 cfg.def = def == 0;
308 pdev->backlight = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
309
310 /* flikker rediction */
311 r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL,
312 FLICKERLESS_MODE_FORMATTER, &def);
313 if (r || (def != 0 && def != 0xff))
314 def = 0;
315 cfg = pwc_flicker_cfg;
316 cfg.name = v4l2_ctrl_get_name(cfg.id);
317 cfg.def = def == 0;
318 pdev->flicker = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
319
320 /* Dynamic noise reduction */
321 r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL,
322 DYNAMIC_NOISE_CONTROL_FORMATTER, &def);
323 if (r || def > 3)
324 def = 2;
325 cfg = pwc_noise_reduction_cfg;
326 cfg.def = def;
327 pdev->noise_reduction = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
328
329 /* Save / Restore User / Factory Settings */
330 pdev->save_user = v4l2_ctrl_new_custom(hdl, &pwc_save_user_cfg, NULL);
331 pdev->restore_user = v4l2_ctrl_new_custom(hdl, &pwc_restore_user_cfg,
332 NULL);
333 if (pdev->restore_user)
334 pdev->restore_user->flags = V4L2_CTRL_FLAG_UPDATE;
335 pdev->restore_factory = v4l2_ctrl_new_custom(hdl,
336 &pwc_restore_factory_cfg,
337 NULL);
338 if (pdev->restore_factory)
339 pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE;
340
341 if (!pdev->features & FEATURE_MOTOR_PANTILT)
342 return hdl->error;
343
344 /* Motor pan / tilt / reset */
345 pdev->motor_pan = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
346 V4L2_CID_PAN_RELATIVE, -4480, 4480, 64, 0);
347 if (!pdev->motor_pan)
348 return hdl->error;
349 pdev->motor_tilt = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
350 V4L2_CID_TILT_RELATIVE, -1920, 1920, 64, 0);
351 pdev->motor_pan_reset = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
352 V4L2_CID_PAN_RESET, 0, 0, 0, 0);
353 pdev->motor_tilt_reset = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
354 V4L2_CID_TILT_RESET, 0, 0, 0, 0);
355 v4l2_ctrl_cluster(4, &pdev->motor_pan);
356
357 return hdl->error;
358}
212 359
213static void pwc_vidioc_fill_fmt(const struct pwc_device *pdev, struct v4l2_format *f) 360static void pwc_vidioc_fill_fmt(const struct pwc_device *pdev, struct v4l2_format *f)
214{ 361{
@@ -284,10 +431,21 @@ static int pwc_vidioc_try_fmt(struct pwc_device *pdev, struct v4l2_format *f)
284} 431}
285 432
286/* ioctl(VIDIOC_SET_FMT) */ 433/* ioctl(VIDIOC_SET_FMT) */
287static int pwc_vidioc_set_fmt(struct pwc_device *pdev, struct v4l2_format *f) 434
435static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
288{ 436{
437 struct pwc_device *pdev = video_drvdata(file);
289 int ret, fps, snapshot, compression, pixelformat; 438 int ret, fps, snapshot, compression, pixelformat;
290 439
440 if (!pdev->udev)
441 return -ENODEV;
442
443 if (pdev->capt_file != NULL &&
444 pdev->capt_file != file)
445 return -EBUSY;
446
447 pdev->capt_file = file;
448
291 ret = pwc_vidioc_try_fmt(pdev, f); 449 ret = pwc_vidioc_try_fmt(pdev, f);
292 if (ret<0) 450 if (ret<0)
293 return ret; 451 return ret;
@@ -309,7 +467,7 @@ static int pwc_vidioc_set_fmt(struct pwc_device *pdev, struct v4l2_format *f)
309 pixelformat != V4L2_PIX_FMT_PWC2) 467 pixelformat != V4L2_PIX_FMT_PWC2)
310 return -EINVAL; 468 return -EINVAL;
311 469
312 if (pdev->iso_init) 470 if (vb2_is_streaming(&pdev->vb_queue))
313 return -EBUSY; 471 return -EBUSY;
314 472
315 PWC_DEBUG_IOCTL("Trying to set format to: width=%d height=%d fps=%d " 473 PWC_DEBUG_IOCTL("Trying to set format to: width=%d height=%d fps=%d "
@@ -343,13 +501,14 @@ static int pwc_vidioc_set_fmt(struct pwc_device *pdev, struct v4l2_format *f)
343 501
344static int pwc_querycap(struct file *file, void *fh, struct v4l2_capability *cap) 502static int pwc_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
345{ 503{
346 struct video_device *vdev = video_devdata(file);
347 struct pwc_device *pdev = video_drvdata(file); 504 struct pwc_device *pdev = video_drvdata(file);
348 505
506 if (!pdev->udev)
507 return -ENODEV;
508
349 strcpy(cap->driver, PWC_NAME); 509 strcpy(cap->driver, PWC_NAME);
350 strlcpy(cap->card, vdev->name, sizeof(cap->card)); 510 strlcpy(cap->card, pdev->vdev.name, sizeof(cap->card));
351 usb_make_path(pdev->udev, cap->bus_info, sizeof(cap->bus_info)); 511 usb_make_path(pdev->udev, cap->bus_info, sizeof(cap->bus_info));
352 cap->version = PWC_VERSION_CODE;
353 cap->capabilities = 512 cap->capabilities =
354 V4L2_CAP_VIDEO_CAPTURE | 513 V4L2_CAP_VIDEO_CAPTURE |
355 V4L2_CAP_STREAMING | 514 V4L2_CAP_STREAMING |
@@ -377,255 +536,396 @@ static int pwc_s_input(struct file *file, void *fh, unsigned int i)
377 return i ? -EINVAL : 0; 536 return i ? -EINVAL : 0;
378} 537}
379 538
380static int pwc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *c) 539static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
381{ 540{
382 int i, idx; 541 struct pwc_device *pdev =
383 u32 id; 542 container_of(ctrl->handler, struct pwc_device, ctrl_handler);
384 543 int ret = 0;
385 id = c->id; 544
386 if (id & V4L2_CTRL_FLAG_NEXT_CTRL) { 545 /*
387 id &= V4L2_CTRL_ID_MASK; 546 * Sometimes it can take quite long for the pwc to complete usb control
388 id++; 547 * transfers, so release the modlock to give streaming by another
389 idx = -1; 548 * process / thread the chance to continue with a dqbuf.
390 for (i = 0; i < ARRAY_SIZE(pwc_controls); i++) { 549 */
391 if (pwc_controls[i].id < id) 550 mutex_unlock(&pdev->modlock);
392 continue; 551
393 if (idx >= 0 552 /*
394 && pwc_controls[i].id > pwc_controls[idx].id) 553 * Take the udev-lock to protect against the disconnect handler
395 continue; 554 * completing and setting dev->udev to NULL underneath us. Other code
396 idx = i; 555 * does not need to do this since it is protected by the modlock.
556 */
557 mutex_lock(&pdev->udevlock);
558
559 if (!pdev->udev) {
560 ret = -ENODEV;
561 goto leave;
562 }
563
564 switch (ctrl->id) {
565 case V4L2_CID_AUTO_WHITE_BALANCE:
566 if (pdev->color_bal_valid && time_before(jiffies,
567 pdev->last_color_bal_update + HZ / 4)) {
568 pdev->red_balance->val = pdev->last_red_balance;
569 pdev->blue_balance->val = pdev->last_blue_balance;
570 break;
397 } 571 }
398 if (idx < 0) 572 ret = pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
399 return -EINVAL; 573 READ_RED_GAIN_FORMATTER,
400 memcpy(c, &pwc_controls[idx], sizeof pwc_controls[0]); 574 &pdev->red_balance->val);
401 return 0; 575 if (ret)
576 break;
577 ret = pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
578 READ_BLUE_GAIN_FORMATTER,
579 &pdev->blue_balance->val);
580 if (ret)
581 break;
582 pdev->last_red_balance = pdev->red_balance->val;
583 pdev->last_blue_balance = pdev->blue_balance->val;
584 pdev->last_color_bal_update = jiffies;
585 pdev->color_bal_valid = true;
586 break;
587 case V4L2_CID_AUTOGAIN:
588 if (pdev->gain_valid && time_before(jiffies,
589 pdev->last_gain_update + HZ / 4)) {
590 pdev->gain->val = pdev->last_gain;
591 break;
592 }
593 ret = pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
594 READ_AGC_FORMATTER, &pdev->gain->val);
595 if (ret)
596 break;
597 pdev->last_gain = pdev->gain->val;
598 pdev->last_gain_update = jiffies;
599 pdev->gain_valid = true;
600 if (!DEVICE_USE_CODEC3(pdev->type))
601 break;
602 /* Fall through for CODEC3 where autogain also controls expo */
603 case V4L2_CID_EXPOSURE_AUTO:
604 if (pdev->exposure_valid && time_before(jiffies,
605 pdev->last_exposure_update + HZ / 4)) {
606 pdev->exposure->val = pdev->last_exposure;
607 break;
608 }
609 ret = pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
610 READ_SHUTTER_FORMATTER,
611 &pdev->exposure->val);
612 if (ret)
613 break;
614 pdev->last_exposure = pdev->exposure->val;
615 pdev->last_exposure_update = jiffies;
616 pdev->exposure_valid = true;
617 break;
618 default:
619 ret = -EINVAL;
402 } 620 }
403 for (i = 0; i < sizeof(pwc_controls) / sizeof(struct v4l2_queryctrl); i++) { 621
404 if (pwc_controls[i].id == c->id) { 622 if (ret)
405 PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYCTRL) found\n"); 623 PWC_ERROR("g_ctrl %s error %d\n", ctrl->name, ret);
406 memcpy(c, &pwc_controls[i], sizeof(struct v4l2_queryctrl)); 624
407 return 0; 625leave:
626 mutex_unlock(&pdev->udevlock);
627 mutex_lock(&pdev->modlock);
628 return ret;
629}
630
631static int pwc_set_awb(struct pwc_device *pdev)
632{
633 int ret = 0;
634
635 if (pdev->auto_white_balance->is_new) {
636 ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
637 WB_MODE_FORMATTER,
638 pdev->auto_white_balance->val);
639 if (ret)
640 return ret;
641
642 /* Update val when coming from auto or going to a preset */
643 if (pdev->red_balance->is_volatile ||
644 pdev->auto_white_balance->val == awb_indoor ||
645 pdev->auto_white_balance->val == awb_outdoor ||
646 pdev->auto_white_balance->val == awb_fl) {
647 if (!pdev->red_balance->is_new)
648 pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
649 READ_RED_GAIN_FORMATTER,
650 &pdev->red_balance->val);
651 if (!pdev->blue_balance->is_new)
652 pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
653 READ_BLUE_GAIN_FORMATTER,
654 &pdev->blue_balance->val);
655 }
656 if (pdev->auto_white_balance->val == awb_auto) {
657 pdev->red_balance->is_volatile = true;
658 pdev->blue_balance->is_volatile = true;
659 pdev->color_bal_valid = false; /* Force cache update */
660 } else {
661 pdev->red_balance->is_volatile = false;
662 pdev->blue_balance->is_volatile = false;
408 } 663 }
409 } 664 }
410 return -EINVAL; 665
666 if (ret == 0 && pdev->red_balance->is_new) {
667 if (pdev->auto_white_balance->val != awb_manual)
668 return -EBUSY;
669 ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
670 PRESET_MANUAL_RED_GAIN_FORMATTER,
671 pdev->red_balance->val);
672 }
673
674 if (ret == 0 && pdev->blue_balance->is_new) {
675 if (pdev->auto_white_balance->val != awb_manual)
676 return -EBUSY;
677 ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
678 PRESET_MANUAL_BLUE_GAIN_FORMATTER,
679 pdev->blue_balance->val);
680 }
681 return ret;
411} 682}
412 683
413static int pwc_g_ctrl(struct file *file, void *fh, struct v4l2_control *c) 684/* For CODEC2 models which have separate autogain and auto exposure */
685static int pwc_set_autogain(struct pwc_device *pdev)
414{ 686{
415 struct pwc_device *pdev = video_drvdata(file); 687 int ret = 0;
416 int ret; 688
689 if (pdev->autogain->is_new) {
690 ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
691 AGC_MODE_FORMATTER,
692 pdev->autogain->val ? 0 : 0xff);
693 if (ret)
694 return ret;
695 if (pdev->autogain->val)
696 pdev->gain_valid = false; /* Force cache update */
697 else if (!pdev->gain->is_new)
698 pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
699 READ_AGC_FORMATTER,
700 &pdev->gain->val);
701 }
702 if (ret == 0 && pdev->gain->is_new) {
703 if (pdev->autogain->val)
704 return -EBUSY;
705 ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
706 PRESET_AGC_FORMATTER,
707 pdev->gain->val);
708 }
709 return ret;
710}
417 711
418 switch (c->id) { 712/* For CODEC2 models which have separate autogain and auto exposure */
419 case V4L2_CID_BRIGHTNESS: 713static int pwc_set_exposure_auto(struct pwc_device *pdev)
420 c->value = pwc_get_brightness(pdev); 714{
421 if (c->value < 0) 715 int ret = 0;
422 return -EINVAL; 716 int is_auto = pdev->exposure_auto->val == V4L2_EXPOSURE_AUTO;
423 return 0; 717
424 case V4L2_CID_CONTRAST: 718 if (pdev->exposure_auto->is_new) {
425 c->value = pwc_get_contrast(pdev); 719 ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
426 if (c->value < 0) 720 SHUTTER_MODE_FORMATTER,
427 return -EINVAL; 721 is_auto ? 0 : 0xff);
428 return 0; 722 if (ret)
429 case V4L2_CID_SATURATION: 723 return ret;
430 ret = pwc_get_saturation(pdev, &c->value); 724 if (is_auto)
431 if (ret < 0) 725 pdev->exposure_valid = false; /* Force cache update */
432 return -EINVAL; 726 else if (!pdev->exposure->is_new)
433 return 0; 727 pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
434 case V4L2_CID_GAMMA: 728 READ_SHUTTER_FORMATTER,
435 c->value = pwc_get_gamma(pdev); 729 &pdev->exposure->val);
436 if (c->value < 0) 730 }
437 return -EINVAL; 731 if (ret == 0 && pdev->exposure->is_new) {
438 return 0; 732 if (is_auto)
439 case V4L2_CID_RED_BALANCE: 733 return -EBUSY;
440 ret = pwc_get_red_gain(pdev, &c->value); 734 ret = pwc_set_u16_ctrl(pdev, SET_LUM_CTL,
441 if (ret < 0) 735 PRESET_SHUTTER_FORMATTER,
442 return -EINVAL; 736 pdev->exposure->val);
443 c->value >>= 8; 737 }
444 return 0; 738 return ret;
445 case V4L2_CID_BLUE_BALANCE: 739}
446 ret = pwc_get_blue_gain(pdev, &c->value);
447 if (ret < 0)
448 return -EINVAL;
449 c->value >>= 8;
450 return 0;
451 case V4L2_CID_AUTO_WHITE_BALANCE:
452 ret = pwc_get_awb(pdev);
453 if (ret < 0)
454 return -EINVAL;
455 c->value = (ret == PWC_WB_MANUAL) ? 0 : 1;
456 return 0;
457 case V4L2_CID_GAIN:
458 ret = pwc_get_agc(pdev, &c->value);
459 if (ret < 0)
460 return -EINVAL;
461 c->value >>= 8;
462 return 0;
463 case V4L2_CID_AUTOGAIN:
464 ret = pwc_get_agc(pdev, &c->value);
465 if (ret < 0)
466 return -EINVAL;
467 c->value = (c->value < 0) ? 1 : 0;
468 return 0;
469 case V4L2_CID_EXPOSURE:
470 ret = pwc_get_shutter_speed(pdev, &c->value);
471 if (ret < 0)
472 return -EINVAL;
473 return 0;
474 case V4L2_CID_PRIVATE_COLOUR_MODE:
475 ret = pwc_get_colour_mode(pdev, &c->value);
476 if (ret < 0)
477 return -EINVAL;
478 return 0;
479 case V4L2_CID_PRIVATE_AUTOCONTOUR:
480 ret = pwc_get_contour(pdev, &c->value);
481 if (ret < 0)
482 return -EINVAL;
483 c->value = (c->value == -1 ? 1 : 0);
484 return 0;
485 case V4L2_CID_PRIVATE_CONTOUR:
486 ret = pwc_get_contour(pdev, &c->value);
487 if (ret < 0)
488 return -EINVAL;
489 c->value >>= 10;
490 return 0;
491 case V4L2_CID_PRIVATE_BACKLIGHT:
492 ret = pwc_get_backlight(pdev, &c->value);
493 if (ret < 0)
494 return -EINVAL;
495 return 0;
496 case V4L2_CID_PRIVATE_FLICKERLESS:
497 ret = pwc_get_flicker(pdev, &c->value);
498 if (ret < 0)
499 return -EINVAL;
500 c->value = (c->value ? 1 : 0);
501 return 0;
502 case V4L2_CID_PRIVATE_NOISE_REDUCTION:
503 ret = pwc_get_dynamic_noise(pdev, &c->value);
504 if (ret < 0)
505 return -EINVAL;
506 return 0;
507 740
508 case V4L2_CID_PRIVATE_SAVE_USER: 741/* For CODEC3 models which have autogain controlling both gain and exposure */
509 case V4L2_CID_PRIVATE_RESTORE_USER: 742static int pwc_set_autogain_expo(struct pwc_device *pdev)
510 case V4L2_CID_PRIVATE_RESTORE_FACTORY: 743{
511 return -EINVAL; 744 int ret = 0;
745
746 if (pdev->autogain->is_new) {
747 ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
748 AGC_MODE_FORMATTER,
749 pdev->autogain->val ? 0 : 0xff);
750 if (ret)
751 return ret;
752 if (pdev->autogain->val) {
753 pdev->gain_valid = false; /* Force cache update */
754 pdev->exposure_valid = false; /* Force cache update */
755 } else {
756 if (!pdev->gain->is_new)
757 pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
758 READ_AGC_FORMATTER,
759 &pdev->gain->val);
760 if (!pdev->exposure->is_new)
761 pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
762 READ_SHUTTER_FORMATTER,
763 &pdev->exposure->val);
764 }
512 } 765 }
513 return -EINVAL; 766 if (ret == 0 && pdev->gain->is_new) {
767 if (pdev->autogain->val)
768 return -EBUSY;
769 ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
770 PRESET_AGC_FORMATTER,
771 pdev->gain->val);
772 }
773 if (ret == 0 && pdev->exposure->is_new) {
774 if (pdev->autogain->val)
775 return -EBUSY;
776 ret = pwc_set_u16_ctrl(pdev, SET_LUM_CTL,
777 PRESET_SHUTTER_FORMATTER,
778 pdev->exposure->val);
779 }
780 return ret;
514} 781}
515 782
516static int pwc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c) 783static int pwc_set_motor(struct pwc_device *pdev)
517{ 784{
518 struct pwc_device *pdev = video_drvdata(file);
519 int ret; 785 int ret;
786 u8 buf[4];
787
788 buf[0] = 0;
789 if (pdev->motor_pan_reset->is_new)
790 buf[0] |= 0x01;
791 if (pdev->motor_tilt_reset->is_new)
792 buf[0] |= 0x02;
793 if (pdev->motor_pan_reset->is_new || pdev->motor_tilt_reset->is_new) {
794 ret = send_control_msg(pdev, SET_MPT_CTL,
795 PT_RESET_CONTROL_FORMATTER, buf, 1);
796 if (ret < 0)
797 return ret;
798 }
520 799
521 switch (c->id) { 800 memset(buf, 0, sizeof(buf));
522 case V4L2_CID_BRIGHTNESS: 801 if (pdev->motor_pan->is_new) {
523 c->value <<= 9; 802 buf[0] = pdev->motor_pan->val & 0xFF;
524 ret = pwc_set_brightness(pdev, c->value); 803 buf[1] = (pdev->motor_pan->val >> 8);
804 }
805 if (pdev->motor_tilt->is_new) {
806 buf[2] = pdev->motor_tilt->val & 0xFF;
807 buf[3] = (pdev->motor_tilt->val >> 8);
808 }
809 if (pdev->motor_pan->is_new || pdev->motor_tilt->is_new) {
810 ret = send_control_msg(pdev, SET_MPT_CTL,
811 PT_RELATIVE_CONTROL_FORMATTER,
812 buf, sizeof(buf));
525 if (ret < 0) 813 if (ret < 0)
526 return -EINVAL; 814 return ret;
527 return 0; 815 }
816
817 return 0;
818}
819
820static int pwc_s_ctrl(struct v4l2_ctrl *ctrl)
821{
822 struct pwc_device *pdev =
823 container_of(ctrl->handler, struct pwc_device, ctrl_handler);
824 int ret = 0;
825
826 /* See the comments on locking in pwc_g_volatile_ctrl */
827 mutex_unlock(&pdev->modlock);
828 mutex_lock(&pdev->udevlock);
829
830 if (!pdev->udev) {
831 ret = -ENODEV;
832 goto leave;
833 }
834
835 switch (ctrl->id) {
836 case V4L2_CID_BRIGHTNESS:
837 ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
838 BRIGHTNESS_FORMATTER, ctrl->val);
839 break;
528 case V4L2_CID_CONTRAST: 840 case V4L2_CID_CONTRAST:
529 c->value <<= 10; 841 ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
530 ret = pwc_set_contrast(pdev, c->value); 842 CONTRAST_FORMATTER, ctrl->val);
531 if (ret < 0) 843 break;
532 return -EINVAL;
533 return 0;
534 case V4L2_CID_SATURATION: 844 case V4L2_CID_SATURATION:
535 ret = pwc_set_saturation(pdev, c->value); 845 ret = pwc_set_s8_ctrl(pdev, SET_CHROM_CTL,
536 if (ret < 0) 846 pdev->saturation_fmt, ctrl->val);
537 return -EINVAL; 847 break;
538 return 0;
539 case V4L2_CID_GAMMA: 848 case V4L2_CID_GAMMA:
540 c->value <<= 11; 849 ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
541 ret = pwc_set_gamma(pdev, c->value); 850 GAMMA_FORMATTER, ctrl->val);
542 if (ret < 0) 851 break;
543 return -EINVAL;
544 return 0;
545 case V4L2_CID_RED_BALANCE:
546 c->value <<= 8;
547 ret = pwc_set_red_gain(pdev, c->value);
548 if (ret < 0)
549 return -EINVAL;
550 return 0;
551 case V4L2_CID_BLUE_BALANCE:
552 c->value <<= 8;
553 ret = pwc_set_blue_gain(pdev, c->value);
554 if (ret < 0)
555 return -EINVAL;
556 return 0;
557 case V4L2_CID_AUTO_WHITE_BALANCE: 852 case V4L2_CID_AUTO_WHITE_BALANCE:
558 c->value = (c->value == 0) ? PWC_WB_MANUAL : PWC_WB_AUTO; 853 ret = pwc_set_awb(pdev);
559 ret = pwc_set_awb(pdev, c->value); 854 break;
560 if (ret < 0)
561 return -EINVAL;
562 return 0;
563 case V4L2_CID_EXPOSURE:
564 c->value <<= 8;
565 ret = pwc_set_shutter_speed(pdev, c->value ? 0 : 1, c->value);
566 if (ret < 0)
567 return -EINVAL;
568 return 0;
569 case V4L2_CID_AUTOGAIN: 855 case V4L2_CID_AUTOGAIN:
570 /* autogain off means nothing without a gain */ 856 if (DEVICE_USE_CODEC2(pdev->type))
571 if (c->value == 0) 857 ret = pwc_set_autogain(pdev);
572 return 0; 858 else if (DEVICE_USE_CODEC3(pdev->type))
573 ret = pwc_set_agc(pdev, c->value, 0); 859 ret = pwc_set_autogain_expo(pdev);
574 if (ret < 0) 860 else
575 return -EINVAL; 861 ret = -EINVAL;
576 return 0; 862 break;
577 case V4L2_CID_GAIN: 863 case V4L2_CID_EXPOSURE_AUTO:
578 c->value <<= 8; 864 if (DEVICE_USE_CODEC2(pdev->type))
579 ret = pwc_set_agc(pdev, 0, c->value); 865 ret = pwc_set_exposure_auto(pdev);
580 if (ret < 0) 866 else
581 return -EINVAL; 867 ret = -EINVAL;
582 return 0; 868 break;
583 case V4L2_CID_PRIVATE_SAVE_USER: 869 case V4L2_CID_COLORFX:
584 if (pwc_save_user(pdev)) 870 ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
585 return -EINVAL; 871 COLOUR_MODE_FORMATTER,
586 return 0; 872 ctrl->val ? 0 : 0xff);
587 case V4L2_CID_PRIVATE_RESTORE_USER: 873 break;
588 if (pwc_restore_user(pdev)) 874 case PWC_CID_CUSTOM(autocontour):
589 return -EINVAL; 875 if (pdev->autocontour->is_new) {
590 return 0; 876 ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
591 case V4L2_CID_PRIVATE_RESTORE_FACTORY: 877 AUTO_CONTOUR_FORMATTER,
592 if (pwc_restore_factory(pdev)) 878 pdev->autocontour->val ? 0 : 0xff);
593 return -EINVAL; 879 }
594 return 0; 880 if (ret == 0 && pdev->contour->is_new) {
595 case V4L2_CID_PRIVATE_COLOUR_MODE: 881 if (pdev->autocontour->val) {
596 ret = pwc_set_colour_mode(pdev, c->value); 882 ret = -EBUSY;
597 if (ret < 0) 883 break;
598 return -EINVAL; 884 }
599 return 0; 885 ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
600 case V4L2_CID_PRIVATE_AUTOCONTOUR: 886 PRESET_CONTOUR_FORMATTER,
601 c->value = (c->value == 1) ? -1 : 0; 887 pdev->contour->val);
602 ret = pwc_set_contour(pdev, c->value); 888 }
603 if (ret < 0) 889 break;
604 return -EINVAL; 890 case V4L2_CID_BACKLIGHT_COMPENSATION:
605 return 0; 891 ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
606 case V4L2_CID_PRIVATE_CONTOUR: 892 BACK_LIGHT_COMPENSATION_FORMATTER,
607 c->value <<= 10; 893 ctrl->val ? 0 : 0xff);
608 ret = pwc_set_contour(pdev, c->value); 894 break;
609 if (ret < 0) 895 case V4L2_CID_BAND_STOP_FILTER:
610 return -EINVAL; 896 ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
611 return 0; 897 FLICKERLESS_MODE_FORMATTER,
612 case V4L2_CID_PRIVATE_BACKLIGHT: 898 ctrl->val ? 0 : 0xff);
613 ret = pwc_set_backlight(pdev, c->value); 899 break;
614 if (ret < 0) 900 case PWC_CID_CUSTOM(noise_reduction):
615 return -EINVAL; 901 ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
616 return 0; 902 DYNAMIC_NOISE_CONTROL_FORMATTER,
617 case V4L2_CID_PRIVATE_FLICKERLESS: 903 ctrl->val);
618 ret = pwc_set_flicker(pdev, c->value); 904 break;
619 if (ret < 0) 905 case PWC_CID_CUSTOM(save_user):
620 return -EINVAL; 906 ret = pwc_button_ctrl(pdev, SAVE_USER_DEFAULTS_FORMATTER);
621 case V4L2_CID_PRIVATE_NOISE_REDUCTION: 907 break;
622 ret = pwc_set_dynamic_noise(pdev, c->value); 908 case PWC_CID_CUSTOM(restore_user):
623 if (ret < 0) 909 ret = pwc_button_ctrl(pdev, RESTORE_USER_DEFAULTS_FORMATTER);
624 return -EINVAL; 910 break;
625 return 0; 911 case PWC_CID_CUSTOM(restore_factory):
626 912 ret = pwc_button_ctrl(pdev,
913 RESTORE_FACTORY_DEFAULTS_FORMATTER);
914 break;
915 case V4L2_CID_PAN_RELATIVE:
916 ret = pwc_set_motor(pdev);
917 break;
918 default:
919 ret = -EINVAL;
627 } 920 }
628 return -EINVAL; 921
922 if (ret)
923 PWC_ERROR("s_ctrl %s error %d\n", ctrl->name, ret);
924
925leave:
926 mutex_unlock(&pdev->udevlock);
927 mutex_lock(&pdev->modlock);
928 return ret;
629} 929}
630 930
631static int pwc_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f) 931static int pwc_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
@@ -667,157 +967,77 @@ static int pwc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *
667 return pwc_vidioc_try_fmt(pdev, f); 967 return pwc_vidioc_try_fmt(pdev, f);
668} 968}
669 969
670static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) 970static int pwc_reqbufs(struct file *file, void *fh,
971 struct v4l2_requestbuffers *rb)
671{ 972{
672 struct pwc_device *pdev = video_drvdata(file); 973 struct pwc_device *pdev = video_drvdata(file);
673 974
674 return pwc_vidioc_set_fmt(pdev, f); 975 if (pdev->capt_file != NULL &&
675} 976 pdev->capt_file != file)
676 977 return -EBUSY;
677static int pwc_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
678{
679 int nbuffers;
680 978
681 PWC_DEBUG_IOCTL("ioctl(VIDIOC_REQBUFS) count=%d\n", rb->count); 979 pdev->capt_file = file;
682 if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
683 return -EINVAL;
684 if (rb->memory != V4L2_MEMORY_MMAP)
685 return -EINVAL;
686 980
687 nbuffers = rb->count; 981 return vb2_reqbufs(&pdev->vb_queue, rb);
688 if (nbuffers < 2)
689 nbuffers = 2;
690 else if (nbuffers > pwc_mbufs)
691 nbuffers = pwc_mbufs;
692 /* Force to use our # of buffers */
693 rb->count = pwc_mbufs;
694 return 0;
695} 982}
696 983
697static int pwc_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf) 984static int pwc_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
698{ 985{
699 struct pwc_device *pdev = video_drvdata(file); 986 struct pwc_device *pdev = video_drvdata(file);
700 int index;
701 987
702 PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYBUF) index=%d\n", buf->index); 988 return vb2_querybuf(&pdev->vb_queue, buf);
703 if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
704 PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYBUF) Bad type\n");
705 return -EINVAL;
706 }
707 index = buf->index;
708 if (index < 0 || index >= pwc_mbufs) {
709 PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYBUF) Bad index %d\n", buf->index);
710 return -EINVAL;
711 }
712
713 buf->m.offset = index * pdev->len_per_image;
714 if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
715 buf->bytesused = pdev->frame_size + sizeof(struct pwc_raw_frame);
716 else
717 buf->bytesused = pdev->view.size;
718 buf->field = V4L2_FIELD_NONE;
719 buf->memory = V4L2_MEMORY_MMAP;
720 /*buf->flags = V4L2_BUF_FLAG_MAPPED;*/
721 buf->length = pdev->len_per_image;
722
723 PWC_DEBUG_READ("VIDIOC_QUERYBUF: index=%d\n", buf->index);
724 PWC_DEBUG_READ("VIDIOC_QUERYBUF: m.offset=%d\n", buf->m.offset);
725 PWC_DEBUG_READ("VIDIOC_QUERYBUF: bytesused=%d\n", buf->bytesused);
726
727 return 0;
728} 989}
729 990
730static int pwc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) 991static int pwc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
731{ 992{
732 PWC_DEBUG_IOCTL("ioctl(VIDIOC_QBUF) index=%d\n", buf->index); 993 struct pwc_device *pdev = video_drvdata(file);
733 if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
734 return -EINVAL;
735 if (buf->memory != V4L2_MEMORY_MMAP)
736 return -EINVAL;
737 if (buf->index >= pwc_mbufs)
738 return -EINVAL;
739 994
740 buf->flags |= V4L2_BUF_FLAG_QUEUED; 995 if (!pdev->udev)
741 buf->flags &= ~V4L2_BUF_FLAG_DONE; 996 return -ENODEV;
742 997
743 return 0; 998 if (pdev->capt_file != file)
999 return -EBUSY;
1000
1001 return vb2_qbuf(&pdev->vb_queue, buf);
744} 1002}
745 1003
746static int pwc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) 1004static int pwc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
747{ 1005{
748 DECLARE_WAITQUEUE(wait, current);
749 struct pwc_device *pdev = video_drvdata(file); 1006 struct pwc_device *pdev = video_drvdata(file);
750 int ret;
751 1007
752 PWC_DEBUG_IOCTL("ioctl(VIDIOC_DQBUF)\n"); 1008 if (!pdev->udev)
1009 return -ENODEV;
753 1010
754 if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 1011 if (pdev->capt_file != file)
755 return -EINVAL; 1012 return -EBUSY;
756
757 add_wait_queue(&pdev->frameq, &wait);
758 while (pdev->full_frames == NULL) {
759 if (pdev->error_status) {
760 remove_wait_queue(&pdev->frameq, &wait);
761 set_current_state(TASK_RUNNING);
762 return -pdev->error_status;
763 }
764
765 if (signal_pending(current)) {
766 remove_wait_queue(&pdev->frameq, &wait);
767 set_current_state(TASK_RUNNING);
768 return -ERESTARTSYS;
769 }
770 mutex_unlock(&pdev->modlock);
771 schedule();
772 set_current_state(TASK_INTERRUPTIBLE);
773 mutex_lock(&pdev->modlock);
774 }
775 remove_wait_queue(&pdev->frameq, &wait);
776 set_current_state(TASK_RUNNING);
777
778 PWC_DEBUG_IOCTL("VIDIOC_DQBUF: frame ready.\n");
779 /* Decompress data in pdev->images[pdev->fill_image] */
780 ret = pwc_handle_frame(pdev);
781 if (ret)
782 return -EFAULT;
783 PWC_DEBUG_IOCTL("VIDIOC_DQBUF: after pwc_handle_frame\n");
784
785 buf->index = pdev->fill_image;
786 if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
787 buf->bytesused = pdev->frame_size + sizeof(struct pwc_raw_frame);
788 else
789 buf->bytesused = pdev->view.size;
790 buf->flags = V4L2_BUF_FLAG_MAPPED;
791 buf->field = V4L2_FIELD_NONE;
792 do_gettimeofday(&buf->timestamp);
793 buf->sequence = 0;
794 buf->memory = V4L2_MEMORY_MMAP;
795 buf->m.offset = pdev->fill_image * pdev->len_per_image;
796 buf->length = pdev->len_per_image;
797 pwc_next_image(pdev);
798
799 PWC_DEBUG_IOCTL("VIDIOC_DQBUF: buf->index=%d\n", buf->index);
800 PWC_DEBUG_IOCTL("VIDIOC_DQBUF: buf->length=%d\n", buf->length);
801 PWC_DEBUG_IOCTL("VIDIOC_DQBUF: m.offset=%d\n", buf->m.offset);
802 PWC_DEBUG_IOCTL("VIDIOC_DQBUF: bytesused=%d\n", buf->bytesused);
803 PWC_DEBUG_IOCTL("VIDIOC_DQBUF: leaving\n");
804 return 0;
805 1013
1014 return vb2_dqbuf(&pdev->vb_queue, buf, file->f_flags & O_NONBLOCK);
806} 1015}
807 1016
808static int pwc_streamon(struct file *file, void *fh, enum v4l2_buf_type i) 1017static int pwc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
809{ 1018{
810 struct pwc_device *pdev = video_drvdata(file); 1019 struct pwc_device *pdev = video_drvdata(file);
811 1020
812 return pwc_isoc_init(pdev); 1021 if (!pdev->udev)
1022 return -ENODEV;
1023
1024 if (pdev->capt_file != file)
1025 return -EBUSY;
1026
1027 return vb2_streamon(&pdev->vb_queue, i);
813} 1028}
814 1029
815static int pwc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i) 1030static int pwc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
816{ 1031{
817 struct pwc_device *pdev = video_drvdata(file); 1032 struct pwc_device *pdev = video_drvdata(file);
818 1033
819 pwc_isoc_cleanup(pdev); 1034 if (!pdev->udev)
820 return 0; 1035 return -ENODEV;
1036
1037 if (pdev->capt_file != file)
1038 return -EBUSY;
1039
1040 return vb2_streamoff(&pdev->vb_queue, i);
821} 1041}
822 1042
823static int pwc_enum_framesizes(struct file *file, void *fh, 1043static int pwc_enum_framesizes(struct file *file, void *fh,
@@ -896,9 +1116,6 @@ const struct v4l2_ioctl_ops pwc_ioctl_ops = {
896 .vidioc_g_fmt_vid_cap = pwc_g_fmt_vid_cap, 1116 .vidioc_g_fmt_vid_cap = pwc_g_fmt_vid_cap,
897 .vidioc_s_fmt_vid_cap = pwc_s_fmt_vid_cap, 1117 .vidioc_s_fmt_vid_cap = pwc_s_fmt_vid_cap,
898 .vidioc_try_fmt_vid_cap = pwc_try_fmt_vid_cap, 1118 .vidioc_try_fmt_vid_cap = pwc_try_fmt_vid_cap,
899 .vidioc_queryctrl = pwc_queryctrl,
900 .vidioc_g_ctrl = pwc_g_ctrl,
901 .vidioc_s_ctrl = pwc_s_ctrl,
902 .vidioc_reqbufs = pwc_reqbufs, 1119 .vidioc_reqbufs = pwc_reqbufs,
903 .vidioc_querybuf = pwc_querybuf, 1120 .vidioc_querybuf = pwc_querybuf,
904 .vidioc_qbuf = pwc_qbuf, 1121 .vidioc_qbuf = pwc_qbuf,
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index 083f8b15df73..0e4e2d7b7872 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -29,7 +29,6 @@
29#include <linux/usb.h> 29#include <linux/usb.h>
30#include <linux/spinlock.h> 30#include <linux/spinlock.h>
31#include <linux/wait.h> 31#include <linux/wait.h>
32#include <linux/version.h>
33#include <linux/mutex.h> 32#include <linux/mutex.h>
34#include <linux/mm.h> 33#include <linux/mm.h>
35#include <linux/slab.h> 34#include <linux/slab.h>
@@ -37,19 +36,16 @@
37#include <linux/videodev2.h> 36#include <linux/videodev2.h>
38#include <media/v4l2-common.h> 37#include <media/v4l2-common.h>
39#include <media/v4l2-ioctl.h> 38#include <media/v4l2-ioctl.h>
39#include <media/v4l2-ctrls.h>
40#include <media/videobuf2-vmalloc.h>
40#ifdef CONFIG_USB_PWC_INPUT_EVDEV 41#ifdef CONFIG_USB_PWC_INPUT_EVDEV
41#include <linux/input.h> 42#include <linux/input.h>
42#endif 43#endif
43 44
44#include "pwc-uncompress.h"
45#include <media/pwc-ioctl.h> 45#include <media/pwc-ioctl.h>
46 46
47/* Version block */ 47/* Version block */
48#define PWC_MAJOR 10 48#define PWC_VERSION "10.0.15"
49#define PWC_MINOR 0
50#define PWC_EXTRAMINOR 12
51#define PWC_VERSION_CODE KERNEL_VERSION(PWC_MAJOR,PWC_MINOR,PWC_EXTRAMINOR)
52#define PWC_VERSION "10.0.14"
53#define PWC_NAME "pwc" 49#define PWC_NAME "pwc"
54#define PFX PWC_NAME ": " 50#define PFX PWC_NAME ": "
55 51
@@ -81,9 +77,9 @@
81#define PWC_DEBUG_LEVEL (PWC_DEBUG_LEVEL_MODULE) 77#define PWC_DEBUG_LEVEL (PWC_DEBUG_LEVEL_MODULE)
82 78
83#define PWC_DEBUG(level, fmt, args...) do {\ 79#define PWC_DEBUG(level, fmt, args...) do {\
84 if ((PWC_DEBUG_LEVEL_ ##level) & pwc_trace) \ 80 if ((PWC_DEBUG_LEVEL_ ##level) & pwc_trace) \
85 printk(KERN_DEBUG PFX fmt, ##args); \ 81 printk(KERN_DEBUG PFX fmt, ##args); \
86 } while(0) 82 } while (0)
87 83
88#define PWC_ERROR(fmt, args...) printk(KERN_ERR PFX fmt, ##args) 84#define PWC_ERROR(fmt, args...) printk(KERN_ERR PFX fmt, ##args)
89#define PWC_WARNING(fmt, args...) printk(KERN_WARNING PFX fmt, ##args) 85#define PWC_WARNING(fmt, args...) printk(KERN_WARNING PFX fmt, ##args)
@@ -110,25 +106,21 @@
110#define FEATURE_CODEC1 0x0002 106#define FEATURE_CODEC1 0x0002
111#define FEATURE_CODEC2 0x0004 107#define FEATURE_CODEC2 0x0004
112 108
113/* Turn certain features on/off */
114#define PWC_INT_PIPE 0
115
116/* Ignore errors in the first N frames, to allow for startup delays */ 109/* Ignore errors in the first N frames, to allow for startup delays */
117#define FRAME_LOWMARK 5 110#define FRAME_LOWMARK 5
118 111
119/* Size and number of buffers for the ISO pipe. */ 112/* Size and number of buffers for the ISO pipe. */
120#define MAX_ISO_BUFS 2 113#define MAX_ISO_BUFS 3
121#define ISO_FRAMES_PER_DESC 10 114#define ISO_FRAMES_PER_DESC 10
122#define ISO_MAX_FRAME_SIZE 960 115#define ISO_MAX_FRAME_SIZE 960
123#define ISO_BUFFER_SIZE (ISO_FRAMES_PER_DESC * ISO_MAX_FRAME_SIZE) 116#define ISO_BUFFER_SIZE (ISO_FRAMES_PER_DESC * ISO_MAX_FRAME_SIZE)
124 117
125/* Frame buffers: contains compressed or uncompressed video data. */
126#define MAX_FRAMES 5
127/* Maximum size after decompression is 640x480 YUV data, 1.5 * 640 * 480 */ 118/* Maximum size after decompression is 640x480 YUV data, 1.5 * 640 * 480 */
128#define PWC_FRAME_SIZE (460800 + TOUCAM_HEADER_SIZE + TOUCAM_TRAILER_SIZE) 119#define PWC_FRAME_SIZE (460800 + TOUCAM_HEADER_SIZE + TOUCAM_TRAILER_SIZE)
129 120
130/* Absolute maximum number of buffers available for mmap() */ 121/* Absolute minimum and maximum number of buffers available for mmap() */
131#define MAX_IMAGES 10 122#define MIN_FRAMES 2
123#define MAX_FRAMES 16
132 124
133/* Some macros to quickly find the type of a webcam */ 125/* Some macros to quickly find the type of a webcam */
134#define DEVICE_USE_CODEC1(x) ((x)<675) 126#define DEVICE_USE_CODEC1(x) ((x)<675)
@@ -136,149 +128,221 @@
136#define DEVICE_USE_CODEC3(x) ((x)>=700) 128#define DEVICE_USE_CODEC3(x) ((x)>=700)
137#define DEVICE_USE_CODEC23(x) ((x)>=675) 129#define DEVICE_USE_CODEC23(x) ((x)>=675)
138 130
139/* The following structures were based on cpia.h. Why reinvent the wheel? :-) */ 131/* from pwc-dec.h */
140struct pwc_iso_buf 132#define PWCX_FLAG_PLANAR 0x0001
141{ 133
142 void *data; 134/* Request types: video */
143 int length; 135#define SET_LUM_CTL 0x01
144 int read; 136#define GET_LUM_CTL 0x02
145 struct urb *urb; 137#define SET_CHROM_CTL 0x03
146}; 138#define GET_CHROM_CTL 0x04
139#define SET_STATUS_CTL 0x05
140#define GET_STATUS_CTL 0x06
141#define SET_EP_STREAM_CTL 0x07
142#define GET_EP_STREAM_CTL 0x08
143#define GET_XX_CTL 0x09
144#define SET_XX_CTL 0x0A
145#define GET_XY_CTL 0x0B
146#define SET_XY_CTL 0x0C
147#define SET_MPT_CTL 0x0D
148#define GET_MPT_CTL 0x0E
149
150/* Selectors for the Luminance controls [GS]ET_LUM_CTL */
151#define AGC_MODE_FORMATTER 0x2000
152#define PRESET_AGC_FORMATTER 0x2100
153#define SHUTTER_MODE_FORMATTER 0x2200
154#define PRESET_SHUTTER_FORMATTER 0x2300
155#define PRESET_CONTOUR_FORMATTER 0x2400
156#define AUTO_CONTOUR_FORMATTER 0x2500
157#define BACK_LIGHT_COMPENSATION_FORMATTER 0x2600
158#define CONTRAST_FORMATTER 0x2700
159#define DYNAMIC_NOISE_CONTROL_FORMATTER 0x2800
160#define FLICKERLESS_MODE_FORMATTER 0x2900
161#define AE_CONTROL_SPEED 0x2A00
162#define BRIGHTNESS_FORMATTER 0x2B00
163#define GAMMA_FORMATTER 0x2C00
164
165/* Selectors for the Chrominance controls [GS]ET_CHROM_CTL */
166#define WB_MODE_FORMATTER 0x1000
167#define AWB_CONTROL_SPEED_FORMATTER 0x1100
168#define AWB_CONTROL_DELAY_FORMATTER 0x1200
169#define PRESET_MANUAL_RED_GAIN_FORMATTER 0x1300
170#define PRESET_MANUAL_BLUE_GAIN_FORMATTER 0x1400
171#define COLOUR_MODE_FORMATTER 0x1500
172#define SATURATION_MODE_FORMATTER1 0x1600
173#define SATURATION_MODE_FORMATTER2 0x1700
174
175/* Selectors for the Status controls [GS]ET_STATUS_CTL */
176#define SAVE_USER_DEFAULTS_FORMATTER 0x0200
177#define RESTORE_USER_DEFAULTS_FORMATTER 0x0300
178#define RESTORE_FACTORY_DEFAULTS_FORMATTER 0x0400
179#define READ_AGC_FORMATTER 0x0500
180#define READ_SHUTTER_FORMATTER 0x0600
181#define READ_RED_GAIN_FORMATTER 0x0700
182#define READ_BLUE_GAIN_FORMATTER 0x0800
183
184/* Formatters for the motorized pan & tilt [GS]ET_MPT_CTL */
185#define PT_RELATIVE_CONTROL_FORMATTER 0x01
186#define PT_RESET_CONTROL_FORMATTER 0x02
187#define PT_STATUS_FORMATTER 0x03
147 188
148/* intermediate buffers with raw data from the USB cam */ 189/* intermediate buffers with raw data from the USB cam */
149struct pwc_frame_buf 190struct pwc_frame_buf
150{ 191{
151 void *data; 192 struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */
152 volatile int filled; /* number of bytes filled */ 193 struct list_head list;
153 struct pwc_frame_buf *next; /* list */ 194 void *data;
154}; 195 int filled; /* number of bytes filled */
155
156/* additionnal informations used when dealing image between kernel and userland */
157struct pwc_imgbuf
158{
159 unsigned long offset; /* offset of this buffer in the big array of image_data */
160 int vma_use_count; /* count the number of time this memory is mapped */
161}; 196};
162 197
163struct pwc_device 198struct pwc_device
164{ 199{
165 struct video_device vdev; 200 struct video_device vdev;
166 201 struct mutex modlock;
167 /* Pointer to our usb_device, may be NULL after unplug */ 202
168 struct usb_device *udev; 203 /* Pointer to our usb_device, may be NULL after unplug */
169 204 struct usb_device *udev;
170 int type; /* type of cam (645, 646, 675, 680, 690, 720, 730, 740, 750) */ 205 /* Protects the setting of udev to NULL by our disconnect handler */
171 int release; /* release number */ 206 struct mutex udevlock;
172 int features; /* feature bits */ 207
173 char serial[30]; /* serial number (string) */ 208 /* type of cam (645, 646, 675, 680, 690, 720, 730, 740, 750) */
174 int error_status; /* set when something goes wrong with the cam (unplugged, USB errors) */ 209 int type;
175 int usb_init; /* set when the cam has been initialized over USB */ 210 int release; /* release number */
176 211 int features; /* feature bits */
177 /*** Video data ***/ 212 char serial[30]; /* serial number (string) */
178 int vopen; /* flag */ 213
179 int vendpoint; /* video isoc endpoint */ 214 /*** Video data ***/
180 int vcinterface; /* video control interface */ 215 struct file *capt_file; /* file doing video capture */
181 int valternate; /* alternate interface needed */ 216 int vendpoint; /* video isoc endpoint */
182 int vframes, vsize; /* frames-per-second & size (see PSZ_*) */ 217 int vcinterface; /* video control interface */
183 int pixfmt; /* pixelformat: V4L2_PIX_FMT_YUV420 or raw: _PWC1, _PWC2 */ 218 int valternate; /* alternate interface needed */
184 int vframe_count; /* received frames */ 219 int vframes, vsize; /* frames-per-second & size (see PSZ_*) */
185 int vframes_dumped; /* counter for dumped frames */ 220 int pixfmt; /* pixelformat: V4L2_PIX_FMT_YUV420 or _PWCX */
186 int vframes_error; /* frames received in error */ 221 int vframe_count; /* received frames */
187 int vmax_packet_size; /* USB maxpacket size */ 222 int vmax_packet_size; /* USB maxpacket size */
188 int vlast_packet_size; /* for frame synchronisation */ 223 int vlast_packet_size; /* for frame synchronisation */
189 int visoc_errors; /* number of contiguous ISOC errors */ 224 int visoc_errors; /* number of contiguous ISOC errors */
190 int vcompression; /* desired compression factor */ 225 int vcompression; /* desired compression factor */
191 int vbandlength; /* compressed band length; 0 is uncompressed */ 226 int vbandlength; /* compressed band length; 0 is uncompressed */
192 char vsnapshot; /* snapshot mode */ 227 char vsnapshot; /* snapshot mode */
193 char vsync; /* used by isoc handler */ 228 char vsync; /* used by isoc handler */
194 char vmirror; /* for ToUCaM series */ 229 char vmirror; /* for ToUCaM series */
195 char unplugged; 230 char power_save; /* Do powersaving for this cam */
196 231
197 int cmd_len; 232 int cmd_len;
198 unsigned char cmd_buf[13]; 233 unsigned char cmd_buf[13];
199 234
200 /* The image acquisition requires 3 to 4 steps: 235 struct urb *urbs[MAX_ISO_BUFS];
201 1. data is gathered in short packets from the USB controller 236 char iso_init;
202 2. data is synchronized and packed into a frame buffer 237
203 3a. in case data is compressed, decompress it directly into image buffer 238 /* videobuf2 queue and queued buffers list */
204 3b. in case data is uncompressed, copy into image buffer with viewport 239 struct vb2_queue vb_queue;
205 4. data is transferred to the user process 240 struct list_head queued_bufs;
206 241 spinlock_t queued_bufs_lock;
207 Note that MAX_ISO_BUFS != MAX_FRAMES != MAX_IMAGES.... 242
208 We have in effect a back-to-back-double-buffer system. 243 /*
209 */ 244 * Frame currently being filled, this only gets touched by the
210 /* 1: isoc */ 245 * isoc urb complete handler, and by stream start / stop since
211 struct pwc_iso_buf sbuf[MAX_ISO_BUFS]; 246 * start / stop touch it before / after starting / killing the urbs
212 char iso_init; 247 * no locking is needed around this
213 248 */
214 /* 2: frame */ 249 struct pwc_frame_buf *fill_buf;
215 struct pwc_frame_buf *fbuf; /* all frames */ 250
216 struct pwc_frame_buf *empty_frames, *empty_frames_tail; /* all empty frames */ 251 int frame_header_size, frame_trailer_size;
217 struct pwc_frame_buf *full_frames, *full_frames_tail; /* all filled frames */ 252 int frame_size;
218 struct pwc_frame_buf *fill_frame; /* frame currently being filled */ 253 int frame_total_size; /* including header & trailer */
219 struct pwc_frame_buf *read_frame; /* frame currently read by user process */ 254 int drop_frames;
220 int frame_header_size, frame_trailer_size; 255
221 int frame_size; 256 void *decompress_data; /* private data for decompression engine */
222 int frame_total_size; /* including header & trailer */ 257
223 int drop_frames; 258 /*
224 259 * We have an 'image' and a 'view', where 'image' is the fixed-size img
225 /* 3: decompression */ 260 * as delivered by the camera, and 'view' is the size requested by the
226 void *decompress_data; /* private data for decompression engine */ 261 * program. The camera image is centered in this viewport, laced with
227 262 * a gray or black border. view_min <= image <= view <= view_max;
228 /* 4: image */ 263 */
229 /* We have an 'image' and a 'view', where 'image' is the fixed-size image 264 int image_mask; /* supported sizes */
230 as delivered by the camera, and 'view' is the size requested by the 265 struct pwc_coord view_min, view_max; /* minimum and maximum view */
231 program. The camera image is centered in this viewport, laced with 266 struct pwc_coord abs_max; /* maximum supported size */
232 a gray or black border. view_min <= image <= view <= view_max; 267 struct pwc_coord image, view; /* image and viewport size */
233 */ 268 struct pwc_coord offset; /* offset of the viewport */
234 int image_mask; /* bitmask of supported sizes */ 269
235 struct pwc_coord view_min, view_max; /* minimum and maximum viewable sizes */ 270 /*** motorized pan/tilt feature */
236 struct pwc_coord abs_max; /* maximum supported size with compression */ 271 struct pwc_mpt_range angle_range;
237 struct pwc_coord image, view; /* image and viewport size */ 272 int pan_angle; /* in degrees * 100 */
238 struct pwc_coord offset; /* offset within the viewport */ 273 int tilt_angle; /* absolute angle; 0,0 is home */
239 274
240 void *image_data; /* total buffer, which is subdivided into ... */ 275 /*
241 struct pwc_imgbuf images[MAX_IMAGES];/* ...several images... */ 276 * Set to 1 when the user push the button, reset to 0
242 int fill_image; /* ...which are rotated. */ 277 * when this value is read from sysfs.
243 int len_per_image; /* length per image */ 278 */
244 int image_read_pos; /* In case we read data in pieces, keep track of were we are in the imagebuffer */ 279 int snapshot_button_status;
245 int image_used[MAX_IMAGES]; /* For MCAPTURE and SYNC */
246
247 struct mutex modlock; /* to prevent races in video_open(), etc */
248 spinlock_t ptrlock; /* for manipulating the buffer pointers */
249
250 /*** motorized pan/tilt feature */
251 struct pwc_mpt_range angle_range;
252 int pan_angle; /* in degrees * 100 */
253 int tilt_angle; /* absolute angle; 0,0 is home position */
254 int snapshot_button_status; /* set to 1 when the user push the button, reset to 0 when this value is read */
255#ifdef CONFIG_USB_PWC_INPUT_EVDEV 280#ifdef CONFIG_USB_PWC_INPUT_EVDEV
256 struct input_dev *button_dev; /* webcam snapshot button input */ 281 struct input_dev *button_dev; /* webcam snapshot button input */
257 char button_phys[64]; 282 char button_phys[64];
258#endif 283#endif
259 284
260 /*** Misc. data ***/ 285 /* controls */
261 wait_queue_head_t frameq; /* When waiting for a frame to finish... */ 286 struct v4l2_ctrl_handler ctrl_handler;
262#if PWC_INT_PIPE 287 u16 saturation_fmt;
263 void *usb_int_handler; /* for the interrupt endpoint */ 288 struct v4l2_ctrl *brightness;
264#endif 289 struct v4l2_ctrl *contrast;
290 struct v4l2_ctrl *saturation;
291 struct v4l2_ctrl *gamma;
292 struct {
293 /* awb / red-blue balance cluster */
294 struct v4l2_ctrl *auto_white_balance;
295 struct v4l2_ctrl *red_balance;
296 struct v4l2_ctrl *blue_balance;
297 /* usb ctrl transfers are slow, so we cache things */
298 int color_bal_valid;
299 unsigned long last_color_bal_update; /* In jiffies */
300 s32 last_red_balance;
301 s32 last_blue_balance;
302 };
303 struct {
304 /* autogain / gain cluster */
305 struct v4l2_ctrl *autogain;
306 struct v4l2_ctrl *gain;
307 int gain_valid;
308 unsigned long last_gain_update; /* In jiffies */
309 s32 last_gain;
310 };
311 struct {
312 /* exposure_auto / exposure cluster */
313 struct v4l2_ctrl *exposure_auto;
314 struct v4l2_ctrl *exposure;
315 int exposure_valid;
316 unsigned long last_exposure_update; /* In jiffies */
317 s32 last_exposure;
318 };
319 struct v4l2_ctrl *colorfx;
320 struct {
321 /* autocontour/contour cluster */
322 struct v4l2_ctrl *autocontour;
323 struct v4l2_ctrl *contour;
324 };
325 struct v4l2_ctrl *backlight;
326 struct v4l2_ctrl *flicker;
327 struct v4l2_ctrl *noise_reduction;
328 struct v4l2_ctrl *save_user;
329 struct v4l2_ctrl *restore_user;
330 struct v4l2_ctrl *restore_factory;
331 struct {
332 /* motor control cluster */
333 struct v4l2_ctrl *motor_pan;
334 struct v4l2_ctrl *motor_tilt;
335 struct v4l2_ctrl *motor_pan_reset;
336 struct v4l2_ctrl *motor_tilt_reset;
337 };
338 /* CODEC3 models have both gain and exposure controlled by autogain */
339 struct v4l2_ctrl *autogain_expo_cluster[3];
265}; 340};
266 341
267#ifdef __cplusplus
268extern "C" {
269#endif
270
271/* Global variables */ 342/* Global variables */
272#ifdef CONFIG_USB_PWC_DEBUG 343#ifdef CONFIG_USB_PWC_DEBUG
273extern int pwc_trace; 344extern int pwc_trace;
274#endif 345#endif
275extern int pwc_mbufs;
276
277/** functions in pwc-if.c */
278int pwc_handle_frame(struct pwc_device *pdev);
279void pwc_next_image(struct pwc_device *pdev);
280int pwc_isoc_init(struct pwc_device *pdev);
281void pwc_isoc_cleanup(struct pwc_device *pdev);
282 346
283/** Functions in pwc-misc.c */ 347/** Functions in pwc-misc.c */
284/* sizes in pixels */ 348/* sizes in pixels */
@@ -291,50 +355,25 @@ void pwc_construct(struct pwc_device *pdev);
291/* Request a certain video mode. Returns < 0 if not possible */ 355/* Request a certain video mode. Returns < 0 if not possible */
292extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frames, int compression, int snapshot); 356extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frames, int compression, int snapshot);
293extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size); 357extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size);
294/* Calculate the number of bytes per image (not frame) */
295extern int pwc_mpt_reset(struct pwc_device *pdev, int flags); 358extern int pwc_mpt_reset(struct pwc_device *pdev, int flags);
296extern int pwc_mpt_set_angle(struct pwc_device *pdev, int pan, int tilt); 359extern int pwc_mpt_set_angle(struct pwc_device *pdev, int pan, int tilt);
297
298/* Various controls; should be obvious. Value 0..65535, or < 0 on error */
299extern int pwc_get_brightness(struct pwc_device *pdev);
300extern int pwc_set_brightness(struct pwc_device *pdev, int value);
301extern int pwc_get_contrast(struct pwc_device *pdev);
302extern int pwc_set_contrast(struct pwc_device *pdev, int value);
303extern int pwc_get_gamma(struct pwc_device *pdev);
304extern int pwc_set_gamma(struct pwc_device *pdev, int value);
305extern int pwc_get_saturation(struct pwc_device *pdev, int *value);
306extern int pwc_set_saturation(struct pwc_device *pdev, int value);
307extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value); 360extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value);
308extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor); 361extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor);
309extern int pwc_restore_user(struct pwc_device *pdev); 362extern int send_control_msg(struct pwc_device *pdev,
310extern int pwc_save_user(struct pwc_device *pdev); 363 u8 request, u16 value, void *buf, int buflen);
311extern int pwc_restore_factory(struct pwc_device *pdev); 364
312 365/* Control get / set helpers */
313/* exported for use by v4l2 controls */ 366int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data);
314extern int pwc_get_red_gain(struct pwc_device *pdev, int *value); 367int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data);
315extern int pwc_set_red_gain(struct pwc_device *pdev, int value); 368int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data);
316extern int pwc_get_blue_gain(struct pwc_device *pdev, int *value); 369#define pwc_set_s8_ctrl pwc_set_u8_ctrl
317extern int pwc_set_blue_gain(struct pwc_device *pdev, int value); 370int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *dat);
318extern int pwc_get_awb(struct pwc_device *pdev); 371int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data);
319extern int pwc_set_awb(struct pwc_device *pdev, int mode); 372int pwc_button_ctrl(struct pwc_device *pdev, u16 value);
320extern int pwc_set_agc(struct pwc_device *pdev, int mode, int value); 373int pwc_init_controls(struct pwc_device *pdev);
321extern int pwc_get_agc(struct pwc_device *pdev, int *value);
322extern int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value);
323extern int pwc_get_shutter_speed(struct pwc_device *pdev, int *value);
324
325extern int pwc_set_colour_mode(struct pwc_device *pdev, int colour);
326extern int pwc_get_colour_mode(struct pwc_device *pdev, int *colour);
327extern int pwc_set_contour(struct pwc_device *pdev, int contour);
328extern int pwc_get_contour(struct pwc_device *pdev, int *contour);
329extern int pwc_set_backlight(struct pwc_device *pdev, int backlight);
330extern int pwc_get_backlight(struct pwc_device *pdev, int *backlight);
331extern int pwc_set_flicker(struct pwc_device *pdev, int flicker);
332extern int pwc_get_flicker(struct pwc_device *pdev, int *flicker);
333extern int pwc_set_dynamic_noise(struct pwc_device *pdev, int noise);
334extern int pwc_get_dynamic_noise(struct pwc_device *pdev, int *noise);
335 374
336/* Power down or up the camera; not supported by all models */ 375/* Power down or up the camera; not supported by all models */
337extern int pwc_camera_power(struct pwc_device *pdev, int power); 376extern void pwc_camera_power(struct pwc_device *pdev, int power);
338 377
339/* Private ioctl()s; see pwc-ioctl.h */ 378/* Private ioctl()s; see pwc-ioctl.h */
340extern long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg); 379extern long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg);
@@ -343,12 +382,6 @@ extern const struct v4l2_ioctl_ops pwc_ioctl_ops;
343 382
344/** pwc-uncompress.c */ 383/** pwc-uncompress.c */
345/* Expand frame to image, possibly including decompression. Uses read_frame and fill_image */ 384/* Expand frame to image, possibly including decompression. Uses read_frame and fill_image */
346extern int pwc_decompress(struct pwc_device *pdev); 385int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf);
347
348#ifdef __cplusplus
349}
350#endif
351
352 386
353#endif 387#endif
354/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index b42bfa5ccdf2..d07df22a5ec6 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -22,7 +22,6 @@
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/moduleparam.h> 23#include <linux/moduleparam.h>
24#include <linux/time.h> 24#include <linux/time.h>
25#include <linux/version.h>
26#include <linux/device.h> 25#include <linux/device.h>
27#include <linux/platform_device.h> 26#include <linux/platform_device.h>
28#include <linux/clk.h> 27#include <linux/clk.h>
@@ -40,7 +39,7 @@
40#include <mach/dma.h> 39#include <mach/dma.h>
41#include <mach/camera.h> 40#include <mach/camera.h>
42 41
43#define PXA_CAM_VERSION_CODE KERNEL_VERSION(0, 0, 5) 42#define PXA_CAM_VERSION "0.0.6"
44#define PXA_CAM_DRV_NAME "pxa27x-camera" 43#define PXA_CAM_DRV_NAME "pxa27x-camera"
45 44
46/* Camera Interface */ 45/* Camera Interface */
@@ -247,7 +246,7 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
247 if (bytes_per_line < 0) 246 if (bytes_per_line < 0)
248 return bytes_per_line; 247 return bytes_per_line;
249 248
250 dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size); 249 dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
251 250
252 *size = bytes_per_line * icd->user_height; 251 *size = bytes_per_line * icd->user_height;
253 252
@@ -262,13 +261,13 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
262static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf) 261static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
263{ 262{
264 struct soc_camera_device *icd = vq->priv_data; 263 struct soc_camera_device *icd = vq->priv_data;
265 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 264 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
266 struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb); 265 struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
267 int i; 266 int i;
268 267
269 BUG_ON(in_interrupt()); 268 BUG_ON(in_interrupt());
270 269
271 dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 270 dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
272 &buf->vb, buf->vb.baddr, buf->vb.bsize); 271 &buf->vb, buf->vb.baddr, buf->vb.bsize);
273 272
274 /* 273 /*
@@ -429,7 +428,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
429 struct videobuf_buffer *vb, enum v4l2_field field) 428 struct videobuf_buffer *vb, enum v4l2_field field)
430{ 429{
431 struct soc_camera_device *icd = vq->priv_data; 430 struct soc_camera_device *icd = vq->priv_data;
432 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 431 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
433 struct pxa_camera_dev *pcdev = ici->priv; 432 struct pxa_camera_dev *pcdev = ici->priv;
434 struct device *dev = pcdev->soc_host.v4l2_dev.dev; 433 struct device *dev = pcdev->soc_host.v4l2_dev.dev;
435 struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); 434 struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
@@ -636,11 +635,11 @@ static void pxa_videobuf_queue(struct videobuf_queue *vq,
636 struct videobuf_buffer *vb) 635 struct videobuf_buffer *vb)
637{ 636{
638 struct soc_camera_device *icd = vq->priv_data; 637 struct soc_camera_device *icd = vq->priv_data;
639 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 638 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
640 struct pxa_camera_dev *pcdev = ici->priv; 639 struct pxa_camera_dev *pcdev = ici->priv;
641 struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); 640 struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
642 641
643 dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d active=%p\n", 642 dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d active=%p\n",
644 __func__, vb, vb->baddr, vb->bsize, pcdev->active); 643 __func__, vb, vb->baddr, vb->bsize, pcdev->active);
645 644
646 list_add_tail(&vb->queue, &pcdev->capture); 645 list_add_tail(&vb->queue, &pcdev->capture);
@@ -658,7 +657,7 @@ static void pxa_videobuf_release(struct videobuf_queue *vq,
658 struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); 657 struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
659#ifdef DEBUG 658#ifdef DEBUG
660 struct soc_camera_device *icd = vq->priv_data; 659 struct soc_camera_device *icd = vq->priv_data;
661 struct device *dev = icd->dev.parent; 660 struct device *dev = icd->parent;
662 661
663 dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 662 dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
664 vb, vb->baddr, vb->bsize); 663 vb, vb->baddr, vb->bsize);
@@ -843,7 +842,7 @@ static struct videobuf_queue_ops pxa_videobuf_ops = {
843static void pxa_camera_init_videobuf(struct videobuf_queue *q, 842static void pxa_camera_init_videobuf(struct videobuf_queue *q,
844 struct soc_camera_device *icd) 843 struct soc_camera_device *icd)
845{ 844{
846 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 845 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
847 struct pxa_camera_dev *pcdev = ici->priv; 846 struct pxa_camera_dev *pcdev = ici->priv;
848 847
849 /* 848 /*
@@ -972,7 +971,7 @@ static irqreturn_t pxa_camera_irq(int irq, void *data)
972 */ 971 */
973static int pxa_camera_add_device(struct soc_camera_device *icd) 972static int pxa_camera_add_device(struct soc_camera_device *icd)
974{ 973{
975 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 974 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
976 struct pxa_camera_dev *pcdev = ici->priv; 975 struct pxa_camera_dev *pcdev = ici->priv;
977 976
978 if (pcdev->icd) 977 if (pcdev->icd)
@@ -982,7 +981,7 @@ static int pxa_camera_add_device(struct soc_camera_device *icd)
982 981
983 pcdev->icd = icd; 982 pcdev->icd = icd;
984 983
985 dev_info(icd->dev.parent, "PXA Camera driver attached to camera %d\n", 984 dev_info(icd->parent, "PXA Camera driver attached to camera %d\n",
986 icd->devnum); 985 icd->devnum);
987 986
988 return 0; 987 return 0;
@@ -991,12 +990,12 @@ static int pxa_camera_add_device(struct soc_camera_device *icd)
991/* Called with .video_lock held */ 990/* Called with .video_lock held */
992static void pxa_camera_remove_device(struct soc_camera_device *icd) 991static void pxa_camera_remove_device(struct soc_camera_device *icd)
993{ 992{
994 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 993 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
995 struct pxa_camera_dev *pcdev = ici->priv; 994 struct pxa_camera_dev *pcdev = ici->priv;
996 995
997 BUG_ON(icd != pcdev->icd); 996 BUG_ON(icd != pcdev->icd);
998 997
999 dev_info(icd->dev.parent, "PXA Camera driver detached from camera %d\n", 998 dev_info(icd->parent, "PXA Camera driver detached from camera %d\n",
1000 icd->devnum); 999 icd->devnum);
1001 1000
1002 /* disable capture, disable interrupts */ 1001 /* disable capture, disable interrupts */
@@ -1057,7 +1056,7 @@ static int test_platform_param(struct pxa_camera_dev *pcdev,
1057static void pxa_camera_setup_cicr(struct soc_camera_device *icd, 1056static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
1058 unsigned long flags, __u32 pixfmt) 1057 unsigned long flags, __u32 pixfmt)
1059{ 1058{
1060 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1059 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
1061 struct pxa_camera_dev *pcdev = ici->priv; 1060 struct pxa_camera_dev *pcdev = ici->priv;
1062 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1061 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1063 unsigned long dw, bpp; 1062 unsigned long dw, bpp;
@@ -1152,7 +1151,7 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
1152 1151
1153static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) 1152static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
1154{ 1153{
1155 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1154 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
1156 struct pxa_camera_dev *pcdev = ici->priv; 1155 struct pxa_camera_dev *pcdev = ici->priv;
1157 unsigned long bus_flags, camera_flags, common_flags; 1156 unsigned long bus_flags, camera_flags, common_flags;
1158 int ret; 1157 int ret;
@@ -1210,7 +1209,7 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
1210static int pxa_camera_try_bus_param(struct soc_camera_device *icd, 1209static int pxa_camera_try_bus_param(struct soc_camera_device *icd,
1211 unsigned char buswidth) 1210 unsigned char buswidth)
1212{ 1211{
1213 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1212 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
1214 struct pxa_camera_dev *pcdev = ici->priv; 1213 struct pxa_camera_dev *pcdev = ici->priv;
1215 unsigned long bus_flags, camera_flags; 1214 unsigned long bus_flags, camera_flags;
1216 int ret = test_platform_param(pcdev, buswidth, &bus_flags); 1215 int ret = test_platform_param(pcdev, buswidth, &bus_flags);
@@ -1247,7 +1246,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int id
1247 struct soc_camera_format_xlate *xlate) 1246 struct soc_camera_format_xlate *xlate)
1248{ 1247{
1249 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1248 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1250 struct device *dev = icd->dev.parent; 1249 struct device *dev = icd->parent;
1251 int formats = 0, ret; 1250 int formats = 0, ret;
1252 struct pxa_cam *cam; 1251 struct pxa_cam *cam;
1253 enum v4l2_mbus_pixelcode code; 1252 enum v4l2_mbus_pixelcode code;
@@ -1335,9 +1334,9 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
1335 struct v4l2_crop *a) 1334 struct v4l2_crop *a)
1336{ 1335{
1337 struct v4l2_rect *rect = &a->c; 1336 struct v4l2_rect *rect = &a->c;
1338 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1337 struct device *dev = icd->parent;
1338 struct soc_camera_host *ici = to_soc_camera_host(dev);
1339 struct pxa_camera_dev *pcdev = ici->priv; 1339 struct pxa_camera_dev *pcdev = ici->priv;
1340 struct device *dev = icd->dev.parent;
1341 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1340 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1342 struct soc_camera_sense sense = { 1341 struct soc_camera_sense sense = {
1343 .master_clock = pcdev->mclk, 1342 .master_clock = pcdev->mclk,
@@ -1379,7 +1378,7 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
1379 return ret; 1378 return ret;
1380 1379
1381 if (pxa_camera_check_frame(mf.width, mf.height)) { 1380 if (pxa_camera_check_frame(mf.width, mf.height)) {
1382 dev_warn(icd->dev.parent, 1381 dev_warn(icd->parent,
1383 "Inconsistent state. Use S_FMT to repair\n"); 1382 "Inconsistent state. Use S_FMT to repair\n");
1384 return -EINVAL; 1383 return -EINVAL;
1385 } 1384 }
@@ -1406,9 +1405,9 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
1406static int pxa_camera_set_fmt(struct soc_camera_device *icd, 1405static int pxa_camera_set_fmt(struct soc_camera_device *icd,
1407 struct v4l2_format *f) 1406 struct v4l2_format *f)
1408{ 1407{
1409 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1408 struct device *dev = icd->parent;
1409 struct soc_camera_host *ici = to_soc_camera_host(dev);
1410 struct pxa_camera_dev *pcdev = ici->priv; 1410 struct pxa_camera_dev *pcdev = ici->priv;
1411 struct device *dev = icd->dev.parent;
1412 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1411 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1413 const struct soc_camera_format_xlate *xlate = NULL; 1412 const struct soc_camera_format_xlate *xlate = NULL;
1414 struct soc_camera_sense sense = { 1413 struct soc_camera_sense sense = {
@@ -1485,7 +1484,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
1485 1484
1486 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); 1485 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
1487 if (!xlate) { 1486 if (!xlate) {
1488 dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt); 1487 dev_warn(icd->parent, "Format %x not found\n", pixfmt);
1489 return -EINVAL; 1488 return -EINVAL;
1490 } 1489 }
1491 1490
@@ -1499,16 +1498,11 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
1499 &pix->height, 32, 2048, 0, 1498 &pix->height, 32, 2048, 0,
1500 pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0); 1499 pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0);
1501 1500
1502 pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
1503 xlate->host_fmt);
1504 if (pix->bytesperline < 0)
1505 return pix->bytesperline;
1506 pix->sizeimage = pix->height * pix->bytesperline;
1507
1508 /* limit to sensor capabilities */ 1501 /* limit to sensor capabilities */
1509 mf.width = pix->width; 1502 mf.width = pix->width;
1510 mf.height = pix->height; 1503 mf.height = pix->height;
1511 mf.field = pix->field; 1504 /* Only progressive video supported so far */
1505 mf.field = V4L2_FIELD_NONE;
1512 mf.colorspace = pix->colorspace; 1506 mf.colorspace = pix->colorspace;
1513 mf.code = xlate->code; 1507 mf.code = xlate->code;
1514 1508
@@ -1527,7 +1521,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
1527 break; 1521 break;
1528 default: 1522 default:
1529 /* TODO: support interlaced at least in pass-through mode */ 1523 /* TODO: support interlaced at least in pass-through mode */
1530 dev_err(icd->dev.parent, "Field type %d unsupported.\n", 1524 dev_err(icd->parent, "Field type %d unsupported.\n",
1531 mf.field); 1525 mf.field);
1532 return -EINVAL; 1526 return -EINVAL;
1533 } 1527 }
@@ -1578,15 +1572,14 @@ static int pxa_camera_querycap(struct soc_camera_host *ici,
1578{ 1572{
1579 /* cap->name is set by the firendly caller:-> */ 1573 /* cap->name is set by the firendly caller:-> */
1580 strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card)); 1574 strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
1581 cap->version = PXA_CAM_VERSION_CODE;
1582 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; 1575 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
1583 1576
1584 return 0; 1577 return 0;
1585} 1578}
1586 1579
1587static int pxa_camera_suspend(struct soc_camera_device *icd, pm_message_t state) 1580static int pxa_camera_suspend(struct device *dev)
1588{ 1581{
1589 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1582 struct soc_camera_host *ici = to_soc_camera_host(dev);
1590 struct pxa_camera_dev *pcdev = ici->priv; 1583 struct pxa_camera_dev *pcdev = ici->priv;
1591 int i = 0, ret = 0; 1584 int i = 0, ret = 0;
1592 1585
@@ -1596,15 +1589,19 @@ static int pxa_camera_suspend(struct soc_camera_device *icd, pm_message_t state)
1596 pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR3); 1589 pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR3);
1597 pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR4); 1590 pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR4);
1598 1591
1599 if ((pcdev->icd) && (pcdev->icd->ops->suspend)) 1592 if (pcdev->icd) {
1600 ret = pcdev->icd->ops->suspend(pcdev->icd, state); 1593 struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->icd);
1594 ret = v4l2_subdev_call(sd, core, s_power, 0);
1595 if (ret == -ENOIOCTLCMD)
1596 ret = 0;
1597 }
1601 1598
1602 return ret; 1599 return ret;
1603} 1600}
1604 1601
1605static int pxa_camera_resume(struct soc_camera_device *icd) 1602static int pxa_camera_resume(struct device *dev)
1606{ 1603{
1607 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1604 struct soc_camera_host *ici = to_soc_camera_host(dev);
1608 struct pxa_camera_dev *pcdev = ici->priv; 1605 struct pxa_camera_dev *pcdev = ici->priv;
1609 int i = 0, ret = 0; 1606 int i = 0, ret = 0;
1610 1607
@@ -1618,8 +1615,12 @@ static int pxa_camera_resume(struct soc_camera_device *icd)
1618 __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR3); 1615 __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR3);
1619 __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR4); 1616 __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR4);
1620 1617
1621 if ((pcdev->icd) && (pcdev->icd->ops->resume)) 1618 if (pcdev->icd) {
1622 ret = pcdev->icd->ops->resume(pcdev->icd); 1619 struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->icd);
1620 ret = v4l2_subdev_call(sd, core, s_power, 1);
1621 if (ret == -ENOIOCTLCMD)
1622 ret = 0;
1623 }
1623 1624
1624 /* Restart frame capture if active buffer exists */ 1625 /* Restart frame capture if active buffer exists */
1625 if (!ret && pcdev->active) 1626 if (!ret && pcdev->active)
@@ -1632,8 +1633,6 @@ static struct soc_camera_host_ops pxa_soc_camera_host_ops = {
1632 .owner = THIS_MODULE, 1633 .owner = THIS_MODULE,
1633 .add = pxa_camera_add_device, 1634 .add = pxa_camera_add_device,
1634 .remove = pxa_camera_remove_device, 1635 .remove = pxa_camera_remove_device,
1635 .suspend = pxa_camera_suspend,
1636 .resume = pxa_camera_resume,
1637 .set_crop = pxa_camera_set_crop, 1636 .set_crop = pxa_camera_set_crop,
1638 .get_formats = pxa_camera_get_formats, 1637 .get_formats = pxa_camera_get_formats,
1639 .put_formats = pxa_camera_put_formats, 1638 .put_formats = pxa_camera_put_formats,
@@ -1818,9 +1817,15 @@ static int __devexit pxa_camera_remove(struct platform_device *pdev)
1818 return 0; 1817 return 0;
1819} 1818}
1820 1819
1820static struct dev_pm_ops pxa_camera_pm = {
1821 .suspend = pxa_camera_suspend,
1822 .resume = pxa_camera_resume,
1823};
1824
1821static struct platform_driver pxa_camera_driver = { 1825static struct platform_driver pxa_camera_driver = {
1822 .driver = { 1826 .driver = {
1823 .name = PXA_CAM_DRV_NAME, 1827 .name = PXA_CAM_DRV_NAME,
1828 .pm = &pxa_camera_pm,
1824 }, 1829 },
1825 .probe = pxa_camera_probe, 1830 .probe = pxa_camera_probe,
1826 .remove = __devexit_p(pxa_camera_remove), 1831 .remove = __devexit_p(pxa_camera_remove),
@@ -1843,4 +1848,5 @@ module_exit(pxa_camera_exit);
1843MODULE_DESCRIPTION("PXA27x SoC Camera Host driver"); 1848MODULE_DESCRIPTION("PXA27x SoC Camera Host driver");
1844MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>"); 1849MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
1845MODULE_LICENSE("GPL"); 1850MODULE_LICENSE("GPL");
1851MODULE_VERSION(PXA_CAM_VERSION);
1846MODULE_ALIAS("platform:" PXA_CAM_DRV_NAME); 1852MODULE_ALIAS("platform:" PXA_CAM_DRV_NAME);
diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c
index 57e11b6f19fb..847ccc067e87 100644
--- a/drivers/media/video/rj54n1cb0c.c
+++ b/drivers/media/video/rj54n1cb0c.c
@@ -1364,10 +1364,9 @@ static int rj54n1_video_probe(struct soc_camera_device *icd,
1364 int data1, data2; 1364 int data1, data2;
1365 int ret; 1365 int ret;
1366 1366
1367 /* This could be a BUG_ON() or a WARN_ON(), or remove it completely */ 1367 /* We must have a parent by now. And it cannot be a wrong one. */
1368 if (!icd->dev.parent || 1368 BUG_ON(!icd->parent ||
1369 to_soc_camera_host(icd->dev.parent)->nr != icd->iface) 1369 to_soc_camera_host(icd->parent)->nr != icd->iface);
1370 return -ENODEV;
1371 1370
1372 /* Read out the chip version register */ 1371 /* Read out the chip version register */
1373 data1 = reg_read(client, RJ54N1_DEV_CODE); 1372 data1 = reg_read(client, RJ54N1_DEV_CODE);
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 5b9dce85645c..803c9c82e496 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -16,15 +16,10 @@
16 * Example maximum bandwidth utilization: 16 * Example maximum bandwidth utilization:
17 * 17 *
18 * -full size, color mode YUYV or YUV422P: 2 channels at once 18 * -full size, color mode YUYV or YUV422P: 2 channels at once
19 *
20 * -full or half size Grey scale: all 4 channels at once 19 * -full or half size Grey scale: all 4 channels at once
21 *
22 * -half size, color mode YUYV or YUV422P: all 4 channels at once 20 * -half size, color mode YUYV or YUV422P: all 4 channels at once
23 *
24 * -full size, color mode YUYV or YUV422P 1/2 frame rate: all 4 channels 21 * -full size, color mode YUYV or YUV422P 1/2 frame rate: all 4 channels
25 * at once. 22 * at once.
26 * (TODO: Incorporate videodev2 frame rate(FR) enumeration,
27 * which is currently experimental.)
28 * 23 *
29 * This program is free software; you can redistribute it and/or modify 24 * This program is free software; you can redistribute it and/or modify
30 * it under the terms of the GNU General Public License as published by 25 * it under the terms of the GNU General Public License as published by
@@ -47,7 +42,6 @@
47#include <linux/mutex.h> 42#include <linux/mutex.h>
48#include <linux/slab.h> 43#include <linux/slab.h>
49#include <linux/videodev2.h> 44#include <linux/videodev2.h>
50#include <linux/version.h>
51#include <linux/mm.h> 45#include <linux/mm.h>
52#include <media/videobuf-vmalloc.h> 46#include <media/videobuf-vmalloc.h>
53#include <media/v4l2-common.h> 47#include <media/v4l2-common.h>
@@ -56,12 +50,7 @@
56#include <linux/vmalloc.h> 50#include <linux/vmalloc.h>
57#include <linux/usb.h> 51#include <linux/usb.h>
58 52
59#define S2255_MAJOR_VERSION 1 53#define S2255_VERSION "1.22.1"
60#define S2255_MINOR_VERSION 21
61#define S2255_RELEASE 0
62#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \
63 S2255_MINOR_VERSION, \
64 S2255_RELEASE)
65#define FIRMWARE_FILE_NAME "f2255usb.bin" 54#define FIRMWARE_FILE_NAME "f2255usb.bin"
66 55
67/* default JPEG quality */ 56/* default JPEG quality */
@@ -126,7 +115,7 @@
126#define MASK_COLOR 0x000000ff 115#define MASK_COLOR 0x000000ff
127#define MASK_JPG_QUALITY 0x0000ff00 116#define MASK_JPG_QUALITY 0x0000ff00
128#define MASK_INPUT_TYPE 0x000f0000 117#define MASK_INPUT_TYPE 0x000f0000
129/* frame decimation. Not implemented by V4L yet(experimental in V4L) */ 118/* frame decimation. */
130#define FDEC_1 1 /* capture every frame. default */ 119#define FDEC_1 1 /* capture every frame. default */
131#define FDEC_2 2 /* capture every 2nd frame */ 120#define FDEC_2 2 /* capture every 2nd frame */
132#define FDEC_3 3 /* capture every 3rd frame */ 121#define FDEC_3 3 /* capture every 3rd frame */
@@ -312,9 +301,9 @@ struct s2255_fh {
312}; 301};
313 302
314/* current cypress EEPROM firmware version */ 303/* current cypress EEPROM firmware version */
315#define S2255_CUR_USB_FWVER ((3 << 8) | 11) 304#define S2255_CUR_USB_FWVER ((3 << 8) | 12)
316/* current DSP FW version */ 305/* current DSP FW version */
317#define S2255_CUR_DSP_FWVER 10102 306#define S2255_CUR_DSP_FWVER 10104
318/* Need DSP version 5+ for video status feature */ 307/* Need DSP version 5+ for video status feature */
319#define S2255_MIN_DSP_STATUS 5 308#define S2255_MIN_DSP_STATUS 5
320#define S2255_MIN_DSP_COLORFILTER 8 309#define S2255_MIN_DSP_COLORFILTER 8
@@ -502,7 +491,7 @@ static void planar422p_to_yuv_packed(const unsigned char *in,
502 491
503static void s2255_reset_dsppower(struct s2255_dev *dev) 492static void s2255_reset_dsppower(struct s2255_dev *dev)
504{ 493{
505 s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b01, NULL, 0, 1); 494 s2255_vendor_req(dev, 0x40, 0x0000, 0x0001, NULL, 0, 1);
506 msleep(10); 495 msleep(10);
507 s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1); 496 s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1);
508 msleep(600); 497 msleep(600);
@@ -856,7 +845,6 @@ static int vidioc_querycap(struct file *file, void *priv,
856 strlcpy(cap->driver, "s2255", sizeof(cap->driver)); 845 strlcpy(cap->driver, "s2255", sizeof(cap->driver));
857 strlcpy(cap->card, "s2255", sizeof(cap->card)); 846 strlcpy(cap->card, "s2255", sizeof(cap->card));
858 usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); 847 usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
859 cap->version = S2255_VERSION;
860 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; 848 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
861 return 0; 849 return 0;
862} 850}
@@ -1984,9 +1972,8 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
1984 video_device_node_name(&channel->vdev)); 1972 video_device_node_name(&channel->vdev));
1985 1973
1986 } 1974 }
1987 printk(KERN_INFO "Sensoray 2255 V4L driver Revision: %d.%d\n", 1975 printk(KERN_INFO "Sensoray 2255 V4L driver Revision: %s\n",
1988 S2255_MAJOR_VERSION, 1976 S2255_VERSION);
1989 S2255_MINOR_VERSION);
1990 /* if no channels registered, return error and probe will fail*/ 1977 /* if no channels registered, return error and probe will fail*/
1991 if (atomic_read(&dev->num_channels) == 0) { 1978 if (atomic_read(&dev->num_channels) == 0) {
1992 v4l2_device_unregister(&dev->v4l2_dev); 1979 v4l2_device_unregister(&dev->v4l2_dev);
@@ -2302,15 +2289,12 @@ static int s2255_board_init(struct s2255_dev *dev)
2302 /* query the firmware */ 2289 /* query the firmware */
2303 fw_ver = s2255_get_fx2fw(dev); 2290 fw_ver = s2255_get_fx2fw(dev);
2304 2291
2305 printk(KERN_INFO "2255 usb firmware version %d.%d\n", 2292 printk(KERN_INFO "s2255: usb firmware version %d.%d\n",
2306 (fw_ver >> 8) & 0xff, 2293 (fw_ver >> 8) & 0xff,
2307 fw_ver & 0xff); 2294 fw_ver & 0xff);
2308 2295
2309 if (fw_ver < S2255_CUR_USB_FWVER) 2296 if (fw_ver < S2255_CUR_USB_FWVER)
2310 dev_err(&dev->udev->dev, 2297 printk(KERN_INFO "s2255: newer USB firmware available\n");
2311 "usb firmware not up to date %d.%d\n",
2312 (fw_ver >> 8) & 0xff,
2313 fw_ver & 0xff);
2314 2298
2315 for (j = 0; j < MAX_CHANNELS; j++) { 2299 for (j = 0; j < MAX_CHANNELS; j++) {
2316 struct s2255_channel *channel = &dev->channel[j]; 2300 struct s2255_channel *channel = &dev->channel[j];
@@ -2721,3 +2705,4 @@ module_exit(usb_s2255_exit);
2721MODULE_DESCRIPTION("Sensoray 2255 Video for Linux driver"); 2705MODULE_DESCRIPTION("Sensoray 2255 Video for Linux driver");
2722MODULE_AUTHOR("Dean Anderson (Sensoray Company Inc.)"); 2706MODULE_AUTHOR("Dean Anderson (Sensoray Company Inc.)");
2723MODULE_LICENSE("GPL"); 2707MODULE_LICENSE("GPL");
2708MODULE_VERSION(S2255_VERSION);
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index 81b4a826ee5e..0d730e55605d 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -11,7 +11,6 @@
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/version.h>
15#include <linux/types.h> 14#include <linux/types.h>
16#include <linux/errno.h> 15#include <linux/errno.h>
17#include <linux/bug.h> 16#include <linux/bug.h>
@@ -451,7 +450,6 @@ static int fimc_vidioc_querycap_capture(struct file *file, void *priv,
451 strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1); 450 strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
452 strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1); 451 strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
453 cap->bus_info[0] = 0; 452 cap->bus_info[0] = 0;
454 cap->version = KERNEL_VERSION(1, 0, 0);
455 cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE | 453 cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
456 V4L2_CAP_VIDEO_CAPTURE_MPLANE; 454 V4L2_CAP_VIDEO_CAPTURE_MPLANE;
457 455
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index bdf19ada9172..aa550666cc0b 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -12,7 +12,6 @@
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/version.h>
16#include <linux/types.h> 15#include <linux/types.h>
17#include <linux/errno.h> 16#include <linux/errno.h>
18#include <linux/bug.h> 17#include <linux/bug.h>
@@ -774,7 +773,6 @@ static int fimc_m2m_querycap(struct file *file, void *priv,
774 strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1); 773 strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
775 strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1); 774 strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
776 cap->bus_info[0] = 0; 775 cap->bus_info[0] = 0;
777 cap->version = KERNEL_VERSION(1, 0, 0);
778 cap->capabilities = V4L2_CAP_STREAMING | 776 cap->capabilities = V4L2_CAP_STREAMING |
779 V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT | 777 V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
780 V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE; 778 V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
@@ -1937,3 +1935,4 @@ module_exit(fimc_exit);
1937MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>"); 1935MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
1938MODULE_DESCRIPTION("S5P FIMC camera host interface/video postprocessor driver"); 1936MODULE_DESCRIPTION("S5P FIMC camera host interface/video postprocessor driver");
1939MODULE_LICENSE("GPL"); 1937MODULE_LICENSE("GPL");
1938MODULE_VERSION("1.0.1");
diff --git a/drivers/media/video/s5p-mfc/Makefile b/drivers/media/video/s5p-mfc/Makefile
new file mode 100644
index 000000000000..d0663409af00
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o
2s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o s5p_mfc_opr.o
3s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o
4s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_cmd.o
5s5p-mfc-y += s5p_mfc_pm.o s5p_mfc_shm.o
diff --git a/drivers/media/video/s5p-mfc/regs-mfc.h b/drivers/media/video/s5p-mfc/regs-mfc.h
new file mode 100644
index 000000000000..053a8a872fd7
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/regs-mfc.h
@@ -0,0 +1,413 @@
1/*
2 * Register definition file for Samsung MFC V5.1 Interface (FIMV) driver
3 *
4 * Kamil Debski, Copyright (c) 2010 Samsung Electronics
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10*/
11
12#ifndef _REGS_FIMV_H
13#define _REGS_FIMV_H
14
15#define S5P_FIMV_REG_SIZE (S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR)
16#define S5P_FIMV_REG_COUNT ((S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR) / 4)
17
18/* Number of bits that the buffer address should be shifted for particular
19 * MFC buffers. */
20#define S5P_FIMV_START_ADDR 0x0000
21#define S5P_FIMV_END_ADDR 0xe008
22
23#define S5P_FIMV_SW_RESET 0x0000
24#define S5P_FIMV_RISC_HOST_INT 0x0008
25
26/* Command from HOST to RISC */
27#define S5P_FIMV_HOST2RISC_CMD 0x0030
28#define S5P_FIMV_HOST2RISC_ARG1 0x0034
29#define S5P_FIMV_HOST2RISC_ARG2 0x0038
30#define S5P_FIMV_HOST2RISC_ARG3 0x003c
31#define S5P_FIMV_HOST2RISC_ARG4 0x0040
32
33/* Command from RISC to HOST */
34#define S5P_FIMV_RISC2HOST_CMD 0x0044
35#define S5P_FIMV_RISC2HOST_CMD_MASK 0x1FFFF
36#define S5P_FIMV_RISC2HOST_ARG1 0x0048
37#define S5P_FIMV_RISC2HOST_ARG2 0x004c
38#define S5P_FIMV_RISC2HOST_ARG3 0x0050
39#define S5P_FIMV_RISC2HOST_ARG4 0x0054
40
41#define S5P_FIMV_FW_VERSION 0x0058
42#define S5P_FIMV_SYS_MEM_SZ 0x005c
43#define S5P_FIMV_FW_STATUS 0x0080
44
45/* Memory controller register */
46#define S5P_FIMV_MC_DRAMBASE_ADR_A 0x0508
47#define S5P_FIMV_MC_DRAMBASE_ADR_B 0x050c
48#define S5P_FIMV_MC_STATUS 0x0510
49
50/* Common register */
51#define S5P_FIMV_COMMON_BASE_A 0x0600
52#define S5P_FIMV_COMMON_BASE_B 0x0700
53
54/* Decoder */
55#define S5P_FIMV_DEC_CHROMA_ADR (S5P_FIMV_COMMON_BASE_A)
56#define S5P_FIMV_DEC_LUMA_ADR (S5P_FIMV_COMMON_BASE_B)
57
58/* H.264 decoding */
59#define S5P_FIMV_H264_VERT_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
60 /* vertical neighbor motion vector */
61#define S5P_FIMV_H264_NB_IP_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
62 /* neighbor pixels for intra pred */
63#define S5P_FIMV_H264_MV_ADR (S5P_FIMV_COMMON_BASE_B + 0x80)
64 /* H264 motion vector */
65
66/* MPEG4 decoding */
67#define S5P_FIMV_MPEG4_NB_DCAC_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
68 /* neighbor AC/DC coeff. */
69#define S5P_FIMV_MPEG4_UP_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
70 /* upper neighbor motion vector */
71#define S5P_FIMV_MPEG4_SA_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x94)
72 /* subseq. anchor motion vector */
73#define S5P_FIMV_MPEG4_OT_LINE_ADR (S5P_FIMV_COMMON_BASE_A + 0x98)
74 /* overlap transform line */
75#define S5P_FIMV_MPEG4_SP_ADR (S5P_FIMV_COMMON_BASE_A + 0xa8)
76 /* syntax parser */
77
78/* H.263 decoding */
79#define S5P_FIMV_H263_NB_DCAC_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
80#define S5P_FIMV_H263_UP_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
81#define S5P_FIMV_H263_SA_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x94)
82#define S5P_FIMV_H263_OT_LINE_ADR (S5P_FIMV_COMMON_BASE_A + 0x98)
83
84/* VC-1 decoding */
85#define S5P_FIMV_VC1_NB_DCAC_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
86#define S5P_FIMV_VC1_UP_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
87#define S5P_FIMV_VC1_SA_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x94)
88#define S5P_FIMV_VC1_OT_LINE_ADR (S5P_FIMV_COMMON_BASE_A + 0x98)
89#define S5P_FIMV_VC1_BITPLANE3_ADR (S5P_FIMV_COMMON_BASE_A + 0x9c)
90 /* bitplane3 */
91#define S5P_FIMV_VC1_BITPLANE2_ADR (S5P_FIMV_COMMON_BASE_A + 0xa0)
92 /* bitplane2 */
93#define S5P_FIMV_VC1_BITPLANE1_ADR (S5P_FIMV_COMMON_BASE_A + 0xa4)
94 /* bitplane1 */
95
96/* Encoder */
97#define S5P_FIMV_ENC_REF0_LUMA_ADR (S5P_FIMV_COMMON_BASE_A + 0x1c)
98#define S5P_FIMV_ENC_REF1_LUMA_ADR (S5P_FIMV_COMMON_BASE_A + 0x20)
99 /* reconstructed luma */
100#define S5P_FIMV_ENC_REF0_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B)
101#define S5P_FIMV_ENC_REF1_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x04)
102 /* reconstructed chroma */
103#define S5P_FIMV_ENC_REF2_LUMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x10)
104#define S5P_FIMV_ENC_REF2_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x08)
105#define S5P_FIMV_ENC_REF3_LUMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x14)
106#define S5P_FIMV_ENC_REF3_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x0c)
107
108/* H.264 encoding */
109#define S5P_FIMV_H264_UP_MV_ADR (S5P_FIMV_COMMON_BASE_A)
110 /* upper motion vector */
111#define S5P_FIMV_H264_NBOR_INFO_ADR (S5P_FIMV_COMMON_BASE_A + 0x04)
112 /* entropy engine's neighbor info. */
113#define S5P_FIMV_H264_UP_INTRA_MD_ADR (S5P_FIMV_COMMON_BASE_A + 0x08)
114 /* upper intra MD */
115#define S5P_FIMV_H264_COZERO_FLAG_ADR (S5P_FIMV_COMMON_BASE_A + 0x10)
116 /* direct cozero flag */
117#define S5P_FIMV_H264_UP_INTRA_PRED_ADR (S5P_FIMV_COMMON_BASE_B + 0x40)
118 /* upper intra PRED */
119
120/* H.263 encoding */
121#define S5P_FIMV_H263_UP_MV_ADR (S5P_FIMV_COMMON_BASE_A)
122 /* upper motion vector */
123#define S5P_FIMV_H263_ACDC_COEF_ADR (S5P_FIMV_COMMON_BASE_A + 0x04)
124 /* upper Q coeff. */
125
126/* MPEG4 encoding */
127#define S5P_FIMV_MPEG4_UP_MV_ADR (S5P_FIMV_COMMON_BASE_A)
128 /* upper motion vector */
129#define S5P_FIMV_MPEG4_ACDC_COEF_ADR (S5P_FIMV_COMMON_BASE_A + 0x04)
130 /* upper Q coeff. */
131#define S5P_FIMV_MPEG4_COZERO_FLAG_ADR (S5P_FIMV_COMMON_BASE_A + 0x10)
132 /* direct cozero flag */
133
134#define S5P_FIMV_ENC_REF_B_LUMA_ADR 0x062c /* ref B Luma addr */
135#define S5P_FIMV_ENC_REF_B_CHROMA_ADR 0x0630 /* ref B Chroma addr */
136
137#define S5P_FIMV_ENC_CUR_LUMA_ADR 0x0718 /* current Luma addr */
138#define S5P_FIMV_ENC_CUR_CHROMA_ADR 0x071C /* current Chroma addr */
139
140/* Codec common register */
141#define S5P_FIMV_ENC_HSIZE_PX 0x0818 /* frame width at encoder */
142#define S5P_FIMV_ENC_VSIZE_PX 0x081c /* frame height at encoder */
143#define S5P_FIMV_ENC_PROFILE 0x0830 /* profile register */
144#define S5P_FIMV_ENC_PROFILE_H264_MAIN 0
145#define S5P_FIMV_ENC_PROFILE_H264_HIGH 1
146#define S5P_FIMV_ENC_PROFILE_H264_BASELINE 2
147#define S5P_FIMV_ENC_PROFILE_MPEG4_SIMPLE 0
148#define S5P_FIMV_ENC_PROFILE_MPEG4_ADVANCED_SIMPLE 1
149#define S5P_FIMV_ENC_PIC_STRUCT 0x083c /* picture field/frame flag */
150#define S5P_FIMV_ENC_LF_CTRL 0x0848 /* loop filter control */
151#define S5P_FIMV_ENC_ALPHA_OFF 0x084c /* loop filter alpha offset */
152#define S5P_FIMV_ENC_BETA_OFF 0x0850 /* loop filter beta offset */
153#define S5P_FIMV_MR_BUSIF_CTRL 0x0854 /* hidden, bus interface ctrl */
154#define S5P_FIMV_ENC_PXL_CACHE_CTRL 0x0a00 /* pixel cache control */
155
156/* Channel & stream interface register */
157#define S5P_FIMV_SI_RTN_CHID 0x2000 /* Return CH inst ID register */
158#define S5P_FIMV_SI_CH0_INST_ID 0x2040 /* codec instance ID */
159#define S5P_FIMV_SI_CH1_INST_ID 0x2080 /* codec instance ID */
160/* Decoder */
161#define S5P_FIMV_SI_VRESOL 0x2004 /* vertical res of decoder */
162#define S5P_FIMV_SI_HRESOL 0x2008 /* horizontal res of decoder */
163#define S5P_FIMV_SI_BUF_NUMBER 0x200c /* number of frames in the
164 decoded pic */
165#define S5P_FIMV_SI_DISPLAY_Y_ADR 0x2010 /* luma addr of displayed pic */
166#define S5P_FIMV_SI_DISPLAY_C_ADR 0x2014 /* chroma addrof displayed pic */
167#define S5P_FIMV_SI_CONSUMED_BYTES 0x2018 /* Consumed number of bytes to
168 decode a frame */
169#define S5P_FIMV_SI_DISPLAY_STATUS 0x201c /* status of decoded picture */
170
171#define S5P_FIMV_SI_CH0_SB_ST_ADR 0x2044 /* start addr of stream buf */
172#define S5P_FIMV_SI_CH0_SB_FRM_SIZE 0x2048 /* size of stream buf */
173#define S5P_FIMV_SI_CH0_DESC_ADR 0x204c /* addr of descriptor buf */
174#define S5P_FIMV_SI_CH0_CPB_SIZE 0x2058 /* max size of coded pic. buf */
175#define S5P_FIMV_SI_CH0_DESC_SIZE 0x205c /* max size of descriptor buf */
176
177#define S5P_FIMV_SI_CH1_SB_ST_ADR 0x2084 /* start addr of stream buf */
178#define S5P_FIMV_SI_CH1_SB_FRM_SIZE 0x2088 /* size of stream buf */
179#define S5P_FIMV_SI_CH1_DESC_ADR 0x208c /* addr of descriptor buf */
180#define S5P_FIMV_SI_CH1_CPB_SIZE 0x2098 /* max size of coded pic. buf */
181#define S5P_FIMV_SI_CH1_DESC_SIZE 0x209c /* max size of descriptor buf */
182
183#define S5P_FIMV_CRC_LUMA0 0x2030 /* luma crc data per frame
184 (top field) */
185#define S5P_FIMV_CRC_CHROMA0 0x2034 /* chroma crc data per frame
186 (top field) */
187#define S5P_FIMV_CRC_LUMA1 0x2038 /* luma crc data per bottom
188 field */
189#define S5P_FIMV_CRC_CHROMA1 0x203c /* chroma crc data per bottom
190 field */
191
192/* Display status */
193#define S5P_FIMV_DEC_STATUS_DECODING_ONLY 0
194#define S5P_FIMV_DEC_STATUS_DECODING_DISPLAY 1
195#define S5P_FIMV_DEC_STATUS_DISPLAY_ONLY 2
196#define S5P_FIMV_DEC_STATUS_DECODING_EMPTY 3
197#define S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK 7
198#define S5P_FIMV_DEC_STATUS_PROGRESSIVE (0<<3)
199#define S5P_FIMV_DEC_STATUS_INTERLACE (1<<3)
200#define S5P_FIMV_DEC_STATUS_INTERLACE_MASK (1<<3)
201#define S5P_FIMV_DEC_STATUS_CRC_NUMBER_TWO (0<<4)
202#define S5P_FIMV_DEC_STATUS_CRC_NUMBER_FOUR (1<<4)
203#define S5P_FIMV_DEC_STATUS_CRC_NUMBER_MASK (1<<4)
204#define S5P_FIMV_DEC_STATUS_CRC_GENERATED (1<<5)
205#define S5P_FIMV_DEC_STATUS_CRC_NOT_GENERATED (0<<5)
206#define S5P_FIMV_DEC_STATUS_CRC_MASK (1<<5)
207
208#define S5P_FIMV_DEC_STATUS_RESOLUTION_MASK (3<<4)
209#define S5P_FIMV_DEC_STATUS_RESOLUTION_INC (1<<4)
210#define S5P_FIMV_DEC_STATUS_RESOLUTION_DEC (2<<4)
211
212/* Decode frame address */
213#define S5P_FIMV_DECODE_Y_ADR 0x2024
214#define S5P_FIMV_DECODE_C_ADR 0x2028
215
216/* Decoded frame tpe */
217#define S5P_FIMV_DECODE_FRAME_TYPE 0x2020
218#define S5P_FIMV_DECODE_FRAME_MASK 7
219
220#define S5P_FIMV_DECODE_FRAME_SKIPPED 0
221#define S5P_FIMV_DECODE_FRAME_I_FRAME 1
222#define S5P_FIMV_DECODE_FRAME_P_FRAME 2
223#define S5P_FIMV_DECODE_FRAME_B_FRAME 3
224#define S5P_FIMV_DECODE_FRAME_OTHER_FRAME 4
225
226/* Sizes of buffers required for decoding */
227#define S5P_FIMV_DEC_NB_IP_SIZE (32 * 1024)
228#define S5P_FIMV_DEC_VERT_NB_MV_SIZE (16 * 1024)
229#define S5P_FIMV_DEC_NB_DCAC_SIZE (16 * 1024)
230#define S5P_FIMV_DEC_UPNB_MV_SIZE (68 * 1024)
231#define S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE (136 * 1024)
232#define S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE (32 * 1024)
233#define S5P_FIMV_DEC_VC1_BITPLANE_SIZE (2 * 1024)
234#define S5P_FIMV_DEC_STX_PARSER_SIZE (68 * 1024)
235
236#define S5P_FIMV_DEC_BUF_ALIGN (8 * 1024)
237#define S5P_FIMV_ENC_BUF_ALIGN (8 * 1024)
238#define S5P_FIMV_NV12M_HALIGN 16
239#define S5P_FIMV_NV12M_LVALIGN 16
240#define S5P_FIMV_NV12M_CVALIGN 8
241#define S5P_FIMV_NV12MT_HALIGN 128
242#define S5P_FIMV_NV12MT_VALIGN 32
243#define S5P_FIMV_NV12M_SALIGN 2048
244#define S5P_FIMV_NV12MT_SALIGN 8192
245
246/* Sizes of buffers required for encoding */
247#define S5P_FIMV_ENC_UPMV_SIZE 0x10000
248#define S5P_FIMV_ENC_COLFLG_SIZE 0x10000
249#define S5P_FIMV_ENC_INTRAMD_SIZE 0x10000
250#define S5P_FIMV_ENC_INTRAPRED_SIZE 0x4000
251#define S5P_FIMV_ENC_NBORINFO_SIZE 0x10000
252#define S5P_FIMV_ENC_ACDCCOEF_SIZE 0x10000
253
254/* Encoder */
255#define S5P_FIMV_ENC_SI_STRM_SIZE 0x2004 /* stream size */
256#define S5P_FIMV_ENC_SI_PIC_CNT 0x2008 /* picture count */
257#define S5P_FIMV_ENC_SI_WRITE_PTR 0x200c /* write pointer */
258#define S5P_FIMV_ENC_SI_SLICE_TYPE 0x2010 /* slice type(I/P/B/IDR) */
259#define S5P_FIMV_ENC_SI_SLICE_TYPE_NON_CODED 0
260#define S5P_FIMV_ENC_SI_SLICE_TYPE_I 1
261#define S5P_FIMV_ENC_SI_SLICE_TYPE_P 2
262#define S5P_FIMV_ENC_SI_SLICE_TYPE_B 3
263#define S5P_FIMV_ENC_SI_SLICE_TYPE_SKIPPED 4
264#define S5P_FIMV_ENC_SI_SLICE_TYPE_OTHERS 5
265#define S5P_FIMV_ENCODED_Y_ADDR 0x2014 /* the addr of the encoded
266 luma pic */
267#define S5P_FIMV_ENCODED_C_ADDR 0x2018 /* the addr of the encoded
268 chroma pic */
269
270#define S5P_FIMV_ENC_SI_CH0_SB_ADR 0x2044 /* addr of stream buf */
271#define S5P_FIMV_ENC_SI_CH0_SB_SIZE 0x204c /* size of stream buf */
272#define S5P_FIMV_ENC_SI_CH0_CUR_Y_ADR 0x2050 /* current Luma addr */
273#define S5P_FIMV_ENC_SI_CH0_CUR_C_ADR 0x2054 /* current Chroma addr */
274#define S5P_FIMV_ENC_SI_CH0_FRAME_INS 0x2058 /* frame insertion */
275
276#define S5P_FIMV_ENC_SI_CH1_SB_ADR 0x2084 /* addr of stream buf */
277#define S5P_FIMV_ENC_SI_CH1_SB_SIZE 0x208c /* size of stream buf */
278#define S5P_FIMV_ENC_SI_CH1_CUR_Y_ADR 0x2090 /* current Luma addr */
279#define S5P_FIMV_ENC_SI_CH1_CUR_C_ADR 0x2094 /* current Chroma addr */
280#define S5P_FIMV_ENC_SI_CH1_FRAME_INS 0x2098 /* frame insertion */
281
282#define S5P_FIMV_ENC_PIC_TYPE_CTRL 0xc504 /* pic type level control */
283#define S5P_FIMV_ENC_B_RECON_WRITE_ON 0xc508 /* B frame recon write ctrl */
284#define S5P_FIMV_ENC_MSLICE_CTRL 0xc50c /* multi slice control */
285#define S5P_FIMV_ENC_MSLICE_MB 0xc510 /* MB number in the one slice */
286#define S5P_FIMV_ENC_MSLICE_BIT 0xc514 /* bit count for one slice */
287#define S5P_FIMV_ENC_CIR_CTRL 0xc518 /* number of intra refresh MB */
288#define S5P_FIMV_ENC_MAP_FOR_CUR 0xc51c /* linear or tiled mode */
289#define S5P_FIMV_ENC_PADDING_CTRL 0xc520 /* padding control */
290
291#define S5P_FIMV_ENC_RC_CONFIG 0xc5a0 /* RC config */
292#define S5P_FIMV_ENC_RC_BIT_RATE 0xc5a8 /* bit rate */
293#define S5P_FIMV_ENC_RC_QBOUND 0xc5ac /* max/min QP */
294#define S5P_FIMV_ENC_RC_RPARA 0xc5b0 /* rate control reaction coeff */
295#define S5P_FIMV_ENC_RC_MB_CTRL 0xc5b4 /* MB adaptive scaling */
296
297/* Encoder for H264 only */
298#define S5P_FIMV_ENC_H264_ENTROPY_MODE 0xd004 /* CAVLC or CABAC */
299#define S5P_FIMV_ENC_H264_ALPHA_OFF 0xd008 /* loop filter alpha offset */
300#define S5P_FIMV_ENC_H264_BETA_OFF 0xd00c /* loop filter beta offset */
301#define S5P_FIMV_ENC_H264_NUM_OF_REF 0xd010 /* number of reference for P/B */
302#define S5P_FIMV_ENC_H264_TRANS_FLAG 0xd034 /* 8x8 transform flag in PPS &
303 high profile */
304
305#define S5P_FIMV_ENC_RC_FRAME_RATE 0xd0d0 /* frame rate */
306
307/* Encoder for MPEG4 only */
308#define S5P_FIMV_ENC_MPEG4_QUART_PXL 0xe008 /* qpel interpolation ctrl */
309
310/* Additional */
311#define S5P_FIMV_SI_CH0_DPB_CONF_CTRL 0x2068 /* DPB Config Control Register */
312#define S5P_FIMV_SLICE_INT_MASK 1
313#define S5P_FIMV_SLICE_INT_SHIFT 31
314#define S5P_FIMV_DDELAY_ENA_SHIFT 30
315#define S5P_FIMV_DDELAY_VAL_MASK 0xff
316#define S5P_FIMV_DDELAY_VAL_SHIFT 16
317#define S5P_FIMV_DPB_COUNT_MASK 0xffff
318#define S5P_FIMV_DPB_FLUSH_MASK 1
319#define S5P_FIMV_DPB_FLUSH_SHIFT 14
320
321
322#define S5P_FIMV_SI_CH0_RELEASE_BUF 0x2060 /* DPB release buffer register */
323#define S5P_FIMV_SI_CH0_HOST_WR_ADR 0x2064 /* address of shared memory */
324
325/* Codec numbers */
326#define S5P_FIMV_CODEC_NONE -1
327
328#define S5P_FIMV_CODEC_H264_DEC 0
329#define S5P_FIMV_CODEC_VC1_DEC 1
330#define S5P_FIMV_CODEC_MPEG4_DEC 2
331#define S5P_FIMV_CODEC_MPEG2_DEC 3
332#define S5P_FIMV_CODEC_H263_DEC 4
333#define S5P_FIMV_CODEC_VC1RCV_DEC 5
334
335#define S5P_FIMV_CODEC_H264_ENC 16
336#define S5P_FIMV_CODEC_MPEG4_ENC 17
337#define S5P_FIMV_CODEC_H263_ENC 18
338
339/* Channel Control Register */
340#define S5P_FIMV_CH_SEQ_HEADER 1
341#define S5P_FIMV_CH_FRAME_START 2
342#define S5P_FIMV_CH_LAST_FRAME 3
343#define S5P_FIMV_CH_INIT_BUFS 4
344#define S5P_FIMV_CH_FRAME_START_REALLOC 5
345#define S5P_FIMV_CH_MASK 7
346#define S5P_FIMV_CH_SHIFT 16
347
348
349/* Host to RISC command */
350#define S5P_FIMV_H2R_CMD_EMPTY 0
351#define S5P_FIMV_H2R_CMD_OPEN_INSTANCE 1
352#define S5P_FIMV_H2R_CMD_CLOSE_INSTANCE 2
353#define S5P_FIMV_H2R_CMD_SYS_INIT 3
354#define S5P_FIMV_H2R_CMD_FLUSH 4
355#define S5P_FIMV_H2R_CMD_SLEEP 5
356#define S5P_FIMV_H2R_CMD_WAKEUP 6
357
358#define S5P_FIMV_R2H_CMD_EMPTY 0
359#define S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET 1
360#define S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET 2
361#define S5P_FIMV_R2H_CMD_RSV_RET 3
362#define S5P_FIMV_R2H_CMD_SEQ_DONE_RET 4
363#define S5P_FIMV_R2H_CMD_FRAME_DONE_RET 5
364#define S5P_FIMV_R2H_CMD_SLICE_DONE_RET 6
365#define S5P_FIMV_R2H_CMD_ENC_COMPLETE_RET 7
366#define S5P_FIMV_R2H_CMD_SYS_INIT_RET 8
367#define S5P_FIMV_R2H_CMD_FW_STATUS_RET 9
368#define S5P_FIMV_R2H_CMD_SLEEP_RET 10
369#define S5P_FIMV_R2H_CMD_WAKEUP_RET 11
370#define S5P_FIMV_R2H_CMD_FLUSH_RET 12
371#define S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET 15
372#define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16
373#define S5P_FIMV_R2H_CMD_ERR_RET 32
374
375/* Error handling defines */
376#define S5P_FIMV_ERR_WARNINGS_START 145
377#define S5P_FIMV_ERR_DEC_MASK 0xFFFF
378#define S5P_FIMV_ERR_DEC_SHIFT 0
379#define S5P_FIMV_ERR_DSPL_MASK 0xFFFF0000
380#define S5P_FIMV_ERR_DSPL_SHIFT 16
381
382/* Shared memory registers' offsets */
383
384/* An offset of the start position in the stream when
385 * the start position is not aligned */
386#define S5P_FIMV_SHARED_CROP_INFO_H 0x0020
387#define S5P_FIMV_SHARED_CROP_LEFT_MASK 0xFFFF
388#define S5P_FIMV_SHARED_CROP_LEFT_SHIFT 0
389#define S5P_FIMV_SHARED_CROP_RIGHT_MASK 0xFFFF0000
390#define S5P_FIMV_SHARED_CROP_RIGHT_SHIFT 16
391#define S5P_FIMV_SHARED_CROP_INFO_V 0x0024
392#define S5P_FIMV_SHARED_CROP_TOP_MASK 0xFFFF
393#define S5P_FIMV_SHARED_CROP_TOP_SHIFT 0
394#define S5P_FIMV_SHARED_CROP_BOTTOM_MASK 0xFFFF0000
395#define S5P_FIMV_SHARED_CROP_BOTTOM_SHIFT 16
396#define S5P_FIMV_SHARED_SET_FRAME_TAG 0x0004
397#define S5P_FIMV_SHARED_GET_FRAME_TAG_TOP 0x0008
398#define S5P_FIMV_SHARED_GET_FRAME_TAG_BOT 0x000C
399#define S5P_FIMV_SHARED_START_BYTE_NUM 0x0018
400#define S5P_FIMV_SHARED_RC_VOP_TIMING 0x0030
401#define S5P_FIMV_SHARED_LUMA_DPB_SIZE 0x0064
402#define S5P_FIMV_SHARED_CHROMA_DPB_SIZE 0x0068
403#define S5P_FIMV_SHARED_MV_SIZE 0x006C
404#define S5P_FIMV_SHARED_PIC_TIME_TOP 0x0010
405#define S5P_FIMV_SHARED_PIC_TIME_BOTTOM 0x0014
406#define S5P_FIMV_SHARED_EXT_ENC_CONTROL 0x0028
407#define S5P_FIMV_SHARED_P_B_FRAME_QP 0x0070
408#define S5P_FIMV_SHARED_ASPECT_RATIO_IDC 0x0074
409#define S5P_FIMV_SHARED_EXTENDED_SAR 0x0078
410#define S5P_FIMV_SHARED_H264_I_PERIOD 0x009C
411#define S5P_FIMV_SHARED_RC_CONTROL_CONFIG 0x00A0
412
413#endif /* _REGS_FIMV_H */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc.c b/drivers/media/video/s5p-mfc/s5p_mfc.c
new file mode 100644
index 000000000000..7dc7eab58b38
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc.c
@@ -0,0 +1,1274 @@
1/*
2 * Samsung S5P Multi Format Codec v 5.1
3 *
4 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
5 * Kamil Debski, <k.debski@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include <linux/clk.h>
14#include <linux/delay.h>
15#include <linux/interrupt.h>
16#include <linux/io.h>
17#include <linux/module.h>
18#include <linux/platform_device.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/version.h>
22#include <linux/videodev2.h>
23#include <linux/workqueue.h>
24#include <media/videobuf2-core.h>
25#include "regs-mfc.h"
26#include "s5p_mfc_ctrl.h"
27#include "s5p_mfc_debug.h"
28#include "s5p_mfc_dec.h"
29#include "s5p_mfc_enc.h"
30#include "s5p_mfc_intr.h"
31#include "s5p_mfc_opr.h"
32#include "s5p_mfc_pm.h"
33#include "s5p_mfc_shm.h"
34
35#define S5P_MFC_NAME "s5p-mfc"
36#define S5P_MFC_DEC_NAME "s5p-mfc-dec"
37#define S5P_MFC_ENC_NAME "s5p-mfc-enc"
38
/* Module-wide debug verbosity (0 = quiet); writable at runtime via sysfs. */
int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level - higher value produces more verbose messages");
42
43/* Helper functions for interrupt processing */
44/* Remove from hw execution round robin */
45static void clear_work_bit(struct s5p_mfc_ctx *ctx)
46{
47 struct s5p_mfc_dev *dev = ctx->dev;
48
49 spin_lock(&dev->condlock);
50 clear_bit(ctx->num, &dev->ctx_work_bits);
51 spin_unlock(&dev->condlock);
52}
53
/*
 * Wake up a context wait_queue: record the interrupt reason and error
 * code in the context, mark the condition and signal any waiter on
 * ctx->queue.
 */
static void wake_up_ctx(struct s5p_mfc_ctx *ctx, unsigned int reason,
			unsigned int err)
{
	ctx->int_cond = 1;
	ctx->int_type = reason;
	ctx->int_err = err;
	wake_up(&ctx->queue);
}
63
/*
 * Wake up the device wait_queue: record the interrupt reason and error
 * code in the device, mark the condition and signal any waiter on
 * dev->queue.
 */
static void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason,
			unsigned int err)
{
	dev->int_cond = 1;
	dev->int_type = reason;
	dev->int_err = err;
	wake_up(&dev->queue);
}
73
/*
 * Watchdog timer callback.
 *
 * While the hardware lock is held the timeout counter is incremented on
 * each expiration; once it reaches MFC_WATCHDOG_CNT the hardware is
 * presumed hung and recovery is delegated to the workqueue (firmware
 * reload cannot be done from timer context). The timer re-arms itself
 * unconditionally.
 */
void s5p_mfc_watchdog(unsigned long arg)
{
	struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;

	if (test_bit(0, &dev->hw_lock))
		atomic_inc(&dev->watchdog_cnt);
	if (atomic_read(&dev->watchdog_cnt) >= MFC_WATCHDOG_CNT) {
		/* This means that hw is busy and no interrupts were
		 * generated by hw for the Nth time of running this
		 * watchdog timer. This usually means a serious hw
		 * error. Now it is time to kill all instances and
		 * reset the MFC. */
		mfc_err("Time out during waiting for HW\n");
		queue_work(dev->watchdog_workqueue, &dev->watchdog_work);
	}
	dev->watchdog_timer.expires = jiffies +
					msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
	add_timer(&dev->watchdog_timer);
}
93
/*
 * Watchdog recovery worker.
 *
 * Runs when the hardware stopped generating interrupts: errors out every
 * open context, drops all of their queued buffers, releases the hardware
 * lock and, if at least one instance is still open, reloads and
 * re-initialises the firmware.
 */
static void s5p_mfc_watchdog_worker(struct work_struct *work)
{
	struct s5p_mfc_dev *dev;
	struct s5p_mfc_ctx *ctx;
	unsigned long flags;
	int mutex_locked;
	int i, ret;

	dev = container_of(work, struct s5p_mfc_dev, watchdog_work);

	mfc_err("Driver timeout error handling\n");
	/* Lock the mutex that protects open and release.
	 * This is necessary as they may load and unload firmware.
	 * trylock: if an open/release is in flight we proceed anyway and
	 * only log, to avoid deadlocking the recovery path. */
	mutex_locked = mutex_trylock(&dev->mfc_mutex);
	if (!mutex_locked)
		mfc_err("Error: some instance may be closing/opening\n");
	spin_lock_irqsave(&dev->irqlock, flags);

	/* NOTE(review): clock is gated while holding irqlock with IRQs
	 * disabled — confirm s5p_mfc_clock_off() cannot sleep here. */
	s5p_mfc_clock_off();

	for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
		ctx = dev->ctx[i];
		if (!ctx)
			continue;
		ctx->state = MFCINST_ERROR;
		s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
		s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
		clear_work_bit(ctx);
		wake_up_ctx(ctx, S5P_FIMV_R2H_CMD_ERR_RET, 0);
	}
	clear_bit(0, &dev->hw_lock);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	/* Double check if there is at least one instance running.
	 * If no instance is in memory than no firmware should be present */
	if (dev->num_inst > 0) {
		ret = s5p_mfc_reload_firmware(dev);
		if (ret) {
			mfc_err("Failed to reload FW\n");
			goto unlock;
		}
		s5p_mfc_clock_on();
		ret = s5p_mfc_init_hw(dev);
		if (ret)
			mfc_err("Failed to reinit FW\n");
	}
unlock:
	if (mutex_locked)
		mutex_unlock(&dev->mfc_mutex);
}
143
144static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
145{
146 struct video_device *vdev = video_devdata(file);
147
148 if (!vdev) {
149 mfc_err("failed to get video_device");
150 return MFCNODE_INVALID;
151 }
152 if (vdev->index == 0)
153 return MFCNODE_DECODER;
154 else if (vdev->index == 1)
155 return MFCNODE_ENCODER;
156 return MFCNODE_INVALID;
157}
158
/*
 * Acknowledge a pending interrupt: clear the host interrupt and the
 * RISC-to-host command registers, and reset the returned channel id.
 */
static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
{
	mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
	mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
	mfc_write(dev, 0xffff, S5P_FIMV_SI_RTN_CHID);
}
165
/*
 * End of stream (or of a resolution-change flush): return every buffer
 * still on the destination queue to userspace with zero payload so the
 * application can detect that no more frames will be produced.
 */
static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_buf *dst_buf;

	ctx->state = MFCINST_FINISHED;
	ctx->sequence++;
	while (!list_empty(&ctx->dst_queue)) {
		dst_buf = list_entry(ctx->dst_queue.next,
				     struct s5p_mfc_buf, list);
		mfc_debug(2, "Cleaning up buffer: %d\n",
					  dst_buf->b->v4l2_buf.index);
		/* Zero payload marks this buffer as carrying no frame */
		vb2_set_plane_payload(dst_buf->b, 0, 0);
		vb2_set_plane_payload(dst_buf->b, 1, 0);
		list_del(&dst_buf->list);
		ctx->dst_queue_cnt--;
		dst_buf->b->v4l2_buf.sequence = (ctx->sequence++);

		/* Equal top/bottom picture times indicate a progressive frame */
		if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) ==
			s5p_mfc_read_shm(ctx, PIC_TIME_BOT))
			dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
		else
			dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED;

		ctx->dec_dst_flag &= ~(1 << dst_buf->b->v4l2_buf.index);
		vb2_buffer_done(dst_buf->b, VB2_BUF_STATE_DONE);
	}
}
193
/*
 * Propagate the timestamp and timecode of the just-decoded source buffer
 * to the destination buffer holding the decoded picture (matched by the
 * luma address the hardware reported), and set the key/P/B frame flag
 * according to the decoded frame type.
 */
static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf *dst_buf, *src_buf;
	size_t dec_y_addr = s5p_mfc_get_dec_y_adr();
	unsigned int frame_type = s5p_mfc_get_frame_type();

	/* Copy timestamp / timecode from decoded src to dst and set
	   appropriate flags */
	src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
	list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
		if (vb2_dma_contig_plane_paddr(dst_buf->b, 0) == dec_y_addr) {
			memcpy(&dst_buf->b->v4l2_buf.timecode,
				&src_buf->b->v4l2_buf.timecode,
				sizeof(struct v4l2_timecode));
			memcpy(&dst_buf->b->v4l2_buf.timestamp,
				&src_buf->b->v4l2_buf.timestamp,
				sizeof(struct timeval));
			switch (frame_type) {
			case S5P_FIMV_DECODE_FRAME_I_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_KEYFRAME;
				break;
			case S5P_FIMV_DECODE_FRAME_P_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_PFRAME;
				break;
			case S5P_FIMV_DECODE_FRAME_B_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_BFRAME;
				break;
			}
			break;
		}
	}
}
230
231static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
232{
233 struct s5p_mfc_dev *dev = ctx->dev;
234 struct s5p_mfc_buf *dst_buf;
235 size_t dspl_y_addr = s5p_mfc_get_dspl_y_adr();
236 unsigned int frame_type = s5p_mfc_get_frame_type();
237 unsigned int index;
238
239 /* If frame is same as previous then skip and do not dequeue */
240 if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
241 if (!ctx->after_packed_pb)
242 ctx->sequence++;
243 ctx->after_packed_pb = 0;
244 return;
245 }
246 ctx->sequence++;
247 /* The MFC returns address of the buffer, now we have to
248 * check which videobuf does it correspond to */
249 list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
250 /* Check if this is the buffer we're looking for */
251 if (vb2_dma_contig_plane_paddr(dst_buf->b, 0) == dspl_y_addr) {
252 list_del(&dst_buf->list);
253 ctx->dst_queue_cnt--;
254 dst_buf->b->v4l2_buf.sequence = ctx->sequence;
255 if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) ==
256 s5p_mfc_read_shm(ctx, PIC_TIME_BOT))
257 dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
258 else
259 dst_buf->b->v4l2_buf.field =
260 V4L2_FIELD_INTERLACED;
261 vb2_set_plane_payload(dst_buf->b, 0, ctx->luma_size);
262 vb2_set_plane_payload(dst_buf->b, 1, ctx->chroma_size);
263 clear_bit(dst_buf->b->v4l2_buf.index,
264 &ctx->dec_dst_flag);
265
266 vb2_buffer_done(dst_buf->b,
267 err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
268
269 index = dst_buf->b->v4l2_buf.index;
270 break;
271 }
272 }
273}
274
275/* Handle frame decoding interrupt */
276static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
277 unsigned int reason, unsigned int err)
278{
279 struct s5p_mfc_dev *dev = ctx->dev;
280 unsigned int dst_frame_status;
281 struct s5p_mfc_buf *src_buf;
282 unsigned long flags;
283 unsigned int res_change;
284
285 unsigned int index;
286
287 dst_frame_status = s5p_mfc_get_dspl_status()
288 & S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
289 res_change = s5p_mfc_get_dspl_status()
290 & S5P_FIMV_DEC_STATUS_RESOLUTION_MASK;
291 mfc_debug(2, "Frame Status: %x\n", dst_frame_status);
292 if (ctx->state == MFCINST_RES_CHANGE_INIT)
293 ctx->state = MFCINST_RES_CHANGE_FLUSH;
294 if (res_change) {
295 ctx->state = MFCINST_RES_CHANGE_INIT;
296 s5p_mfc_clear_int_flags(dev);
297 wake_up_ctx(ctx, reason, err);
298 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
299 BUG();
300 s5p_mfc_clock_off();
301 s5p_mfc_try_run(dev);
302 return;
303 }
304 if (ctx->dpb_flush_flag)
305 ctx->dpb_flush_flag = 0;
306
307 spin_lock_irqsave(&dev->irqlock, flags);
308 /* All frames remaining in the buffer have been extracted */
309 if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) {
310 if (ctx->state == MFCINST_RES_CHANGE_FLUSH) {
311 s5p_mfc_handle_frame_all_extracted(ctx);
312 ctx->state = MFCINST_RES_CHANGE_END;
313 goto leave_handle_frame;
314 } else {
315 s5p_mfc_handle_frame_all_extracted(ctx);
316 }
317 }
318
319 if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY ||
320 dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_ONLY)
321 s5p_mfc_handle_frame_copy_time(ctx);
322
323 /* A frame has been decoded and is in the buffer */
324 if (dst_frame_status == S5P_FIMV_DEC_STATUS_DISPLAY_ONLY ||
325 dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY) {
326 s5p_mfc_handle_frame_new(ctx, err);
327 } else {
328 mfc_debug(2, "No frame decode\n");
329 }
330 /* Mark source buffer as complete */
331 if (dst_frame_status != S5P_FIMV_DEC_STATUS_DISPLAY_ONLY
332 && !list_empty(&ctx->src_queue)) {
333 src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
334 list);
335 ctx->consumed_stream += s5p_mfc_get_consumed_stream();
336 if (ctx->codec_mode != S5P_FIMV_CODEC_H264_DEC &&
337 s5p_mfc_get_frame_type() == S5P_FIMV_DECODE_FRAME_P_FRAME
338 && ctx->consumed_stream + STUFF_BYTE <
339 src_buf->b->v4l2_planes[0].bytesused) {
340 /* Run MFC again on the same buffer */
341 mfc_debug(2, "Running again the same buffer\n");
342 ctx->after_packed_pb = 1;
343 } else {
344 index = src_buf->b->v4l2_buf.index;
345 mfc_debug(2, "MFC needs next buffer\n");
346 ctx->consumed_stream = 0;
347 list_del(&src_buf->list);
348 ctx->src_queue_cnt--;
349 if (s5p_mfc_err_dec(err) > 0)
350 vb2_buffer_done(src_buf->b, VB2_BUF_STATE_ERROR);
351 else
352 vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE);
353 }
354 }
355leave_handle_frame:
356 spin_unlock_irqrestore(&dev->irqlock, flags);
357 if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
358 || ctx->dst_queue_cnt < ctx->dpb_count)
359 clear_work_bit(ctx);
360 s5p_mfc_clear_int_flags(dev);
361 wake_up_ctx(ctx, reason, err);
362 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
363 BUG();
364 s5p_mfc_clock_off();
365 s5p_mfc_try_run(dev);
366}
367
368/* Error handling for interrupt */
369static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx,
370 unsigned int reason, unsigned int err)
371{
372 struct s5p_mfc_dev *dev;
373 unsigned long flags;
374
375 /* If no context is available then all necessary
376 * processing has been done. */
377 if (ctx == 0)
378 return;
379
380 dev = ctx->dev;
381 mfc_err("Interrupt Error: %08x\n", err);
382 s5p_mfc_clear_int_flags(dev);
383 wake_up_dev(dev, reason, err);
384
385 /* Error recovery is dependent on the state of context */
386 switch (ctx->state) {
387 case MFCINST_INIT:
388 /* This error had to happen while acquireing instance */
389 case MFCINST_GOT_INST:
390 /* This error had to happen while parsing the header */
391 case MFCINST_HEAD_PARSED:
392 /* This error had to happen while setting dst buffers */
393 case MFCINST_RETURN_INST:
394 /* This error had to happen while releasing instance */
395 clear_work_bit(ctx);
396 wake_up_ctx(ctx, reason, err);
397 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
398 BUG();
399 s5p_mfc_clock_off();
400 ctx->state = MFCINST_ERROR;
401 break;
402 case MFCINST_FINISHING:
403 case MFCINST_FINISHED:
404 case MFCINST_RUNNING:
405 /* It is higly probable that an error occured
406 * while decoding a frame */
407 clear_work_bit(ctx);
408 ctx->state = MFCINST_ERROR;
409 /* Mark all dst buffers as having an error */
410 spin_lock_irqsave(&dev->irqlock, flags);
411 s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
412 /* Mark all src buffers as having an error */
413 s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
414 spin_unlock_irqrestore(&dev->irqlock, flags);
415 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
416 BUG();
417 s5p_mfc_clock_off();
418 break;
419 default:
420 mfc_err("Encountered an error interrupt which had not been handled\n");
421 break;
422 }
423 return;
424}
425
426/* Header parsing interrupt handling */
427static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
428 unsigned int reason, unsigned int err)
429{
430 struct s5p_mfc_dev *dev;
431 unsigned int guard_width, guard_height;
432
433 if (ctx == 0)
434 return;
435 dev = ctx->dev;
436 if (ctx->c_ops->post_seq_start) {
437 if (ctx->c_ops->post_seq_start(ctx))
438 mfc_err("post_seq_start() failed\n");
439 } else {
440 ctx->img_width = s5p_mfc_get_img_width();
441 ctx->img_height = s5p_mfc_get_img_height();
442
443 ctx->buf_width = ALIGN(ctx->img_width,
444 S5P_FIMV_NV12MT_HALIGN);
445 ctx->buf_height = ALIGN(ctx->img_height,
446 S5P_FIMV_NV12MT_VALIGN);
447 mfc_debug(2, "SEQ Done: Movie dimensions %dx%d, "
448 "buffer dimensions: %dx%d\n", ctx->img_width,
449 ctx->img_height, ctx->buf_width,
450 ctx->buf_height);
451 if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC) {
452 ctx->luma_size = ALIGN(ctx->buf_width *
453 ctx->buf_height, S5P_FIMV_DEC_BUF_ALIGN);
454 ctx->chroma_size = ALIGN(ctx->buf_width *
455 ALIGN((ctx->img_height >> 1),
456 S5P_FIMV_NV12MT_VALIGN),
457 S5P_FIMV_DEC_BUF_ALIGN);
458 ctx->mv_size = ALIGN(ctx->buf_width *
459 ALIGN((ctx->buf_height >> 2),
460 S5P_FIMV_NV12MT_VALIGN),
461 S5P_FIMV_DEC_BUF_ALIGN);
462 } else {
463 guard_width = ALIGN(ctx->img_width + 24,
464 S5P_FIMV_NV12MT_HALIGN);
465 guard_height = ALIGN(ctx->img_height + 16,
466 S5P_FIMV_NV12MT_VALIGN);
467 ctx->luma_size = ALIGN(guard_width *
468 guard_height, S5P_FIMV_DEC_BUF_ALIGN);
469 guard_width = ALIGN(ctx->img_width + 16,
470 S5P_FIMV_NV12MT_HALIGN);
471 guard_height = ALIGN((ctx->img_height >> 1) + 4,
472 S5P_FIMV_NV12MT_VALIGN);
473 ctx->chroma_size = ALIGN(guard_width *
474 guard_height, S5P_FIMV_DEC_BUF_ALIGN);
475 ctx->mv_size = 0;
476 }
477 ctx->dpb_count = s5p_mfc_get_dpb_count();
478 if (ctx->img_width == 0 || ctx->img_width == 0)
479 ctx->state = MFCINST_ERROR;
480 else
481 ctx->state = MFCINST_HEAD_PARSED;
482 }
483 s5p_mfc_clear_int_flags(dev);
484 clear_work_bit(ctx);
485 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
486 BUG();
487 s5p_mfc_clock_off();
488 s5p_mfc_try_run(dev);
489 wake_up_ctx(ctx, reason, err);
490}
491
492/* Header parsing interrupt handling */
493static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
494 unsigned int reason, unsigned int err)
495{
496 struct s5p_mfc_buf *src_buf;
497 struct s5p_mfc_dev *dev;
498 unsigned long flags;
499
500 if (ctx == 0)
501 return;
502 dev = ctx->dev;
503 s5p_mfc_clear_int_flags(dev);
504 ctx->int_type = reason;
505 ctx->int_err = err;
506 ctx->int_cond = 1;
507 spin_lock(&dev->condlock);
508 clear_bit(ctx->num, &dev->ctx_work_bits);
509 spin_unlock(&dev->condlock);
510 if (err == 0) {
511 ctx->state = MFCINST_RUNNING;
512 if (!ctx->dpb_flush_flag) {
513 spin_lock_irqsave(&dev->irqlock, flags);
514 if (!list_empty(&ctx->src_queue)) {
515 src_buf = list_entry(ctx->src_queue.next,
516 struct s5p_mfc_buf, list);
517 list_del(&src_buf->list);
518 ctx->src_queue_cnt--;
519 vb2_buffer_done(src_buf->b,
520 VB2_BUF_STATE_DONE);
521 }
522 spin_unlock_irqrestore(&dev->irqlock, flags);
523 } else {
524 ctx->dpb_flush_flag = 0;
525 }
526 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
527 BUG();
528
529 s5p_mfc_clock_off();
530
531 wake_up(&ctx->queue);
532 s5p_mfc_try_run(dev);
533 } else {
534 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
535 BUG();
536
537 s5p_mfc_clock_off();
538
539 wake_up(&ctx->queue);
540 }
541}
542
/*
 * Interrupt handler: decode the RISC-to-host command and error registers
 * and dispatch to the reason-specific handlers above. Each branch is
 * responsible for acknowledging the interrupt, releasing the hardware
 * lock, gating the clock and rescheduling work where applicable.
 *
 * NOTE(review): dev->ctx[dev->curr_ctx] may be NULL — only the
 * SYS_INIT/FW_STATUS/SLEEP/WAKEUP branch checks it before use;
 * presumably curr_ctx is always valid for the other reasons — confirm.
 */
static irqreturn_t s5p_mfc_irq(int irq, void *priv)
{
	struct s5p_mfc_dev *dev = priv;
	struct s5p_mfc_ctx *ctx;
	unsigned int reason;
	unsigned int err;

	mfc_debug_enter();
	/* Reset the timeout watchdog */
	atomic_set(&dev->watchdog_cnt, 0);
	ctx = dev->ctx[dev->curr_ctx];
	/* Get the reason of interrupt and the error code */
	reason = s5p_mfc_get_int_reason();
	err = s5p_mfc_get_int_err();
	mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
	switch (reason) {
	case S5P_FIMV_R2H_CMD_ERR_RET:
		/* An error has occured; decode warnings during RUNNING are
		 * still processed as regular frames */
		if (ctx->state == MFCINST_RUNNING &&
			s5p_mfc_err_dec(err) >= S5P_FIMV_ERR_WARNINGS_START)
			s5p_mfc_handle_frame(ctx, reason, err);
		else
			s5p_mfc_handle_error(ctx, reason, err);
		clear_bit(0, &dev->enter_suspend);
		break;

	case S5P_FIMV_R2H_CMD_SLICE_DONE_RET:
	case S5P_FIMV_R2H_CMD_FRAME_DONE_RET:
		if (ctx->c_ops->post_frame_start) {
			if (ctx->c_ops->post_frame_start(ctx))
				mfc_err("post_frame_start() failed\n");
			s5p_mfc_clear_int_flags(dev);
			wake_up_ctx(ctx, reason, err);
			if (test_and_clear_bit(0, &dev->hw_lock) == 0)
				BUG();
			s5p_mfc_clock_off();
			s5p_mfc_try_run(dev);
		} else {
			s5p_mfc_handle_frame(ctx, reason, err);
		}
		break;

	case S5P_FIMV_R2H_CMD_SEQ_DONE_RET:
		s5p_mfc_handle_seq_done(ctx, reason, err);
		break;

	case S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET:
		ctx->inst_no = s5p_mfc_get_inst_no();
		ctx->state = MFCINST_GOT_INST;
		clear_work_bit(ctx);
		wake_up(&ctx->queue);
		goto irq_cleanup_hw;

	case S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET:
		clear_work_bit(ctx);
		ctx->state = MFCINST_FREE;
		wake_up(&ctx->queue);
		goto irq_cleanup_hw;

	case S5P_FIMV_R2H_CMD_SYS_INIT_RET:
	case S5P_FIMV_R2H_CMD_FW_STATUS_RET:
	case S5P_FIMV_R2H_CMD_SLEEP_RET:
	case S5P_FIMV_R2H_CMD_WAKEUP_RET:
		/* Device-level events: may arrive with no current context */
		if (ctx)
			clear_work_bit(ctx);
		s5p_mfc_clear_int_flags(dev);
		wake_up_dev(dev, reason, err);
		clear_bit(0, &dev->hw_lock);
		clear_bit(0, &dev->enter_suspend);
		break;

	case S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET:
		s5p_mfc_handle_init_buffers(ctx, reason, err);
		break;
	default:
		mfc_debug(2, "Unknown int reason\n");
		s5p_mfc_clear_int_flags(dev);
	}
	mfc_debug_leave();
	return IRQ_HANDLED;
irq_cleanup_hw:
	/* Common tail for open/close instance: ack, record the event in the
	 * context, drop the hardware lock, gate the clock, reschedule */
	s5p_mfc_clear_int_flags(dev);
	ctx->int_type = reason;
	ctx->int_err = err;
	ctx->int_cond = 1;
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		mfc_err("Failed to unlock hw\n");

	s5p_mfc_clock_off();

	s5p_mfc_try_run(dev);
	mfc_debug(2, "Exit via irq_cleanup_hw\n");
	return IRQ_HANDLED;
}
638
/*
 * Open an MFC node (decoder or encoder): allocate a context, assign it a
 * free slot in dev->ctx[], set up the codec ops and controls for the
 * node type, and — for the very first instance — power on the block,
 * load the firmware and initialise the hardware. Finally initialise the
 * two videobuf2 queues (CAPTURE and OUTPUT) of the context.
 */
static int s5p_mfc_open(struct file *file)
{
	struct s5p_mfc_dev *dev = video_drvdata(file);
	struct s5p_mfc_ctx *ctx = NULL;
	struct vb2_queue *q;
	unsigned long flags;
	int ret = 0;

	mfc_debug_enter();
	dev->num_inst++;	/* It is guarded by mfc_mutex in vfd */
	/* Allocate memory for context */
	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		mfc_err("Not enough memory\n");
		ret = -ENOMEM;
		goto err_alloc;
	}
	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);
	ctx->dev = dev;
	INIT_LIST_HEAD(&ctx->src_queue);
	INIT_LIST_HEAD(&ctx->dst_queue);
	ctx->src_queue_cnt = 0;
	ctx->dst_queue_cnt = 0;
	/* Get context number: first free slot in dev->ctx[] */
	ctx->num = 0;
	while (dev->ctx[ctx->num]) {
		ctx->num++;
		if (ctx->num >= MFC_NUM_CONTEXTS) {
			mfc_err("Too many open contexts\n");
			ret = -EBUSY;
			goto err_no_ctx;
		}
	}
	/* Mark context as idle */
	spin_lock_irqsave(&dev->condlock, flags);
	clear_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock_irqrestore(&dev->condlock, flags);
	dev->ctx[ctx->num] = ctx;
	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
		ctx->type = MFCINST_DECODER;
		ctx->c_ops = get_dec_codec_ops();
		/* Setup ctrl handler */
		ret = s5p_mfc_dec_ctrls_setup(ctx);
		if (ret) {
			mfc_err("Failed to setup mfc controls\n");
			goto err_ctrls_setup;
		}
	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
		ctx->type = MFCINST_ENCODER;
		ctx->c_ops = get_enc_codec_ops();
		/* only for encoder */
		INIT_LIST_HEAD(&ctx->ref_queue);
		ctx->ref_queue_cnt = 0;
		/* Setup ctrl handler */
		ret = s5p_mfc_enc_ctrls_setup(ctx);
		if (ret) {
			mfc_err("Failed to setup mfc controls\n");
			goto err_ctrls_setup;
		}
	} else {
		ret = -ENOENT;
		goto err_bad_node;
	}
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
	/* NOTE(review): s5p_mfc_release() compares inst_no against
	 * MFC_NO_INSTANCE_SET; presumably that macro equals -1 — confirm
	 * and use the macro here for consistency. */
	ctx->inst_no = -1;
	/* Load firmware if this is the first instance */
	if (dev->num_inst == 1) {
		dev->watchdog_timer.expires = jiffies +
					msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
		add_timer(&dev->watchdog_timer);
		ret = s5p_mfc_power_on();
		if (ret < 0) {
			mfc_err("power on failed\n");
			goto err_pwr_enable;
		}
		s5p_mfc_clock_on();
		ret = s5p_mfc_alloc_and_load_firmware(dev);
		if (ret)
			goto err_alloc_fw;
		/* Init the FW */
		ret = s5p_mfc_init_hw(dev);
		if (ret)
			goto err_init_hw;
		s5p_mfc_clock_off();
	}
	/* Init videobuf2 queue for CAPTURE */
	q = &ctx->vq_dst;
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	q->drv_priv = &ctx->fh;
	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
		q->io_modes = VB2_MMAP;
		q->ops = get_dec_queue_ops();
	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
		q->io_modes = VB2_MMAP | VB2_USERPTR;
		q->ops = get_enc_queue_ops();
	} else {
		ret = -ENOENT;
		goto err_queue_init;
	}
	q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
	ret = vb2_queue_init(q);
	if (ret) {
		mfc_err("Failed to initialize videobuf2 queue(capture)\n");
		goto err_queue_init;
	}
	/* Init videobuf2 queue for OUTPUT */
	q = &ctx->vq_src;
	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	q->io_modes = VB2_MMAP;
	q->drv_priv = &ctx->fh;
	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
		q->io_modes = VB2_MMAP;
		q->ops = get_dec_queue_ops();
	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
		q->io_modes = VB2_MMAP | VB2_USERPTR;
		q->ops = get_enc_queue_ops();
	} else {
		ret = -ENOENT;
		goto err_queue_init;
	}
	q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
	ret = vb2_queue_init(q);
	if (ret) {
		mfc_err("Failed to initialize videobuf2 queue(output)\n");
		goto err_queue_init;
	}
	init_waitqueue_head(&ctx->queue);
	mfc_debug_leave();
	return ret;
	/* Deinit when failure occured */
	/* NOTE(review): err_queue_init falls through to err_init_hw and
	 * releases the firmware even when this was NOT the first instance;
	 * and when it WAS the first instance, err_pwr_enable releases the
	 * firmware a second time — confirm s5p_mfc_release_firmware() is
	 * safe to call twice / for a live second instance. */
err_queue_init:
err_init_hw:
	s5p_mfc_release_firmware(dev);
err_alloc_fw:
	dev->ctx[ctx->num] = 0;
	del_timer_sync(&dev->watchdog_timer);
	s5p_mfc_clock_off();
err_pwr_enable:
	if (dev->num_inst == 1) {
		if (s5p_mfc_power_off() < 0)
			mfc_err("power off failed\n");
		s5p_mfc_release_firmware(dev);
	}
	/* NOTE(review): the decoder ctrls-delete helper is reached on the
	 * encoder setup failure path too — presumably it only tears down
	 * the common ctrl_handler; confirm. */
err_ctrls_setup:
	s5p_mfc_dec_ctrls_delete(ctx);
err_bad_node:
err_no_ctx:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
err_alloc:
	dev->num_inst--;
	mfc_debug_leave();
	return ret;
}
797
/*
 * Release an MFC context: tear down the videobuf2 queues, return the
 * hardware instance (waiting for the CLOSE_INSTANCE interrupt), free the
 * per-context buffers and — when the last instance goes away — reset the
 * hardware, drop the firmware, stop the watchdog and power off.
 */
static int s5p_mfc_release(struct file *file)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;

	mfc_debug_enter();
	s5p_mfc_clock_on();
	vb2_queue_release(&ctx->vq_src);
	vb2_queue_release(&ctx->vq_dst);
	/* Mark context as idle */
	spin_lock_irqsave(&dev->condlock, flags);
	clear_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock_irqrestore(&dev->condlock, flags);
	/* If instance was initialised then
	 * return instance and free resources */
	if (ctx->inst_no != MFC_NO_INSTANCE_SET) {
		mfc_debug(2, "Has to free instance\n");
		ctx->state = MFCINST_RETURN_INST;
		spin_lock_irqsave(&dev->condlock, flags);
		set_bit(ctx->num, &dev->ctx_work_bits);
		spin_unlock_irqrestore(&dev->condlock, flags);
		s5p_mfc_clean_ctx_int_flags(ctx);
		s5p_mfc_try_run(dev);
		/* Wait until instance is returned or timeout occured */
		if (s5p_mfc_wait_for_done_ctx
		    (ctx, S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET, 0)) {
			/* NOTE(review): the clock is also gated once more at
			 * the end of this function on this timeout path —
			 * confirm the clock calls are refcounted. */
			s5p_mfc_clock_off();
			mfc_err("Err returning instance\n");
		}
		mfc_debug(2, "After free instance\n");
		/* Free resources */
		s5p_mfc_release_codec_buffers(ctx);
		s5p_mfc_release_instance_buffer(ctx);
		if (ctx->type == MFCINST_DECODER)
			s5p_mfc_release_dec_desc_buffer(ctx);

		ctx->inst_no = MFC_NO_INSTANCE_SET;
	}
	/* hardware locking scheme: drop the lock if we held it */
	if (dev->curr_ctx == ctx->num)
		clear_bit(0, &dev->hw_lock);
	dev->num_inst--;
	if (dev->num_inst == 0) {
		mfc_debug(2, "Last instance - release firmware\n");
		/* reset <-> F/W release */
		s5p_mfc_reset(dev);
		s5p_mfc_release_firmware(dev);
		del_timer_sync(&dev->watchdog_timer);
		if (s5p_mfc_power_off() < 0)
			mfc_err("Power off failed\n");
	}
	mfc_debug(2, "Shutting down clock\n");
	s5p_mfc_clock_off();
	dev->ctx[ctx->num] = 0;
	/* NOTE(review): called for encoder contexts as well — presumably
	 * it only frees the common ctrl_handler; confirm against
	 * s5p_mfc_dec_ctrls_delete(). */
	s5p_mfc_dec_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
	mfc_debug_leave();
	return 0;
}
861
/*
 * Poll: report readiness of the context's OUTPUT (source) and CAPTURE
 * (destination) queues. A done source buffer means more input may be
 * queued (POLLOUT); a done destination buffer means a decoded/encoded
 * result may be dequeued (POLLIN).
 */
static unsigned int s5p_mfc_poll(struct file *file,
				 struct poll_table_struct *wait)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
	struct s5p_mfc_dev *dev = ctx->dev;
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	src_q = &ctx->vq_src;
	dst_q = &ctx->vq_dst;
	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc = POLLERR;
		goto end;
	}
	/* Release the device mutex while registering on the wait queues so
	 * other file operations can progress while we sleep */
	mutex_unlock(&dev->mfc_mutex);
	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);
	mutex_lock(&dev->mfc_mutex);
	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
								done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
				|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);
	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
								done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
				|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);
end:
	return rc;
}
908
909/* Mmap */
910static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
911{
912 struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
913 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
914 int ret;
915 if (offset < DST_QUEUE_OFF_BASE) {
916 mfc_debug(2, "mmaping source\n");
917 ret = vb2_mmap(&ctx->vq_src, vma);
918 } else { /* capture */
919 mfc_debug(2, "mmaping destination\n");
920 vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
921 ret = vb2_mmap(&ctx->vq_dst, vma);
922 }
923 return ret;
924}
925
/* v4l2 file operations, shared by the decoder and encoder video nodes */
static const struct v4l2_file_operations s5p_mfc_fops = {
	.owner = THIS_MODULE,
	.open = s5p_mfc_open,
	.release = s5p_mfc_release,
	.poll = s5p_mfc_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = s5p_mfc_mmap,
};
935
/* device_find_child() callback: true when the child's name equals @data */
static int match_child(struct device *dev, void *data)
{
	const char *name = dev_name(dev);

	if (!name)
		return 0;
	return strcmp(name, (char *)data) == 0;
}
942
943
944/* MFC probe function */
945static int __devinit s5p_mfc_probe(struct platform_device *pdev)
946{
947 struct s5p_mfc_dev *dev;
948 struct video_device *vfd;
949 struct resource *res;
950 int ret;
951
952 pr_debug("%s++\n", __func__);
953 dev = kzalloc(sizeof *dev, GFP_KERNEL);
954 if (!dev) {
955 dev_err(&pdev->dev, "Not enough memory for MFC device\n");
956 return -ENOMEM;
957 }
958
959 spin_lock_init(&dev->irqlock);
960 spin_lock_init(&dev->condlock);
961 dev->plat_dev = pdev;
962 if (!dev->plat_dev) {
963 dev_err(&pdev->dev, "No platform data specified\n");
964 ret = -ENODEV;
965 goto err_dev;
966 }
967
968 ret = s5p_mfc_init_pm(dev);
969 if (ret < 0) {
970 dev_err(&pdev->dev, "failed to get mfc clock source\n");
971 goto err_clk;
972 }
973
974 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
975 if (res == NULL) {
976 dev_err(&pdev->dev, "failed to get memory region resource\n");
977 ret = -ENOENT;
978 goto err_res;
979 }
980
981 dev->mfc_mem = request_mem_region(res->start, resource_size(res),
982 pdev->name);
983 if (dev->mfc_mem == NULL) {
984 dev_err(&pdev->dev, "failed to get memory region\n");
985 ret = -ENOENT;
986 goto err_mem_reg;
987 }
988 dev->regs_base = ioremap(dev->mfc_mem->start, resource_size(dev->mfc_mem));
989 if (dev->regs_base == NULL) {
990 dev_err(&pdev->dev, "failed to ioremap address region\n");
991 ret = -ENOENT;
992 goto err_ioremap;
993 }
994
995 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
996 if (res == NULL) {
997 dev_err(&pdev->dev, "failed to get irq resource\n");
998 ret = -ENOENT;
999 goto err_get_res;
1000 }
1001 dev->irq = res->start;
1002 ret = request_irq(dev->irq, s5p_mfc_irq, IRQF_DISABLED, pdev->name,
1003 dev);
1004 if (ret) {
1005 dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
1006 goto err_req_irq;
1007 }
1008
1009 dev->mem_dev_l = device_find_child(&dev->plat_dev->dev, "s5p-mfc-l",
1010 match_child);
1011 if (!dev->mem_dev_l) {
1012 mfc_err("Mem child (L) device get failed\n");
1013 ret = -ENODEV;
1014 goto err_find_child;
1015 }
1016 dev->mem_dev_r = device_find_child(&dev->plat_dev->dev, "s5p-mfc-r",
1017 match_child);
1018 if (!dev->mem_dev_r) {
1019 mfc_err("Mem child (R) device get failed\n");
1020 ret = -ENODEV;
1021 goto err_find_child;
1022 }
1023
1024 dev->alloc_ctx[0] = vb2_dma_contig_init_ctx(dev->mem_dev_l);
1025 if (IS_ERR_OR_NULL(dev->alloc_ctx[0])) {
1026 ret = PTR_ERR(dev->alloc_ctx[0]);
1027 goto err_mem_init_ctx_0;
1028 }
1029 dev->alloc_ctx[1] = vb2_dma_contig_init_ctx(dev->mem_dev_r);
1030 if (IS_ERR_OR_NULL(dev->alloc_ctx[1])) {
1031 ret = PTR_ERR(dev->alloc_ctx[1]);
1032 goto err_mem_init_ctx_1;
1033 }
1034
1035 mutex_init(&dev->mfc_mutex);
1036
1037 ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
1038 if (ret)
1039 goto err_v4l2_dev_reg;
1040 init_waitqueue_head(&dev->queue);
1041
1042 /* decoder */
1043 vfd = video_device_alloc();
1044 if (!vfd) {
1045 v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
1046 ret = -ENOMEM;
1047 goto err_dec_alloc;
1048 }
1049 vfd->fops = &s5p_mfc_fops,
1050 vfd->ioctl_ops = get_dec_v4l2_ioctl_ops();
1051 vfd->release = video_device_release,
1052 vfd->lock = &dev->mfc_mutex;
1053 vfd->v4l2_dev = &dev->v4l2_dev;
1054 snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
1055 dev->vfd_dec = vfd;
1056 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
1057 if (ret) {
1058 v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
1059 video_device_release(vfd);
1060 goto err_dec_reg;
1061 }
1062 v4l2_info(&dev->v4l2_dev,
1063 "decoder registered as /dev/video%d\n", vfd->num);
1064 video_set_drvdata(vfd, dev);
1065
1066 /* encoder */
1067 vfd = video_device_alloc();
1068 if (!vfd) {
1069 v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
1070 ret = -ENOMEM;
1071 goto err_enc_alloc;
1072 }
1073 vfd->fops = &s5p_mfc_fops,
1074 vfd->ioctl_ops = get_enc_v4l2_ioctl_ops();
1075 vfd->release = video_device_release,
1076 vfd->lock = &dev->mfc_mutex;
1077 vfd->v4l2_dev = &dev->v4l2_dev;
1078 snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
1079 dev->vfd_enc = vfd;
1080 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
1081 if (ret) {
1082 v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
1083 video_device_release(vfd);
1084 goto err_enc_reg;
1085 }
1086 v4l2_info(&dev->v4l2_dev,
1087 "encoder registered as /dev/video%d\n", vfd->num);
1088 video_set_drvdata(vfd, dev);
1089 platform_set_drvdata(pdev, dev);
1090
1091 dev->hw_lock = 0;
1092 dev->watchdog_workqueue = create_singlethread_workqueue(S5P_MFC_NAME);
1093 INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
1094 atomic_set(&dev->watchdog_cnt, 0);
1095 init_timer(&dev->watchdog_timer);
1096 dev->watchdog_timer.data = (unsigned long)dev;
1097 dev->watchdog_timer.function = s5p_mfc_watchdog;
1098
1099 pr_debug("%s--\n", __func__);
1100 return 0;
1101
1102/* Deinit MFC if probe had failed */
1103err_enc_reg:
1104 video_device_release(dev->vfd_enc);
1105err_enc_alloc:
1106 video_unregister_device(dev->vfd_dec);
1107err_dec_reg:
1108 video_device_release(dev->vfd_dec);
1109err_dec_alloc:
1110 v4l2_device_unregister(&dev->v4l2_dev);
1111err_v4l2_dev_reg:
1112 vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
1113err_mem_init_ctx_1:
1114 vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
1115err_mem_init_ctx_0:
1116err_find_child:
1117 free_irq(dev->irq, dev);
1118err_req_irq:
1119err_get_res:
1120 iounmap(dev->regs_base);
1121 dev->regs_base = NULL;
1122err_ioremap:
1123 release_resource(dev->mfc_mem);
1124 kfree(dev->mfc_mem);
1125err_mem_reg:
1126err_res:
1127 s5p_mfc_final_pm(dev);
1128err_clk:
1129err_dev:
1130 kfree(dev);
1131 pr_debug("%s-- with error\n", __func__);
1132 return ret;
1133
1134}
1135
/* Remove the driver */
static int __devexit s5p_mfc_remove(struct platform_device *pdev)
{
	struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);

	v4l2_info(&dev->v4l2_dev, "Removing %s\n", pdev->name);

	/* Stop the watchdog first so it cannot fire against a device that is
	 * in the middle of being torn down. */
	del_timer_sync(&dev->watchdog_timer);
	flush_workqueue(dev->watchdog_workqueue);
	destroy_workqueue(dev->watchdog_workqueue);

	/* Unwind in reverse probe order: video nodes, v4l2 device, vb2
	 * allocator contexts, then irq, MMIO mapping, memory region and PM. */
	video_unregister_device(dev->vfd_enc);
	video_unregister_device(dev->vfd_dec);
	v4l2_device_unregister(&dev->v4l2_dev);
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);

	free_irq(dev->irq, dev);
	iounmap(dev->regs_base);
	if (dev->mfc_mem) {
		release_resource(dev->mfc_mem);
		kfree(dev->mfc_mem);
		dev->mfc_mem = NULL;
	}
	s5p_mfc_final_pm(dev);
	kfree(dev);
	return 0;
}
1164
1165#ifdef CONFIG_PM_SLEEP
1166
1167static int s5p_mfc_suspend(struct device *dev)
1168{
1169 struct platform_device *pdev = to_platform_device(dev);
1170 struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1171 int ret;
1172
1173 if (m_dev->num_inst == 0)
1174 return 0;
1175 return s5p_mfc_sleep(m_dev);
1176 if (test_and_set_bit(0, &m_dev->enter_suspend) != 0) {
1177 mfc_err("Error: going to suspend for a second time\n");
1178 return -EIO;
1179 }
1180
1181 /* Check if we're processing then wait if it necessary. */
1182 while (test_and_set_bit(0, &m_dev->hw_lock) != 0) {
1183 /* Try and lock the HW */
1184 /* Wait on the interrupt waitqueue */
1185 ret = wait_event_interruptible_timeout(m_dev->queue,
1186 m_dev->int_cond || m_dev->ctx[m_dev->curr_ctx]->int_cond,
1187 msecs_to_jiffies(MFC_INT_TIMEOUT));
1188
1189 if (ret == 0) {
1190 mfc_err("Waiting for hardware to finish timed out\n");
1191 return -EIO;
1192 }
1193 }
1194 return 0;
1195}
1196
1197static int s5p_mfc_resume(struct device *dev)
1198{
1199 struct platform_device *pdev = to_platform_device(dev);
1200 struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1201
1202 if (m_dev->num_inst == 0)
1203 return 0;
1204 return s5p_mfc_wakeup(m_dev);
1205}
1206#endif
1207
1208#ifdef CONFIG_PM_RUNTIME
1209static int s5p_mfc_runtime_suspend(struct device *dev)
1210{
1211 struct platform_device *pdev = to_platform_device(dev);
1212 struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1213
1214 atomic_set(&m_dev->pm.power, 0);
1215 return 0;
1216}
1217
1218static int s5p_mfc_runtime_resume(struct device *dev)
1219{
1220 struct platform_device *pdev = to_platform_device(dev);
1221 struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1222 int pre_power;
1223
1224 if (!m_dev->alloc_ctx)
1225 return 0;
1226 pre_power = atomic_read(&m_dev->pm.power);
1227 atomic_set(&m_dev->pm.power, 1);
1228 return 0;
1229}
1230#endif
1231
/* Power management */
/* System sleep uses the full firmware sleep/wakeup handshake; runtime PM
 * only tracks the power state in pm.power (see the callbacks above). */
static const struct dev_pm_ops s5p_mfc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
	SET_RUNTIME_PM_OPS(s5p_mfc_runtime_suspend, s5p_mfc_runtime_resume,
			   NULL)
};
1238
/* Platform driver glue: binds to the platform device named S5P_MFC_NAME. */
static struct platform_driver s5p_mfc_pdrv = {
	.probe = s5p_mfc_probe,
	.remove = __devexit_p(s5p_mfc_remove),
	.driver = {
		.name = S5P_MFC_NAME,
		.owner = THIS_MODULE,
		.pm = &s5p_mfc_pm_ops
	},
};
1248
/* Printed once at module load; __initdata so it is discarded afterwards. */
static char banner[] __initdata =
	"S5P MFC V4L2 Driver, (C) 2011 Samsung Electronics\n";
1251
1252static int __init s5p_mfc_init(void)
1253{
1254 int ret;
1255
1256 pr_info("%s", banner);
1257 ret = platform_driver_register(&s5p_mfc_pdrv);
1258 if (ret)
1259 pr_err("Platform device registration failed.\n");
1260 return ret;
1261}
1262
1263static void __devexit s5p_mfc_exit(void)
1264{
1265 platform_driver_unregister(&s5p_mfc_pdrv);
1266}
1267
1268module_init(s5p_mfc_init);
1269module_exit(s5p_mfc_exit);
1270
1271MODULE_LICENSE("GPL");
1272MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
1273MODULE_DESCRIPTION("Samsung S5P Multi Format Codec V4L2 driver");
1274
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_cmd.c b/drivers/media/video/s5p-mfc/s5p_mfc_cmd.c
new file mode 100644
index 000000000000..f0665ed1a529
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_cmd.c
@@ -0,0 +1,120 @@
1/*
2 * linux/drivers/media/video/s5p-mfc/s5p_mfc_cmd.c
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include "regs-mfc.h"
14#include "s5p_mfc_cmd.h"
15#include "s5p_mfc_common.h"
16#include "s5p_mfc_debug.h"
17
18/* This function is used to send a command to the MFC */
19static int s5p_mfc_cmd_host2risc(struct s5p_mfc_dev *dev, int cmd,
20 struct s5p_mfc_cmd_args *args)
21{
22 int cur_cmd;
23 unsigned long timeout;
24
25 timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
26 /* wait until host to risc command register becomes 'H2R_CMD_EMPTY' */
27 do {
28 if (time_after(jiffies, timeout)) {
29 mfc_err("Timeout while waiting for hardware\n");
30 return -EIO;
31 }
32 cur_cmd = mfc_read(dev, S5P_FIMV_HOST2RISC_CMD);
33 } while (cur_cmd != S5P_FIMV_H2R_CMD_EMPTY);
34 mfc_write(dev, args->arg[0], S5P_FIMV_HOST2RISC_ARG1);
35 mfc_write(dev, args->arg[1], S5P_FIMV_HOST2RISC_ARG2);
36 mfc_write(dev, args->arg[2], S5P_FIMV_HOST2RISC_ARG3);
37 mfc_write(dev, args->arg[3], S5P_FIMV_HOST2RISC_ARG4);
38 /* Issue the command */
39 mfc_write(dev, cmd, S5P_FIMV_HOST2RISC_CMD);
40 return 0;
41}
42
43/* Initialize the MFC */
44int s5p_mfc_sys_init_cmd(struct s5p_mfc_dev *dev)
45{
46 struct s5p_mfc_cmd_args h2r_args;
47
48 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
49 h2r_args.arg[0] = dev->fw_size;
50 return s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_SYS_INIT, &h2r_args);
51}
52
53/* Suspend the MFC hardware */
54int s5p_mfc_sleep_cmd(struct s5p_mfc_dev *dev)
55{
56 struct s5p_mfc_cmd_args h2r_args;
57
58 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
59 return s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_SLEEP, &h2r_args);
60}
61
62/* Wake up the MFC hardware */
63int s5p_mfc_wakeup_cmd(struct s5p_mfc_dev *dev)
64{
65 struct s5p_mfc_cmd_args h2r_args;
66
67 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
68 return s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_WAKEUP, &h2r_args);
69}
70
71
72int s5p_mfc_open_inst_cmd(struct s5p_mfc_ctx *ctx)
73{
74 struct s5p_mfc_dev *dev = ctx->dev;
75 struct s5p_mfc_cmd_args h2r_args;
76 int ret;
77
78 /* Preparing decoding - getting instance number */
79 mfc_debug(2, "Getting instance number (codec: %d)\n", ctx->codec_mode);
80 dev->curr_ctx = ctx->num;
81 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
82 h2r_args.arg[0] = ctx->codec_mode;
83 h2r_args.arg[1] = 0; /* no crc & no pixelcache */
84 h2r_args.arg[2] = ctx->ctx_ofs;
85 h2r_args.arg[3] = ctx->ctx_size;
86 ret = s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE,
87 &h2r_args);
88 if (ret) {
89 mfc_err("Failed to create a new instance\n");
90 ctx->state = MFCINST_ERROR;
91 }
92 return ret;
93}
94
95int s5p_mfc_close_inst_cmd(struct s5p_mfc_ctx *ctx)
96{
97 struct s5p_mfc_dev *dev = ctx->dev;
98 struct s5p_mfc_cmd_args h2r_args;
99 int ret;
100
101 if (ctx->state == MFCINST_FREE) {
102 mfc_err("Instance already returned\n");
103 ctx->state = MFCINST_ERROR;
104 return -EINVAL;
105 }
106 /* Closing decoding instance */
107 mfc_debug(2, "Returning instance number %d\n", ctx->inst_no);
108 dev->curr_ctx = ctx->num;
109 memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
110 h2r_args.arg[0] = ctx->inst_no;
111 ret = s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_CLOSE_INSTANCE,
112 &h2r_args);
113 if (ret) {
114 mfc_err("Failed to return an instance\n");
115 ctx->state = MFCINST_ERROR;
116 return -EINVAL;
117 }
118 return 0;
119}
120
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_cmd.h b/drivers/media/video/s5p-mfc/s5p_mfc_cmd.h
new file mode 100644
index 000000000000..5ceebfe6131a
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_cmd.h
@@ -0,0 +1,30 @@
1/*
2 * linux/drivers/media/video/s5p-mfc/s5p_mfc_cmd.h
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#ifndef S5P_MFC_CMD_H_
14#define S5P_MFC_CMD_H_
15
16#include "s5p_mfc_common.h"
17
#define MAX_H2R_ARG	4

/* Argument block carried by every host-to-risc mailbox command; unused
 * slots are zeroed by the callers. */
struct s5p_mfc_cmd_args {
	unsigned int	arg[MAX_H2R_ARG];
};
23
24int s5p_mfc_sys_init_cmd(struct s5p_mfc_dev *dev);
25int s5p_mfc_sleep_cmd(struct s5p_mfc_dev *dev);
26int s5p_mfc_wakeup_cmd(struct s5p_mfc_dev *dev);
27int s5p_mfc_open_inst_cmd(struct s5p_mfc_ctx *ctx);
28int s5p_mfc_close_inst_cmd(struct s5p_mfc_ctx *ctx);
29
30#endif /* S5P_MFC_CMD_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_common.h b/drivers/media/video/s5p-mfc/s5p_mfc_common.h
new file mode 100644
index 000000000000..91146fa622e4
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_common.h
@@ -0,0 +1,572 @@
1/*
2 * Samsung S5P Multi Format Codec v 5.0
3 *
4 * This file contains definitions of enums and structs used by the codec
5 * driver.
6 *
7 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
8 * Kamil Debski, <k.debski@samsung.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version
14 */
15
16#ifndef S5P_MFC_COMMON_H_
17#define S5P_MFC_COMMON_H_
18
19#include "regs-mfc.h"
20#include <linux/platform_device.h>
21#include <linux/videodev2.h>
22#include <media/v4l2-ctrls.h>
23#include <media/v4l2-device.h>
24#include <media/v4l2-ioctl.h>
25#include <media/videobuf2-core.h>
26
27/* Definitions related to MFC memory */
28
/* Offset base used to differentiate between CAPTURE and OUTPUT
 * while mmapping */
31#define DST_QUEUE_OFF_BASE (TASK_SIZE / 2)
32
33/* Offset used by the hardware to store addresses */
34#define MFC_OFFSET_SHIFT 11
35
36#define FIRMWARE_ALIGN 0x20000 /* 128KB */
37#define MFC_H264_CTX_BUF_SIZE 0x96000 /* 600KB per H264 instance */
38#define MFC_CTX_BUF_SIZE 0x2800 /* 10KB per instance */
39#define DESC_BUF_SIZE 0x20000 /* 128KB for DESC buffer */
40#define SHARED_BUF_SIZE 0x2000 /* 8KB for shared buffer */
41
42#define DEF_CPB_SIZE 0x40000 /* 512KB */
43
44#define MFC_BANK1_ALLOC_CTX 0
45#define MFC_BANK2_ALLOC_CTX 1
46
47#define MFC_BANK1_ALIGN_ORDER 13
48#define MFC_BANK2_ALIGN_ORDER 13
49#define MFC_BASE_ALIGN_ORDER 17
50
51#include <media/videobuf2-dma-contig.h>
52
53static inline dma_addr_t s5p_mfc_mem_cookie(void *a, void *b)
54{
55 /* Same functionality as the vb2_dma_contig_plane_paddr */
56 dma_addr_t *paddr = vb2_dma_contig_memops.cookie(b);
57
58 return *paddr;
59}
60
61/* MFC definitions */
62#define MFC_MAX_EXTRA_DPB 5
63#define MFC_MAX_BUFFERS 32
64#define MFC_NUM_CONTEXTS 4
65/* Interrupt timeout */
66#define MFC_INT_TIMEOUT 2000
67/* Busy wait timeout */
68#define MFC_BW_TIMEOUT 500
69/* Watchdog interval */
70#define MFC_WATCHDOG_INTERVAL 1000
71/* After how many executions watchdog should assume lock up */
72#define MFC_WATCHDOG_CNT 10
73#define MFC_NO_INSTANCE_SET -1
74#define MFC_ENC_CAP_PLANE_COUNT 1
75#define MFC_ENC_OUT_PLANE_COUNT 2
76#define STUFF_BYTE 4
77#define MFC_MAX_CTRLS 64
78
/* MMIO register accessors, offsets relative to the mapped register base. */
#define mfc_read(dev, offset)		readl(dev->regs_base + (offset))
#define mfc_write(dev, data, offset)	writel((data), dev->regs_base + \
								(offset))
82
/**
 * enum s5p_mfc_fmt_type - type of the pixelformat
 */
enum s5p_mfc_fmt_type {
	MFC_FMT_DEC,
	MFC_FMT_ENC,
	MFC_FMT_RAW,
};

/**
 * enum s5p_mfc_node_type - The type of an MFC device node.
 */
enum s5p_mfc_node_type {
	MFCNODE_INVALID = -1,
	MFCNODE_DECODER = 0,
	MFCNODE_ENCODER = 1,
};

/**
 * enum s5p_mfc_inst_type - The type of an MFC instance.
 */
enum s5p_mfc_inst_type {
	MFCINST_INVALID,
	MFCINST_DECODER,
	MFCINST_ENCODER,
};

/**
 * enum s5p_mfc_inst_state - The state of an MFC instance.
 */
enum s5p_mfc_inst_state {
	MFCINST_FREE = 0,
	/* States of a live instance start at 100, leaving a gap after FREE. */
	MFCINST_INIT = 100,
	MFCINST_GOT_INST,
	MFCINST_HEAD_PARSED,
	MFCINST_BUFS_SET,
	MFCINST_RUNNING,
	MFCINST_FINISHING,
	MFCINST_FINISHED,
	MFCINST_RETURN_INST,
	MFCINST_ERROR,
	MFCINST_ABORT,
	/* Resolution-change sequence. */
	MFCINST_RES_CHANGE_INIT,
	MFCINST_RES_CHANGE_FLUSH,
	MFCINST_RES_CHANGE_END,
};

/**
 * enum s5p_mfc_queue_state - The state of buffer queue.
 */
enum s5p_mfc_queue_state {
	QUEUE_FREE,
	QUEUE_BUFS_REQUESTED,
	QUEUE_BUFS_QUERIED,
	QUEUE_BUFS_MMAPED,
};

/**
 * enum s5p_mfc_decode_arg - type of frame decoding
 */
enum s5p_mfc_decode_arg {
	MFC_DEC_FRAME,
	MFC_DEC_LAST_FRAME,
	MFC_DEC_RES_CHANGE,
};
148
struct s5p_mfc_ctx;

/**
 * struct s5p_mfc_buf - MFC buffer
 */
struct s5p_mfc_buf {
	struct list_head list;
	struct vb2_buffer *b;
	/* Hardware addresses for the buffer: separate luma/chroma planes for
	 * raw frames, a single address for coded streams. */
	union {
		struct {
			size_t luma;
			size_t chroma;
		} raw;
		size_t stream;
	} cookie;
	int used;	/* NOTE(review): presumably nonzero while queued to hw — confirm */
};

/**
 * struct s5p_mfc_pm - power management data structure
 */
struct s5p_mfc_pm {
	struct clk *clock;
	struct clk *clock_gate;
	atomic_t power;		/* 1 = powered, 0 = off; set by runtime PM ops */
	struct device *device;
};
176
/**
 * struct s5p_mfc_dev - The struct containing driver internal parameters.
 *
 * @v4l2_dev:		v4l2_device
 * @vfd_dec:		video device for decoding
 * @vfd_enc:		video device for encoding
 * @plat_dev:		platform device
 * @mem_dev_l:		child device of the left memory bank (0)
 * @mem_dev_r:		child device of the right memory bank (1)
 * @regs_base:		base address of the MFC hw registers
 * @irq:		irq resource
 * @mfc_mem:		MFC registers memory resource
 * @dec_ctrl_handler:	control framework handler for decoding
 * @enc_ctrl_handler:	control framework handler for encoding
 * @pm:			power management control
 * @num_inst:		counter of active MFC instances
 * @irqlock:		lock for operations on videobuf2 queues
 * @condlock:		lock for changing/checking if a context is ready to be
 *			processed
 * @mfc_mutex:		lock for video_device
 * @int_cond:		variable used by the waitqueue
 * @int_type:		type of last interrupt
 * @int_err:		error number for last interrupt
 * @queue:		waitqueue for waiting for completion of device commands
 * @fw_size:		size of firmware
 * @bank1:		address of the beginning of bank 1 memory
 * @bank2:		address of the beginning of bank 2 memory
 * @hw_lock:		used for hardware locking
 * @ctx:		array of driver contexts
 * @curr_ctx:		number of the currently running context
 * @ctx_work_bits:	used to mark which contexts are waiting for hardware
 * @watchdog_cnt:	counter for the watchdog
 * @watchdog_workqueue:	workqueue for the watchdog
 * @watchdog_work:	worker for the watchdog
 * @alloc_ctx:		videobuf2 allocator contexts for two memory banks
 * @enter_suspend:	flag set when entering suspend
 *
 */
struct s5p_mfc_dev {
	struct v4l2_device	v4l2_dev;
	struct video_device	*vfd_dec;
	struct video_device	*vfd_enc;
	struct platform_device	*plat_dev;
	struct device		*mem_dev_l;
	struct device		*mem_dev_r;
	void __iomem		*regs_base;
	int			irq;
	struct resource		*mfc_mem;
	struct v4l2_ctrl_handler dec_ctrl_handler;
	struct v4l2_ctrl_handler enc_ctrl_handler;
	struct s5p_mfc_pm	pm;
	int num_inst;
	spinlock_t irqlock;	/* lock when operating on videobuf2 queues */
	spinlock_t condlock;	/* lock when changing/checking if a context is
					ready to be processed */
	struct mutex mfc_mutex;	/* video_device lock */
	int int_cond;
	int int_type;
	unsigned int int_err;
	wait_queue_head_t queue;
	size_t fw_size;
	size_t bank1;
	size_t bank2;
	unsigned long hw_lock;
	struct s5p_mfc_ctx *ctx[MFC_NUM_CONTEXTS];
	int curr_ctx;
	unsigned long ctx_work_bits;
	atomic_t watchdog_cnt;
	struct timer_list watchdog_timer;
	struct workqueue_struct *watchdog_workqueue;
	struct work_struct watchdog_work;
	void *alloc_ctx[2];
	unsigned long enter_suspend;
};
251
/**
 * struct s5p_mfc_h264_enc_params - encoding parameters for h264
 */
struct s5p_mfc_h264_enc_params {
	enum v4l2_mpeg_video_h264_profile profile;
	enum v4l2_mpeg_video_h264_loop_filter_mode loop_filter_mode;
	s8 loop_filter_alpha;
	s8 loop_filter_beta;
	enum v4l2_mpeg_video_h264_entropy_mode entropy_mode;
	u8 max_ref_pic;
	u8 num_ref_pic_4p;
	/* leading '_' because a C identifier cannot start with a digit */
	int _8x8_transform;
	int rc_mb;
	int rc_mb_dark;
	int rc_mb_smooth;
	int rc_mb_static;
	int rc_mb_activity;
	int vui_sar;
	u8 vui_sar_idc;
	u16 vui_ext_sar_width;
	u16 vui_ext_sar_height;
	int open_gop;
	u16 open_gop_size;
	u8 rc_frame_qp;
	u8 rc_min_qp;
	u8 rc_max_qp;
	u8 rc_p_frame_qp;
	u8 rc_b_frame_qp;
	/* level is kept twice: the V4L2 enum and the value used by the hw. */
	enum v4l2_mpeg_video_h264_level level_v4l2;
	int level;
	u16 cpb_size;
};

/**
 * struct s5p_mfc_mpeg4_enc_params - encoding parameters for h263 and mpeg4
 */
struct s5p_mfc_mpeg4_enc_params {
	/* MPEG4 Only */
	enum v4l2_mpeg_video_mpeg4_profile profile;
	int quarter_pixel;
	/* Common for MPEG4, H263 */
	u16 vop_time_res;
	u16 vop_frm_delta;
	u8 rc_frame_qp;
	u8 rc_min_qp;
	u8 rc_max_qp;
	u8 rc_p_frame_qp;
	u8 rc_b_frame_qp;
	enum v4l2_mpeg_video_mpeg4_level level_v4l2;
	int level;
};

/**
 * struct s5p_mfc_enc_params - general encoding parameters
 */
struct s5p_mfc_enc_params {
	u16 width;
	u16 height;

	u16 gop_size;
	enum v4l2_mpeg_video_multi_slice_mode slice_mode;
	u16 slice_mb;
	u32 slice_bit;
	u16 intra_refresh_mb;
	int pad;
	u8 pad_luma;
	u8 pad_cb;
	u8 pad_cr;
	int rc_frame;
	u32 rc_bitrate;
	u16 rc_reaction_coeff;
	u16 vbv_size;

	enum v4l2_mpeg_video_header_mode seq_hdr_mode;
	enum v4l2_mpeg_mfc51_video_frame_skip_mode frame_skip_mode;
	int fixed_target_bit;

	u8 num_b_frame;
	u32 rc_framerate_num;
	u32 rc_framerate_denom;
	int interlace;

	/* Codec-specific parameters, selected by the instance's codec. */
	union {
		struct s5p_mfc_h264_enc_params h264;
		struct s5p_mfc_mpeg4_enc_params mpeg4;
	} codec;

};
340
/**
 * struct s5p_mfc_codec_ops - codec ops, used by encoding
 */
struct s5p_mfc_codec_ops {
	/* initialization routines */
	int (*pre_seq_start) (struct s5p_mfc_ctx *ctx);
	int (*post_seq_start) (struct s5p_mfc_ctx *ctx);
	/* execution routines */
	int (*pre_frame_start) (struct s5p_mfc_ctx *ctx);
	int (*post_frame_start) (struct s5p_mfc_ctx *ctx);
};

/* Invoke codec op @op on context @c when implemented; 0 (success) when the
 * op pointer is NULL. */
#define call_cop(c, op, args...) \
	(((c)->c_ops->op) ? \
		((c)->c_ops->op(args)) : 0)
356
357/**
358 * struct s5p_mfc_ctx - This struct contains the instance context
359 *
360 * @dev: pointer to the s5p_mfc_dev of the device
361 * @fh: struct v4l2_fh
362 * @num: number of the context that this structure describes
363 * @int_cond: variable used by the waitqueue
364 * @int_type: type of the last interrupt
365 * @int_err: error number received from MFC hw in the interrupt
366 * @queue: waitqueue that can be used to wait for this context to
367 * finish
368 * @src_fmt: source pixelformat information
369 * @dst_fmt: destination pixelformat information
370 * @vq_src: vb2 queue for source buffers
371 * @vq_dst: vb2 queue for destination buffers
372 * @src_queue: driver internal queue for source buffers
373 * @dst_queue: driver internal queue for destination buffers
374 * @src_queue_cnt: number of buffers queued on the source internal queue
375 * @dst_queue_cnt: number of buffers queued on the dest internal queue
376 * @type: type of the instance - decoder or encoder
377 * @state: state of the context
378 * @inst_no: number of hw instance associated with the context
379 * @img_width: width of the image that is decoded or encoded
380 * @img_height: height of the image that is decoded or encoded
381 * @buf_width: width of the buffer for processed image
382 * @buf_height: height of the buffer for processed image
383 * @luma_size: size of a luma plane
384 * @chroma_size: size of a chroma plane
385 * @mv_size: size of a motion vectors buffer
386 * @consumed_stream: number of bytes that have been used so far from the
387 * decoding buffer
388 * @dpb_flush_flag: flag used to indicate that a DPB buffers are being
389 * flushed
390 * @bank1_buf: handle to memory allocated for temporary buffers from
391 * memory bank 1
392 * @bank1_phys: address of the temporary buffers from memory bank 1
393 * @bank1_size: size of the memory allocated for temporary buffers from
394 * memory bank 1
395 * @bank2_buf: handle to memory allocated for temporary buffers from
396 * memory bank 2
397 * @bank2_phys: address of the temporary buffers from memory bank 2
398 * @bank2_size: size of the memory allocated for temporary buffers from
399 * memory bank 2
400 * @capture_state: state of the capture buffers queue
401 * @output_state: state of the output buffers queue
402 * @src_bufs: information on allocated source buffers
403 * @dst_bufs: information on allocated destination buffers
404 * @sequence: counter for the sequence number for v4l2
405 * @dec_dst_flag: flags for buffers queued in the hardware
406 * @dec_src_buf_size: size of the buffer for source buffers in decoding
407 * @codec_mode: number of codec mode used by MFC hw
408 * @slice_interface: slice interface flag
409 * @loop_filter_mpeg4: loop filter for MPEG4 flag
410 * @display_delay: value of the display delay for H264
411 * @display_delay_enable: display delay for H264 enable flag
412 * @after_packed_pb: flag used to track buffer when stream is in
413 * Packed PB format
414 * @dpb_count: count of the DPB buffers required by MFC hw
415 * @total_dpb_count: count of DPB buffers with additional buffers
416 * requested by the application
417 * @ctx_buf: handle to the memory associated with this context
418 * @ctx_phys: address of the memory associated with this context
419 * @ctx_size: size of the memory associated with this context
420 * @desc_buf: description buffer for decoding handle
421 * @desc_phys: description buffer for decoding address
422 * @shm_alloc: handle for the shared memory buffer
423 * @shm: virtual address for the shared memory buffer
424 * @shm_ofs: address offset for shared memory
425 * @enc_params: encoding parameters for MFC
426 * @enc_dst_buf_size: size of the buffers for encoder output
427 * @frame_type: used to force the type of the next encoded frame
428 * @ref_queue: list of the reference buffers for encoding
429 * @ref_queue_cnt: number of the buffers in the reference list
430 * @c_ops: ops for encoding
431 * @ctrls: array of controls, used when adding controls to the
432 * v4l2 control framework
433 * @ctrl_handler: handler for v4l2 framework
434 */
struct s5p_mfc_ctx {
	struct s5p_mfc_dev *dev;
	struct v4l2_fh fh;

	int num;

	int int_cond;
	int int_type;
	unsigned int int_err;
	wait_queue_head_t queue;

	struct s5p_mfc_fmt *src_fmt;
	struct s5p_mfc_fmt *dst_fmt;

	struct vb2_queue vq_src;
	struct vb2_queue vq_dst;

	struct list_head src_queue;
	struct list_head dst_queue;

	unsigned int src_queue_cnt;
	unsigned int dst_queue_cnt;

	enum s5p_mfc_inst_type type;
	enum s5p_mfc_inst_state state;
	int inst_no;

	/* Image parameters */
	int img_width;
	int img_height;
	int buf_width;
	int buf_height;

	int luma_size;
	int chroma_size;
	int mv_size;

	unsigned long consumed_stream;

	unsigned int dpb_flush_flag;

	/* Buffers */
	void *bank1_buf;
	size_t bank1_phys;
	size_t bank1_size;

	void *bank2_buf;
	size_t bank2_phys;
	size_t bank2_size;

	enum s5p_mfc_queue_state capture_state;
	enum s5p_mfc_queue_state output_state;

	struct s5p_mfc_buf src_bufs[MFC_MAX_BUFFERS];
	int src_bufs_cnt;	/* number of entries used in src_bufs (not in kerneldoc above) */
	struct s5p_mfc_buf dst_bufs[MFC_MAX_BUFFERS];
	int dst_bufs_cnt;	/* number of entries used in dst_bufs (not in kerneldoc above) */

	unsigned int sequence;
	unsigned long dec_dst_flag;
	size_t dec_src_buf_size;

	/* Control values */
	int codec_mode;
	int slice_interface;
	int loop_filter_mpeg4;
	int display_delay;
	int display_delay_enable;
	int after_packed_pb;

	int dpb_count;
	int total_dpb_count;

	/* Buffers */
	void *ctx_buf;
	size_t ctx_phys;
	size_t ctx_ofs;		/* shifted context address passed to the fw (not in kerneldoc above) */
	size_t ctx_size;

	void *desc_buf;
	size_t desc_phys;


	void *shm_alloc;
	void *shm;
	size_t shm_ofs;

	struct s5p_mfc_enc_params enc_params;

	size_t enc_dst_buf_size;

	/* NOTE(review): documented as @frame_type in the kerneldoc above. */
	enum v4l2_mpeg_mfc51_video_force_frame_type force_frame_type;

	struct list_head ref_queue;
	unsigned int ref_queue_cnt;

	struct s5p_mfc_codec_ops *c_ops;

	struct v4l2_ctrl *ctrls[MFC_MAX_CTRLS];
	struct v4l2_ctrl_handler ctrl_handler;
};
536
/*
 * struct s5p_mfc_fmt - structure used to store information about pixelformats
 * used by the MFC
 */
struct s5p_mfc_fmt {
	char *name;
	u32 fourcc;		/* V4L2 fourcc code */
	u32 codec_mode;		/* codec mode number used by the MFC hw */
	enum s5p_mfc_fmt_type type;
	u32 num_planes;
};
548
/**
 * struct mfc_control - structure used to store information about MFC controls
 * it is used to initialize the control framework.
 */
struct mfc_control {
	__u32			id;
	enum v4l2_ctrl_type	type;
	__u8			name[32];  /* Whatever */
	__s32			minimum;   /* Note signedness */
	__s32			maximum;
	__s32			step;
	__u32			menu_skip_mask;
	__s32			default_value;
	__u32			flags;
	__u32			reserved[2];
	__u8			is_volatile;	/* nonzero: control value read from hw */
};
566
567
/* Map an embedded v4l2_fh / a control's handler back to its s5p_mfc_ctx. */
#define fh_to_ctx(__fh) container_of(__fh, struct s5p_mfc_ctx, fh)
#define ctrl_to_ctx(__ctrl) \
	container_of((__ctrl)->handler, struct s5p_mfc_ctx, ctrl_handler)
571
572#endif /* S5P_MFC_COMMON_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
new file mode 100644
index 000000000000..5f4da80051bb
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
@@ -0,0 +1,343 @@
1/*
2 * linux/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
3 *
4 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include <linux/delay.h>
14#include <linux/err.h>
15#include <linux/firmware.h>
16#include <linux/jiffies.h>
17#include <linux/sched.h>
18#include "regs-mfc.h"
19#include "s5p_mfc_cmd.h"
20#include "s5p_mfc_common.h"
21#include "s5p_mfc_debug.h"
22#include "s5p_mfc_intr.h"
23#include "s5p_mfc_pm.h"
24
25static void *s5p_mfc_bitproc_buf;
26static size_t s5p_mfc_bitproc_phys;
27static unsigned char *s5p_mfc_bitproc_virt;
28
29/* Allocate and load firmware */
30int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
31{
32 struct firmware *fw_blob;
33 size_t bank2_base_phys;
34 void *b_base;
35 int err;
36
37 /* Firmare has to be present as a separate file or compiled
38 * into kernel. */
39 mfc_debug_enter();
40 err = request_firmware((const struct firmware **)&fw_blob,
41 "s5pc110-mfc.fw", dev->v4l2_dev.dev);
42 if (err != 0) {
43 mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
44 return -EINVAL;
45 }
46 dev->fw_size = ALIGN(fw_blob->size, FIRMWARE_ALIGN);
47 if (s5p_mfc_bitproc_buf) {
48 mfc_err("Attempting to allocate firmware when it seems that it is already loaded\n");
49 release_firmware(fw_blob);
50 return -ENOMEM;
51 }
52 s5p_mfc_bitproc_buf = vb2_dma_contig_memops.alloc(
53 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], dev->fw_size);
54 if (IS_ERR(s5p_mfc_bitproc_buf)) {
55 s5p_mfc_bitproc_buf = 0;
56 mfc_err("Allocating bitprocessor buffer failed\n");
57 release_firmware(fw_blob);
58 return -ENOMEM;
59 }
60 s5p_mfc_bitproc_phys = s5p_mfc_mem_cookie(
61 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], s5p_mfc_bitproc_buf);
62 if (s5p_mfc_bitproc_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) {
63 mfc_err("The base memory for bank 1 is not aligned to 128KB\n");
64 vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
65 s5p_mfc_bitproc_phys = 0;
66 s5p_mfc_bitproc_buf = 0;
67 release_firmware(fw_blob);
68 return -EIO;
69 }
70 s5p_mfc_bitproc_virt = vb2_dma_contig_memops.vaddr(s5p_mfc_bitproc_buf);
71 if (!s5p_mfc_bitproc_virt) {
72 mfc_err("Bitprocessor memory remap failed\n");
73 vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
74 s5p_mfc_bitproc_phys = 0;
75 s5p_mfc_bitproc_buf = 0;
76 release_firmware(fw_blob);
77 return -EIO;
78 }
79 dev->bank1 = s5p_mfc_bitproc_phys;
80 b_base = vb2_dma_contig_memops.alloc(
81 dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], 1 << MFC_BANK2_ALIGN_ORDER);
82 if (IS_ERR(b_base)) {
83 vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
84 s5p_mfc_bitproc_phys = 0;
85 s5p_mfc_bitproc_buf = 0;
86 mfc_err("Allocating bank2 base failed\n");
87 release_firmware(fw_blob);
88 return -ENOMEM;
89 }
90 bank2_base_phys = s5p_mfc_mem_cookie(
91 dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], b_base);
92 vb2_dma_contig_memops.put(b_base);
93 if (bank2_base_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) {
94 mfc_err("The base memory for bank 2 is not aligned to 128KB\n");
95 vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
96 s5p_mfc_bitproc_phys = 0;
97 s5p_mfc_bitproc_buf = 0;
98 release_firmware(fw_blob);
99 return -EIO;
100 }
101 dev->bank2 = bank2_base_phys;
102 memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size);
103 wmb();
104 release_firmware(fw_blob);
105 mfc_debug_leave();
106 return 0;
107}
108
109/* Reload firmware to MFC */
110int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev)
111{
112 struct firmware *fw_blob;
113 int err;
114
115 /* Firmare has to be present as a separate file or compiled
116 * into kernel. */
117 mfc_debug_enter();
118 err = request_firmware((const struct firmware **)&fw_blob,
119 "s5pc110-mfc.fw", dev->v4l2_dev.dev);
120 if (err != 0) {
121 mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
122 return -EINVAL;
123 }
124 if (fw_blob->size > dev->fw_size) {
125 mfc_err("MFC firmware is too big to be loaded\n");
126 release_firmware(fw_blob);
127 return -ENOMEM;
128 }
129 if (s5p_mfc_bitproc_buf == 0 || s5p_mfc_bitproc_phys == 0) {
130 mfc_err("MFC firmware is not allocated or was not mapped correctly\n");
131 release_firmware(fw_blob);
132 return -EINVAL;
133 }
134 memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size);
135 wmb();
136 release_firmware(fw_blob);
137 mfc_debug_leave();
138 return 0;
139}
140
141/* Release firmware memory */
142int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev)
143{
144 /* Before calling this function one has to make sure
145 * that MFC is no longer processing */
146 if (!s5p_mfc_bitproc_buf)
147 return -EINVAL;
148 vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
149 s5p_mfc_bitproc_virt = 0;
150 s5p_mfc_bitproc_phys = 0;
151 s5p_mfc_bitproc_buf = 0;
152 return 0;
153}
154
/*
 * Reset the device.
 *
 * Asserts the RISC reset, resets everything except the memory controller,
 * then polls S5P_FIMV_MC_STATUS until its two low busy bits clear (bounded
 * by MFC_BW_TIMEOUT), and finally releases the reset lines.
 * Returns 0 on success, -EIO if the memory controller never went idle.
 * NOTE(review): 0x3f6/0x3e2/0x3fe look like per-sub-block reset bit masks
 * for S5P_FIMV_SW_RESET - confirm against the MFC datasheet.
 */
int s5p_mfc_reset(struct s5p_mfc_dev *dev)
{
	unsigned int mc_status;
	unsigned long timeout;

	mfc_debug_enter();
	/* Stop procedure */
	/* reset RISC */
	mfc_write(dev, 0x3f6, S5P_FIMV_SW_RESET);
	/* All reset except for MC */
	mfc_write(dev, 0x3e2, S5P_FIMV_SW_RESET);
	mdelay(10);

	timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
	/* Check MC status */
	do {
		if (time_after(jiffies, timeout)) {
			mfc_err("Timeout while resetting MFC\n");
			return -EIO;
		}

		mc_status = mfc_read(dev, S5P_FIMV_MC_STATUS);

	} while (mc_status & 0x3);

	mfc_write(dev, 0x0, S5P_FIMV_SW_RESET);
	mfc_write(dev, 0x3fe, S5P_FIMV_SW_RESET);
	mfc_debug_leave();
	return 0;
}
186
/* Program the two DRAM bank base addresses (set up by
 * s5p_mfc_alloc_and_load_firmware()) into the MFC memory controller. */
static inline void s5p_mfc_init_memctrl(struct s5p_mfc_dev *dev)
{
	mfc_write(dev, dev->bank1, S5P_FIMV_MC_DRAMBASE_ADR_A);
	mfc_write(dev, dev->bank2, S5P_FIMV_MC_DRAMBASE_ADR_B);
	mfc_debug(2, "Bank1: %08x, Bank2: %08x\n", dev->bank1, dev->bank2);
}
193
/* Clear the host<->RISC command mailboxes: invalidate both channel
 * instance ids (all-ones) and zero the command registers so no stale
 * command is seen after reset. */
static inline void s5p_mfc_clear_cmds(struct s5p_mfc_dev *dev)
{
	mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH0_INST_ID);
	mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH1_INST_ID);
	mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
	mfc_write(dev, 0, S5P_FIMV_HOST2RISC_CMD);
}
201
202/* Initialize hardware */
203int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
204{
205 unsigned int ver;
206 int ret;
207
208 mfc_debug_enter();
209 if (!s5p_mfc_bitproc_buf)
210 return -EINVAL;
211
212 /* 0. MFC reset */
213 mfc_debug(2, "MFC reset..\n");
214 s5p_mfc_clock_on();
215 ret = s5p_mfc_reset(dev);
216 if (ret) {
217 mfc_err("Failed to reset MFC - timeout\n");
218 return ret;
219 }
220 mfc_debug(2, "Done MFC reset..\n");
221 /* 1. Set DRAM base Addr */
222 s5p_mfc_init_memctrl(dev);
223 /* 2. Initialize registers of channel I/F */
224 s5p_mfc_clear_cmds(dev);
225 /* 3. Release reset signal to the RISC */
226 s5p_mfc_clean_dev_int_flags(dev);
227 mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
228 mfc_debug(2, "Will now wait for completion of firmware transfer\n");
229 if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_FW_STATUS_RET)) {
230 mfc_err("Failed to load firmware\n");
231 s5p_mfc_reset(dev);
232 s5p_mfc_clock_off();
233 return -EIO;
234 }
235 s5p_mfc_clean_dev_int_flags(dev);
236 /* 4. Initialize firmware */
237 ret = s5p_mfc_sys_init_cmd(dev);
238 if (ret) {
239 mfc_err("Failed to send command to MFC - timeout\n");
240 s5p_mfc_reset(dev);
241 s5p_mfc_clock_off();
242 return ret;
243 }
244 mfc_debug(2, "Ok, now will write a command to init the system\n");
245 if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_SYS_INIT_RET)) {
246 mfc_err("Failed to load firmware\n");
247 s5p_mfc_reset(dev);
248 s5p_mfc_clock_off();
249 return -EIO;
250 }
251 dev->int_cond = 0;
252 if (dev->int_err != 0 || dev->int_type !=
253 S5P_FIMV_R2H_CMD_SYS_INIT_RET) {
254 /* Failure. */
255 mfc_err("Failed to init firmware - error: %d int: %d\n",
256 dev->int_err, dev->int_type);
257 s5p_mfc_reset(dev);
258 s5p_mfc_clock_off();
259 return -EIO;
260 }
261 ver = mfc_read(dev, S5P_FIMV_FW_VERSION);
262 mfc_debug(2, "MFC F/W version : %02xyy, %02xmm, %02xdd\n",
263 (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
264 s5p_mfc_clock_off();
265 mfc_debug_leave();
266 return 0;
267}
268
269
270int s5p_mfc_sleep(struct s5p_mfc_dev *dev)
271{
272 int ret;
273
274 mfc_debug_enter();
275 s5p_mfc_clock_on();
276 s5p_mfc_clean_dev_int_flags(dev);
277 ret = s5p_mfc_sleep_cmd(dev);
278 if (ret) {
279 mfc_err("Failed to send command to MFC - timeout\n");
280 return ret;
281 }
282 if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_SLEEP_RET)) {
283 mfc_err("Failed to sleep\n");
284 return -EIO;
285 }
286 s5p_mfc_clock_off();
287 dev->int_cond = 0;
288 if (dev->int_err != 0 || dev->int_type !=
289 S5P_FIMV_R2H_CMD_SLEEP_RET) {
290 /* Failure. */
291 mfc_err("Failed to sleep - error: %d int: %d\n", dev->int_err,
292 dev->int_type);
293 return -EIO;
294 }
295 mfc_debug_leave();
296 return ret;
297}
298
299int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
300{
301 int ret;
302
303 mfc_debug_enter();
304 /* 0. MFC reset */
305 mfc_debug(2, "MFC reset..\n");
306 s5p_mfc_clock_on();
307 ret = s5p_mfc_reset(dev);
308 if (ret) {
309 mfc_err("Failed to reset MFC - timeout\n");
310 return ret;
311 }
312 mfc_debug(2, "Done MFC reset..\n");
313 /* 1. Set DRAM base Addr */
314 s5p_mfc_init_memctrl(dev);
315 /* 2. Initialize registers of channel I/F */
316 s5p_mfc_clear_cmds(dev);
317 s5p_mfc_clean_dev_int_flags(dev);
318 /* 3. Initialize firmware */
319 ret = s5p_mfc_wakeup_cmd(dev);
320 if (ret) {
321 mfc_err("Failed to send command to MFC - timeout\n");
322 return ret;
323 }
324 /* 4. Release reset signal to the RISC */
325 mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
326 mfc_debug(2, "Ok, now will write a command to wakeup the system\n");
327 if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_WAKEUP_RET)) {
328 mfc_err("Failed to load firmware\n");
329 return -EIO;
330 }
331 s5p_mfc_clock_off();
332 dev->int_cond = 0;
333 if (dev->int_err != 0 || dev->int_type !=
334 S5P_FIMV_R2H_CMD_WAKEUP_RET) {
335 /* Failure. */
336 mfc_err("Failed to wakeup - error: %d int: %d\n", dev->int_err,
337 dev->int_type);
338 return -EIO;
339 }
340 mfc_debug_leave();
341 return 0;
342}
343
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.h b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.h
new file mode 100644
index 000000000000..61dc23b7ee5a
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.h
@@ -0,0 +1,29 @@
1/*
2 * linux/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.h
3 *
4 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
#ifndef S5P_MFC_CTRL_H
#define S5P_MFC_CTRL_H

#include "s5p_mfc_common.h"

/* Firmware memory management: free, allocate-and-load, or re-copy the
 * firmware blob into the bitprocessor buffer (the latter for resume). */
int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev);
int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev);
int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev);

/* Full hardware bring-up: reset, program DRAM bases, boot the firmware. */
int s5p_mfc_init_hw(struct s5p_mfc_dev *dev);

/* Suspend/resume the firmware state. */
int s5p_mfc_sleep(struct s5p_mfc_dev *dev);
int s5p_mfc_wakeup(struct s5p_mfc_dev *dev);

/* Software reset of the MFC block. */
int s5p_mfc_reset(struct s5p_mfc_dev *dev);

#endif /* S5P_MFC_CTRL_H */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_debug.h b/drivers/media/video/s5p-mfc/s5p_mfc_debug.h
new file mode 100644
index 000000000000..ecb8616a492a
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_debug.h
@@ -0,0 +1,48 @@
1/*
2 * drivers/media/video/samsung/mfc5/s5p_mfc_debug.h
3 *
4 * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
5 * This file contains debug macros
6 *
7 * Kamil Debski, Copyright (c) 2011 Samsung Electronics
8 * http://www.samsung.com/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
#ifndef S5P_MFC_DEBUG_H_
#define S5P_MFC_DEBUG_H_

/* NOTE(review): DEBUG is force-defined here, so mfc_debug() is always
 * compiled in and gated only by the runtime 'debug' level - presumably
 * intentional during bring-up; confirm before release. */
#define DEBUG

#ifdef DEBUG
extern int debug;	/* runtime verbosity, defined in the core module */

/* Print at KERN_DEBUG when the runtime level is at least @level.
 * Always prefixes function name and line number. */
#define mfc_debug(level, fmt, args...)				\
	do {							\
		if (debug >= level)				\
			printk(KERN_DEBUG "%s:%d: " fmt,	\
				__func__, __LINE__, ##args);	\
	} while (0)
#else
#define mfc_debug(level, fmt, args...)
#endif

/* Function entry/exit tracing at the most verbose level (5) */
#define mfc_debug_enter() mfc_debug(5, "enter")
#define mfc_debug_leave() mfc_debug(5, "leave")

/* Unconditional error message with function/line prefix */
#define mfc_err(fmt, args...)				\
	do {						\
		printk(KERN_ERR "%s:%d: " fmt,		\
		       __func__, __LINE__, ##args);	\
	} while (0)

/* Unconditional informational message with function/line prefix */
#define mfc_info(fmt, args...)				\
	do {						\
		printk(KERN_INFO "%s:%d: " fmt,		\
		       __func__, __LINE__, ##args);	\
	} while (0)

#endif /* S5P_MFC_DEBUG_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
new file mode 100644
index 000000000000..b2c5052a9c41
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -0,0 +1,1036 @@
1/*
2 * linux/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 * Kamil Debski, <k.debski@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/clk.h>
15#include <linux/interrupt.h>
16#include <linux/io.h>
17#include <linux/module.h>
18#include <linux/platform_device.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/version.h>
22#include <linux/videodev2.h>
23#include <linux/workqueue.h>
24#include <media/v4l2-ctrls.h>
25#include <media/videobuf2-core.h>
26#include "regs-mfc.h"
27#include "s5p_mfc_common.h"
28#include "s5p_mfc_debug.h"
29#include "s5p_mfc_dec.h"
30#include "s5p_mfc_intr.h"
31#include "s5p_mfc_opr.h"
32#include "s5p_mfc_pm.h"
33#include "s5p_mfc_shm.h"
34
/* Table of formats the decoder node supports: the first two entries are
 * raw output (CAPTURE) frame formats, the rest are compressed input
 * (OUTPUT) stream formats mapped to their MFC firmware codec ids. */
static struct s5p_mfc_fmt formats[] = {
	{
		.name = "4:2:0 2 Planes 64x32 Tiles",
		.fourcc = V4L2_PIX_FMT_NV12MT,
		.codec_mode = S5P_FIMV_CODEC_NONE,
		.type = MFC_FMT_RAW,
		.num_planes = 2,
	},
	{
		.name = "4:2:0 2 Planes",
		.fourcc = V4L2_PIX_FMT_NV12M,
		.codec_mode = S5P_FIMV_CODEC_NONE,
		.type = MFC_FMT_RAW,
		.num_planes = 2,
	},
	{
		.name = "H264 Encoded Stream",
		.fourcc = V4L2_PIX_FMT_H264,
		.codec_mode = S5P_FIMV_CODEC_H264_DEC,
		.type = MFC_FMT_DEC,
		.num_planes = 1,
	},
	{
		.name = "H263 Encoded Stream",
		.fourcc = V4L2_PIX_FMT_H263,
		.codec_mode = S5P_FIMV_CODEC_H263_DEC,
		.type = MFC_FMT_DEC,
		.num_planes = 1,
	},
	{
		/* MPEG1 is decoded by the MPEG2 firmware codec */
		.name = "MPEG1 Encoded Stream",
		.fourcc = V4L2_PIX_FMT_MPEG1,
		.codec_mode = S5P_FIMV_CODEC_MPEG2_DEC,
		.type = MFC_FMT_DEC,
		.num_planes = 1,
	},
	{
		.name = "MPEG2 Encoded Stream",
		.fourcc = V4L2_PIX_FMT_MPEG2,
		.codec_mode = S5P_FIMV_CODEC_MPEG2_DEC,
		.type = MFC_FMT_DEC,
		.num_planes = 1,
	},
	{
		.name = "MPEG4 Encoded Stream",
		.fourcc = V4L2_PIX_FMT_MPEG4,
		.codec_mode = S5P_FIMV_CODEC_MPEG4_DEC,
		.type = MFC_FMT_DEC,
		.num_planes = 1,
	},
	{
		/* XviD is decoded by the MPEG4 firmware codec */
		.name = "XviD Encoded Stream",
		.fourcc = V4L2_PIX_FMT_XVID,
		.codec_mode = S5P_FIMV_CODEC_MPEG4_DEC,
		.type = MFC_FMT_DEC,
		.num_planes = 1,
	},
	{
		.name = "VC1 Encoded Stream",
		.fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
		.codec_mode = S5P_FIMV_CODEC_VC1_DEC,
		.type = MFC_FMT_DEC,
		.num_planes = 1,
	},
	{
		.name = "VC1 RCV Encoded Stream",
		.fourcc = V4L2_PIX_FMT_VC1_ANNEX_L,
		.codec_mode = S5P_FIMV_CODEC_VC1RCV_DEC,
		.type = MFC_FMT_DEC,
		.num_planes = 1,
	},
};

#define NUM_FORMATS ARRAY_SIZE(formats)
109
110/* Find selected format description */
111static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
112{
113 unsigned int i;
114
115 for (i = 0; i < NUM_FORMATS; i++) {
116 if (formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
117 formats[i].type == t)
118 return &formats[i];
119 }
120 return NULL;
121}
122
/* Decoder controls registered with the v4l2-ctrls framework in this
 * order; indices must stay in sync with ctx->ctrls[]. */
static struct mfc_control controls[] = {
	{
		.id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "H264 Display Delay",
		.minimum = 0,
		.maximum = 16383,
		.step = 1,
		.default_value = 0,
	},
	{
		.id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE,
		.type = V4L2_CTRL_TYPE_BOOLEAN,
		.name = "H264 Display Delay Enable",
		.minimum = 0,
		.maximum = 1,
		.step = 1,
		.default_value = 0,
	},
	{
		.id = V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER,
		.type = V4L2_CTRL_TYPE_BOOLEAN,
		.name = "Mpeg4 Loop Filter Enable",
		.minimum = 0,
		.maximum = 1,
		.step = 1,
		.default_value = 0,
	},
	{
		.id = V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE,
		.type = V4L2_CTRL_TYPE_BOOLEAN,
		.name = "Slice Interface Enable",
		.minimum = 0,
		.maximum = 1,
		.step = 1,
		.default_value = 0,
	},
	{
		/* Volatile: re-read from the parsed header on every get,
		 * see s5p_mfc_dec_g_v_ctrl() */
		.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "Minimum number of cap bufs",
		.minimum = 1,
		.maximum = 32,
		.step = 1,
		.default_value = 1,
		.is_volatile = 1,
	},
};

#define NUM_CTRLS ARRAY_SIZE(controls)
173
174/* Check whether a context should be run on hardware */
175static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
176{
177 /* Context is to parse header */
178 if (ctx->src_queue_cnt >= 1 && ctx->state == MFCINST_GOT_INST)
179 return 1;
180 /* Context is to decode a frame */
181 if (ctx->src_queue_cnt >= 1 &&
182 ctx->state == MFCINST_RUNNING &&
183 ctx->dst_queue_cnt >= ctx->dpb_count)
184 return 1;
185 /* Context is to return last frame */
186 if (ctx->state == MFCINST_FINISHING &&
187 ctx->dst_queue_cnt >= ctx->dpb_count)
188 return 1;
189 /* Context is to set buffers */
190 if (ctx->src_queue_cnt >= 1 &&
191 ctx->state == MFCINST_HEAD_PARSED &&
192 ctx->capture_state == QUEUE_BUFS_MMAPED)
193 return 1;
194 /* Resolution change */
195 if ((ctx->state == MFCINST_RES_CHANGE_INIT ||
196 ctx->state == MFCINST_RES_CHANGE_FLUSH) &&
197 ctx->dst_queue_cnt >= ctx->dpb_count)
198 return 1;
199 if (ctx->state == MFCINST_RES_CHANGE_END &&
200 ctx->src_queue_cnt >= 1)
201 return 1;
202 mfc_debug(2, "ctx is not ready\n");
203 return 0;
204}
205
/* The decoder needs no per-codec sequence/frame hooks - all NULL;
 * presumably the common run code skips NULL callbacks (confirm in
 * s5p_mfc_opr/s5p_mfc core). */
static struct s5p_mfc_codec_ops decoder_codec_ops = {
	.pre_seq_start = NULL,
	.post_seq_start = NULL,
	.pre_frame_start = NULL,
	.post_frame_start = NULL,
};
212
213/* Query capabilities of the device */
214static int vidioc_querycap(struct file *file, void *priv,
215 struct v4l2_capability *cap)
216{
217 struct s5p_mfc_dev *dev = video_drvdata(file);
218
219 strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
220 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
221 cap->bus_info[0] = 0;
222 cap->version = KERNEL_VERSION(1, 0, 0);
223 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
224 | V4L2_CAP_STREAMING;
225 return 0;
226}
227
228/* Enumerate format */
229static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool mplane, bool out)
230{
231 struct s5p_mfc_fmt *fmt;
232 int i, j = 0;
233
234 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
235 if (mplane && formats[i].num_planes == 1)
236 continue;
237 else if (!mplane && formats[i].num_planes > 1)
238 continue;
239 if (out && formats[i].type != MFC_FMT_DEC)
240 continue;
241 else if (!out && formats[i].type != MFC_FMT_RAW)
242 continue;
243
244 if (j == f->index)
245 break;
246 ++j;
247 }
248 if (i == ARRAY_SIZE(formats))
249 return -EINVAL;
250 fmt = &formats[i];
251 strlcpy(f->description, fmt->name, sizeof(f->description));
252 f->pixelformat = fmt->fourcc;
253 return 0;
254}
255
256static int vidioc_enum_fmt_vid_cap(struct file *file, void *pirv,
257 struct v4l2_fmtdesc *f)
258{
259 return vidioc_enum_fmt(f, false, false);
260}
261
262static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *pirv,
263 struct v4l2_fmtdesc *f)
264{
265 return vidioc_enum_fmt(f, true, false);
266}
267
268static int vidioc_enum_fmt_vid_out(struct file *file, void *prov,
269 struct v4l2_fmtdesc *f)
270{
271 return vidioc_enum_fmt(f, false, true);
272}
273
274static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *prov,
275 struct v4l2_fmtdesc *f)
276{
277 return vidioc_enum_fmt(f, true, true);
278}
279
/* Get format (VIDIOC_G_FMT).
 * CAPTURE: if the header is still being parsed, blocks until the sequence
 * is done, then reports the decoded frame geometry (NV12MT, 2 planes).
 * OUTPUT: the buffer holds a compressed stream, so width/height are 0 and
 * only the configured stream-buffer size is meaningful. */
static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
	struct v4l2_pix_format_mplane *pix_mp;

	mfc_debug_enter();
	pix_mp = &f->fmt.pix_mp;
	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
	    (ctx->state == MFCINST_GOT_INST || ctx->state ==
						MFCINST_RES_CHANGE_END)) {
		/* If the MFC is parsing the header,
		 * so wait until it is finished */
		s5p_mfc_clean_ctx_int_flags(ctx);
		s5p_mfc_wait_for_done_ctx(ctx, S5P_FIMV_R2H_CMD_SEQ_DONE_RET,
									0);
	}
	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
	    ctx->state >= MFCINST_HEAD_PARSED &&
	    ctx->state < MFCINST_ABORT) {
		/* This is run on CAPTURE (decode output) */
		/* Width and height are set to the dimensions
		   of the movie, the buffer is bigger and
		   further processing stages should crop to this
		   rectangle. */
		pix_mp->width = ctx->buf_width;
		pix_mp->height = ctx->buf_height;
		pix_mp->field = V4L2_FIELD_NONE;
		pix_mp->num_planes = 2;
		/* Set pixelformat to the format in which MFC
		   outputs the decoded frame */
		pix_mp->pixelformat = V4L2_PIX_FMT_NV12MT;
		pix_mp->plane_fmt[0].bytesperline = ctx->buf_width;
		pix_mp->plane_fmt[0].sizeimage = ctx->luma_size;
		pix_mp->plane_fmt[1].bytesperline = ctx->buf_width;
		pix_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		/* This is run on OUTPUT
		   The buffer contains compressed image
		   so width and height have no meaning */
		pix_mp->width = 0;
		pix_mp->height = 0;
		pix_mp->field = V4L2_FIELD_NONE;
		pix_mp->plane_fmt[0].bytesperline = ctx->dec_src_buf_size;
		pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size;
		pix_mp->pixelformat = ctx->src_fmt->fourcc;
		pix_mp->num_planes = ctx->src_fmt->num_planes;
	} else {
		mfc_err("Format could not be read\n");
		mfc_debug(2, "%s-- with error\n", __func__);
		return -EINVAL;
	}
	mfc_debug_leave();
	return 0;
}
335
336/* Try format */
337static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
338{
339 struct s5p_mfc_fmt *fmt;
340
341 if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
342 mfc_err("This node supports decoding only\n");
343 return -EINVAL;
344 }
345 fmt = find_format(f, MFC_FMT_DEC);
346 if (!fmt) {
347 mfc_err("Unsupported format\n");
348 return -EINVAL;
349 }
350 if (fmt->type != MFC_FMT_DEC) {
351 mfc_err("\n");
352 return -EINVAL;
353 }
354 return 0;
355}
356
/* Set format (VIDIOC_S_FMT) on the OUTPUT queue: selects the codec to use
 * and the compressed-stream buffer size.  Rejected while either queue is
 * streaming.  Resets the context state machine to MFCINST_INIT. */
static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct s5p_mfc_dev *dev = video_drvdata(file);
	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
	int ret = 0;
	struct s5p_mfc_fmt *fmt;
	struct v4l2_pix_format_mplane *pix_mp;

	mfc_debug_enter();
	ret = vidioc_try_fmt(file, priv, f);
	pix_mp = &f->fmt.pix_mp;
	if (ret)
		return ret;
	if (ctx->vq_src.streaming || ctx->vq_dst.streaming) {
		v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__);
		ret = -EBUSY;
		goto out;
	}
	fmt = find_format(f, MFC_FMT_DEC);
	if (!fmt || fmt->codec_mode == S5P_FIMV_CODEC_NONE) {
		mfc_err("Unknown codec\n");
		ret = -EINVAL;
		goto out;
	}
	if (fmt->type != MFC_FMT_DEC) {
		mfc_err("Wrong format selected, you should choose "
					"format for decoding\n");
		ret = -EINVAL;
		goto out;
	}
	ctx->src_fmt = fmt;
	ctx->codec_mode = fmt->codec_mode;
	mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
	/* Compressed input: frame dimensions are unknown until the header
	 * is parsed, so report 0x0 */
	pix_mp->height = 0;
	pix_mp->width = 0;
	if (pix_mp->plane_fmt[0].sizeimage)
		ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
	else
		/* No size requested - fall back to the default CPB size */
		pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
								DEF_CPB_SIZE;
	pix_mp->plane_fmt[0].bytesperline = 0;
	ctx->state = MFCINST_INIT;
out:
	mfc_debug_leave();
	return ret;
}
404
/* Request buffers (VIDIOC_REQBUFS).
 * OUTPUT queue: plain vb2_reqbufs() pass-through, allowed only once while
 * in MFCINST_INIT (or count==0 to free).
 * CAPTURE queue: in addition to vb2_reqbufs(), verifies that at least
 * dpb_count buffers were provided, allocates the codec buffers, and kicks
 * the hardware to run the INIT_BUFFERS step.
 * The MFC clock must be on around every vb2_reqbufs() call because buffer
 * allocation touches the device. */
static int vidioc_reqbufs(struct file *file, void *priv,
					  struct v4l2_requestbuffers *reqbufs)
{
	struct s5p_mfc_dev *dev = video_drvdata(file);
	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
	int ret = 0;
	unsigned long flags;

	if (reqbufs->memory != V4L2_MEMORY_MMAP) {
		mfc_err("Only V4L2_MEMORY_MAP is supported\n");
		return -EINVAL;
	}
	if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		/* Can only request buffers after an instance has been opened.*/
		if (ctx->state == MFCINST_INIT) {
			ctx->src_bufs_cnt = 0;
			if (reqbufs->count == 0) {
				mfc_debug(2, "Freeing buffers\n");
				s5p_mfc_clock_on();
				ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
				s5p_mfc_clock_off();
				return ret;
			}
			/* Decoding */
			if (ctx->output_state != QUEUE_FREE) {
				mfc_err("Bufs have already been requested\n");
				return -EINVAL;
			}
			s5p_mfc_clock_on();
			ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
			s5p_mfc_clock_off();
			if (ret) {
				mfc_err("vb2_reqbufs on output failed\n");
				return ret;
			}
			mfc_debug(2, "vb2_reqbufs: %d\n", ret);
			ctx->output_state = QUEUE_BUFS_REQUESTED;
		}
	} else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		ctx->dst_bufs_cnt = 0;
		if (reqbufs->count == 0) {
			mfc_debug(2, "Freeing buffers\n");
			s5p_mfc_clock_on();
			ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
			s5p_mfc_clock_off();
			return ret;
		}
		if (ctx->capture_state != QUEUE_FREE) {
			mfc_err("Bufs have already been requested\n");
			return -EINVAL;
		}
		ctx->capture_state = QUEUE_BUFS_REQUESTED;
		s5p_mfc_clock_on();
		ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
		s5p_mfc_clock_off();
		if (ret) {
			mfc_err("vb2_reqbufs on capture failed\n");
			return ret;
		}
		/* The decoder needs at least the DPB count announced after
		 * header parsing; on failure free the vb2 buffers again by
		 * re-requesting with count 0 */
		if (reqbufs->count < ctx->dpb_count) {
			mfc_err("Not enough buffers allocated\n");
			reqbufs->count = 0;
			s5p_mfc_clock_on();
			ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
			s5p_mfc_clock_off();
			return -ENOMEM;
		}
		ctx->total_dpb_count = reqbufs->count;
		ret = s5p_mfc_alloc_codec_buffers(ctx);
		if (ret) {
			mfc_err("Failed to allocate decoding buffers\n");
			reqbufs->count = 0;
			s5p_mfc_clock_on();
			ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
			s5p_mfc_clock_off();
			return -ENOMEM;
		}
		/* Every capture buffer must have gone through buf_init
		 * (which counts dst_bufs_cnt) before the hardware step */
		if (ctx->dst_bufs_cnt == ctx->total_dpb_count) {
			ctx->capture_state = QUEUE_BUFS_MMAPED;
		} else {
			mfc_err("Not all buffers passed to buf_init\n");
			reqbufs->count = 0;
			s5p_mfc_clock_on();
			ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
			s5p_mfc_release_codec_buffers(ctx);
			s5p_mfc_clock_off();
			return -ENOMEM;
		}
		/* Mark the context runnable and wait for the hardware to
		 * finish initialising the buffers */
		if (s5p_mfc_ctx_ready(ctx)) {
			spin_lock_irqsave(&dev->condlock, flags);
			set_bit(ctx->num, &dev->ctx_work_bits);
			spin_unlock_irqrestore(&dev->condlock, flags);
		}
		s5p_mfc_try_run(dev);
		s5p_mfc_wait_for_done_ctx(ctx,
					 S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET, 0);
	}
	return ret;
}
505
506/* Query buffer */
507static int vidioc_querybuf(struct file *file, void *priv,
508 struct v4l2_buffer *buf)
509{
510 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
511 int ret;
512 int i;
513
514 if (buf->memory != V4L2_MEMORY_MMAP) {
515 mfc_err("Only mmaped buffers can be used\n");
516 return -EINVAL;
517 }
518 mfc_debug(2, "State: %d, buf->type: %d\n", ctx->state, buf->type);
519 if (ctx->state == MFCINST_INIT &&
520 buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
521 ret = vb2_querybuf(&ctx->vq_src, buf);
522 } else if (ctx->state == MFCINST_RUNNING &&
523 buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
524 ret = vb2_querybuf(&ctx->vq_dst, buf);
525 for (i = 0; i < buf->length; i++)
526 buf->m.planes[i].m.mem_offset += DST_QUEUE_OFF_BASE;
527 } else {
528 mfc_err("vidioc_querybuf called in an inappropriate state\n");
529 ret = -EINVAL;
530 }
531 mfc_debug_leave();
532 return ret;
533}
534
535/* Queue a buffer */
536static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
537{
538 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
539
540 if (ctx->state == MFCINST_ERROR) {
541 mfc_err("Call on QBUF after unrecoverable error\n");
542 return -EIO;
543 }
544 if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
545 return vb2_qbuf(&ctx->vq_src, buf);
546 else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
547 return vb2_qbuf(&ctx->vq_dst, buf);
548 return -EINVAL;
549}
550
551/* Dequeue a buffer */
552static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
553{
554 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
555
556 if (ctx->state == MFCINST_ERROR) {
557 mfc_err("Call on DQBUF after unrecoverable error\n");
558 return -EIO;
559 }
560 if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
561 return vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK);
562 else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
563 return vb2_dqbuf(&ctx->vq_dst, buf, file->f_flags & O_NONBLOCK);
564 return -EINVAL;
565}
566
/* Stream on (VIDIOC_STREAMON).
 * First STREAMON on the OUTPUT queue also opens a hardware instance:
 * allocates the instance/temp buffers, schedules the context and waits
 * for OPEN_INSTANCE to complete before handing off to vb2. */
static int vidioc_streamon(struct file *file, void *priv,
			   enum v4l2_buf_type type)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;
	int ret = -EINVAL;

	mfc_debug_enter();
	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {

		if (ctx->state == MFCINST_INIT) {
			/* Fresh context: reset queue bookkeeping and open a
			 * hardware instance before streaming can start */
			ctx->dst_bufs_cnt = 0;
			ctx->src_bufs_cnt = 0;
			ctx->capture_state = QUEUE_FREE;
			ctx->output_state = QUEUE_FREE;
			s5p_mfc_alloc_instance_buffer(ctx);
			s5p_mfc_alloc_dec_temp_buffers(ctx);
			spin_lock_irqsave(&dev->condlock, flags);
			set_bit(ctx->num, &dev->ctx_work_bits);
			spin_unlock_irqrestore(&dev->condlock, flags);
			s5p_mfc_clean_ctx_int_flags(ctx);
			s5p_mfc_try_run(dev);

			if (s5p_mfc_wait_for_done_ctx(ctx,
				S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET, 0)) {
				/* Error or timeout */
				mfc_err("Error getting instance from hardware\n");
				s5p_mfc_release_instance_buffer(ctx);
				s5p_mfc_release_dec_desc_buffer(ctx);
				return -EIO;
			}
			mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
		}
		ret = vb2_streamon(&ctx->vq_src, type);
	}
	else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		ret = vb2_streamon(&ctx->vq_dst, type);
	mfc_debug_leave();
	return ret;
}
609
610/* Stream off, which equals to a pause */
611static int vidioc_streamoff(struct file *file, void *priv,
612 enum v4l2_buf_type type)
613{
614 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
615
616 if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
617 return vb2_streamoff(&ctx->vq_src, type);
618 else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
619 return vb2_streamoff(&ctx->vq_dst, type);
620 return -EINVAL;
621}
622
623/* Set controls - v4l2 control framework */
624static int s5p_mfc_dec_s_ctrl(struct v4l2_ctrl *ctrl)
625{
626 struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
627
628 switch (ctrl->id) {
629 case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY:
630 ctx->loop_filter_mpeg4 = ctrl->val;
631 break;
632 case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE:
633 ctx->display_delay_enable = ctrl->val;
634 break;
635 case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
636 ctx->display_delay = ctrl->val;
637 break;
638 case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
639 ctx->slice_interface = ctrl->val;
640 break;
641 default:
642 mfc_err("Invalid control 0x%08x\n", ctrl->id);
643 return -EINVAL;
644 }
645 return 0;
646}
647
/*
 * Get volatile controls. Only V4L2_CID_MIN_BUFFERS_FOR_CAPTURE is handled:
 * it reports the number of decoded-picture buffers (ctx->dpb_count) that the
 * parsed stream requires. If the header has not been parsed yet (state is
 * still MFCINST_INIT) the call blocks until the hardware signals SEQ_DONE.
 */
static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
{
	struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
	struct s5p_mfc_dev *dev = ctx->dev;

	switch (ctrl->id) {
	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
		/* Header already parsed: dpb_count is valid. */
		if (ctx->state >= MFCINST_HEAD_PARSED &&
		    ctx->state < MFCINST_ABORT) {
			ctrl->val = ctx->dpb_count;
			break;
		} else if (ctx->state != MFCINST_INIT) {
			/* Neither parsed nor waiting for the header. */
			v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
			return -EINVAL;
		}
		/* Should wait for the header to be parsed */
		s5p_mfc_clean_ctx_int_flags(ctx);
		s5p_mfc_wait_for_done_ctx(ctx,
				S5P_FIMV_R2H_CMD_SEQ_DONE_RET, 0);
		/* Re-check: the wait may have ended without a parsed header. */
		if (ctx->state >= MFCINST_HEAD_PARSED &&
		    ctx->state < MFCINST_ABORT) {
			ctrl->val = ctx->dpb_count;
		} else {
			v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
			return -EINVAL;
		}
		break;
	}
	return 0;
}
678
679
/* Control operations for the decoder: plain set plus a volatile getter
 * for the minimum-capture-buffers query. */
static const struct v4l2_ctrl_ops s5p_mfc_dec_ctrl_ops = {
	.s_ctrl = s5p_mfc_dec_s_ctrl,
	.g_volatile_ctrl = s5p_mfc_dec_g_v_ctrl,
};
684
685/* Get cropping information */
686static int vidioc_g_crop(struct file *file, void *priv,
687 struct v4l2_crop *cr)
688{
689 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
690 u32 left, right, top, bottom;
691
692 if (ctx->state != MFCINST_HEAD_PARSED &&
693 ctx->state != MFCINST_RUNNING && ctx->state != MFCINST_FINISHING
694 && ctx->state != MFCINST_FINISHED) {
695 mfc_err("Cannont set crop\n");
696 return -EINVAL;
697 }
698 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_H264) {
699 left = s5p_mfc_read_shm(ctx, CROP_INFO_H);
700 right = left >> S5P_FIMV_SHARED_CROP_RIGHT_SHIFT;
701 left = left & S5P_FIMV_SHARED_CROP_LEFT_MASK;
702 top = s5p_mfc_read_shm(ctx, CROP_INFO_V);
703 bottom = top >> S5P_FIMV_SHARED_CROP_BOTTOM_SHIFT;
704 top = top & S5P_FIMV_SHARED_CROP_TOP_MASK;
705 cr->c.left = left;
706 cr->c.top = top;
707 cr->c.width = ctx->img_width - left - right;
708 cr->c.height = ctx->img_height - top - bottom;
709 mfc_debug(2, "Cropping info [h264]: l=%d t=%d "
710 "w=%d h=%d (r=%d b=%d fw=%d fh=%d\n", left, top,
711 cr->c.width, cr->c.height, right, bottom,
712 ctx->buf_width, ctx->buf_height);
713 } else {
714 cr->c.left = 0;
715 cr->c.top = 0;
716 cr->c.width = ctx->img_width;
717 cr->c.height = ctx->img_height;
718 mfc_debug(2, "Cropping info: w=%d h=%d fw=%d "
719 "fh=%d\n", cr->c.width, cr->c.height, ctx->buf_width,
720 ctx->buf_height);
721 }
722 return 0;
723}
724
725/* v4l2_ioctl_ops */
/* v4l2_ioctl_ops — ioctl dispatch table for the decoder video node.
 * Format/buffer handlers are shared with the common file (vidioc_g_fmt
 * etc. are defined elsewhere); streamon/off and g_crop are local. */
static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = {
	.vidioc_querycap = vidioc_querycap,
	.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
	.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
	.vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
	.vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt,
	.vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt,
	.vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt,
	.vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt,
	.vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt,
	.vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt,
	.vidioc_reqbufs = vidioc_reqbufs,
	.vidioc_querybuf = vidioc_querybuf,
	.vidioc_qbuf = vidioc_qbuf,
	.vidioc_dqbuf = vidioc_dqbuf,
	.vidioc_streamon = vidioc_streamon,
	.vidioc_streamoff = vidioc_streamoff,
	.vidioc_g_crop = vidioc_g_crop,
};
746
/*
 * videobuf2 queue_setup: validate/clamp the buffer count and report plane
 * count, plane sizes and memory allocators for the decoder queues.
 * OUTPUT (bitstream) buffers may only be set up in MFCINST_INIT; CAPTURE
 * (raw frame) buffers only after the header was parsed, since luma/chroma
 * sizes and dpb_count come from the parsed header.
 */
static int s5p_mfc_queue_setup(struct vb2_queue *vq, unsigned int *buf_count,
			       unsigned int *plane_count, unsigned long psize[],
			       void *allocators[])
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);

	/* Video output for decoding (source)
	 * this can be set after getting an instance */
	if (ctx->state == MFCINST_INIT &&
	    vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		/* A single plane is required for input */
		*plane_count = 1;
		if (*buf_count < 1)
			*buf_count = 1;
		if (*buf_count > MFC_MAX_BUFFERS)
			*buf_count = MFC_MAX_BUFFERS;
	/* Video capture for decoding (destination)
	 * this can be set after the header was parsed */
	} else if (ctx->state == MFCINST_HEAD_PARSED &&
		   vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		/* Output plane count is 2 - one for Y and one for CbCr */
		*plane_count = 2;
		/* Setup buffer count: at least dpb_count, at most
		 * dpb_count + extra, hard-capped at MFC_MAX_BUFFERS */
		if (*buf_count < ctx->dpb_count)
			*buf_count = ctx->dpb_count;
		if (*buf_count > ctx->dpb_count + MFC_MAX_EXTRA_DPB)
			*buf_count = ctx->dpb_count + MFC_MAX_EXTRA_DPB;
		if (*buf_count > MFC_MAX_BUFFERS)
			*buf_count = MFC_MAX_BUFFERS;
	} else {
		mfc_err("State seems invalid. State = %d, vq->type = %d\n",
			ctx->state, vq->type);
		return -EINVAL;
	}
	mfc_debug(2, "Buffer count=%d, plane count=%d\n",
		  *buf_count, *plane_count);
	/* Same two cases again, now filling plane sizes and allocators.
	 * NOTE(review): luma uses BANK2 and chroma BANK1 — presumably
	 * deliberate to split traffic across memory banks; confirm against
	 * the MFC5.1 memory-map requirements. */
	if (ctx->state == MFCINST_HEAD_PARSED &&
	    vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		psize[0] = ctx->luma_size;
		psize[1] = ctx->chroma_size;
		allocators[0] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
		allocators[1] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
	} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
		   ctx->state == MFCINST_INIT) {
		psize[0] = ctx->dec_src_buf_size;
		allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
	} else {
		mfc_err("This video node is dedicated to decoding. Decoding not initalised\n");
		return -EINVAL;
	}
	return 0;
}
799
800static void s5p_mfc_unlock(struct vb2_queue *q)
801{
802 struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
803 struct s5p_mfc_dev *dev = ctx->dev;
804
805 mutex_unlock(&dev->mfc_mutex);
806}
807
808static void s5p_mfc_lock(struct vb2_queue *q)
809{
810 struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
811 struct s5p_mfc_dev *dev = ctx->dev;
812
813 mutex_lock(&dev->mfc_mutex);
814}
815
816static int s5p_mfc_buf_init(struct vb2_buffer *vb)
817{
818 struct vb2_queue *vq = vb->vb2_queue;
819 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
820 unsigned int i;
821
822 if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
823 if (ctx->capture_state == QUEUE_BUFS_MMAPED)
824 return 0;
825 for (i = 0; i <= ctx->src_fmt->num_planes ; i++) {
826 if (IS_ERR_OR_NULL(ERR_PTR(
827 vb2_dma_contig_plane_paddr(vb, i)))) {
828 mfc_err("Plane mem not allocated\n");
829 return -EINVAL;
830 }
831 }
832 if (vb2_plane_size(vb, 0) < ctx->luma_size ||
833 vb2_plane_size(vb, 1) < ctx->chroma_size) {
834 mfc_err("Plane buffer (CAPTURE) is too small\n");
835 return -EINVAL;
836 }
837 i = vb->v4l2_buf.index;
838 ctx->dst_bufs[i].b = vb;
839 ctx->dst_bufs[i].cookie.raw.luma =
840 vb2_dma_contig_plane_paddr(vb, 0);
841 ctx->dst_bufs[i].cookie.raw.chroma =
842 vb2_dma_contig_plane_paddr(vb, 1);
843 ctx->dst_bufs_cnt++;
844 } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
845 if (IS_ERR_OR_NULL(ERR_PTR(
846 vb2_dma_contig_plane_paddr(vb, 0)))) {
847 mfc_err("Plane memory not allocated\n");
848 return -EINVAL;
849 }
850 if (vb2_plane_size(vb, 0) < ctx->dec_src_buf_size) {
851 mfc_err("Plane buffer (OUTPUT) is too small\n");
852 return -EINVAL;
853 }
854
855 i = vb->v4l2_buf.index;
856 ctx->src_bufs[i].b = vb;
857 ctx->src_bufs[i].cookie.stream =
858 vb2_dma_contig_plane_paddr(vb, 0);
859 ctx->src_bufs_cnt++;
860 } else {
861 mfc_err("s5p_mfc_buf_init: unknown queue type\n");
862 return -EINVAL;
863 }
864 return 0;
865}
866
/*
 * videobuf2 start_streaming: apply pending controls, resume a finished
 * context, and schedule the context on the hardware if it is ready.
 */
static int s5p_mfc_start_streaming(struct vb2_queue *q)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;

	v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
	/* Restarting after a pause (streamoff) resumes decoding */
	if (ctx->state == MFCINST_FINISHING ||
	    ctx->state == MFCINST_FINISHED)
		ctx->state = MFCINST_RUNNING;
	/* If context is ready then schedule it to run */
	if (s5p_mfc_ctx_ready(ctx)) {
		spin_lock_irqsave(&dev->condlock, flags);
		set_bit(ctx->num, &dev->ctx_work_bits);
		spin_unlock_irqrestore(&dev->condlock, flags);
	}
	s5p_mfc_try_run(dev);
	return 0;
}
886
/*
 * videobuf2 stop_streaming: abort a decode in flight (if this context
 * currently owns the hardware), then return all queued buffers and reset
 * the per-queue bookkeeping.
 */
static int s5p_mfc_stop_streaming(struct vb2_queue *q)
{
	unsigned long flags;
	struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
	struct s5p_mfc_dev *dev = ctx->dev;
	int aborted = 0;

	/* Only wait for the hardware when this context is the one running */
	if ((ctx->state == MFCINST_FINISHING ||
	     ctx->state == MFCINST_RUNNING) &&
	    dev->curr_ctx == ctx->num && dev->hw_lock) {
		ctx->state = MFCINST_ABORT;
		s5p_mfc_wait_for_done_ctx(ctx,
					  S5P_FIMV_R2H_CMD_FRAME_DONE_RET, 0);
		aborted = 1;
	}
	spin_lock_irqsave(&dev->irqlock, flags);
	if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
		INIT_LIST_HEAD(&ctx->dst_queue);
		ctx->dst_queue_cnt = 0;
		/* Request a DPB flush on the next run; no capture buffers
		 * remain available to the hardware */
		ctx->dpb_flush_flag = 1;
		ctx->dec_dst_flag = 0;
	}
	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
		INIT_LIST_HEAD(&ctx->src_queue);
		ctx->src_queue_cnt = 0;
	}
	/* Leave the context resumable: stop_streaming acts as a pause */
	if (aborted)
		ctx->state = MFCINST_RUNNING;
	spin_unlock_irqrestore(&dev->irqlock, flags);
	return 0;
}
920
921
922static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
923{
924 struct vb2_queue *vq = vb->vb2_queue;
925 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
926 struct s5p_mfc_dev *dev = ctx->dev;
927 unsigned long flags;
928 struct s5p_mfc_buf *mfc_buf;
929
930 if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
931 mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index];
932 mfc_buf->used = 0;
933 spin_lock_irqsave(&dev->irqlock, flags);
934 list_add_tail(&mfc_buf->list, &ctx->src_queue);
935 ctx->src_queue_cnt++;
936 spin_unlock_irqrestore(&dev->irqlock, flags);
937 } else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
938 mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index];
939 mfc_buf->used = 0;
940 /* Mark destination as available for use by MFC */
941 spin_lock_irqsave(&dev->irqlock, flags);
942 set_bit(vb->v4l2_buf.index, &ctx->dec_dst_flag);
943 list_add_tail(&mfc_buf->list, &ctx->dst_queue);
944 ctx->dst_queue_cnt++;
945 spin_unlock_irqrestore(&dev->irqlock, flags);
946 } else {
947 mfc_err("Unsupported buffer type (%d)\n", vq->type);
948 }
949 if (s5p_mfc_ctx_ready(ctx)) {
950 spin_lock_irqsave(&dev->condlock, flags);
951 set_bit(ctx->num, &dev->ctx_work_bits);
952 spin_unlock_irqrestore(&dev->condlock, flags);
953 }
954 s5p_mfc_try_run(dev);
955}
956
/* videobuf2 queue operations for both decoder queues (source bitstream
 * and capture frames). */
static struct vb2_ops s5p_mfc_dec_qops = {
	.queue_setup = s5p_mfc_queue_setup,
	.wait_prepare = s5p_mfc_unlock,
	.wait_finish = s5p_mfc_lock,
	.buf_init = s5p_mfc_buf_init,
	.start_streaming = s5p_mfc_start_streaming,
	.stop_streaming = s5p_mfc_stop_streaming,
	.buf_queue = s5p_mfc_buf_queue,
};
966
/* Accessor used by the common driver core to obtain the decoder's codec
 * operations (decoder_codec_ops is defined earlier in this file). */
struct s5p_mfc_codec_ops *get_dec_codec_ops(void)
{
	return &decoder_codec_ops;
}
971
/* Accessor for the decoder's videobuf2 queue operations. */
struct vb2_ops *get_dec_queue_ops(void)
{
	return &s5p_mfc_dec_qops;
}
976
/* Accessor for the decoder's ioctl dispatch table. */
const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void)
{
	return &s5p_mfc_dec_ioctl_ops;
}
981
982#define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2CLASS(x) == V4L2_CTRL_CLASS_MPEG) \
983 && V4L2_CTRL_DRIVER_PRIV(x))
984
985int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx)
986{
987 struct v4l2_ctrl_config cfg;
988 int i;
989
990 v4l2_ctrl_handler_init(&ctx->ctrl_handler, NUM_CTRLS);
991 if (ctx->ctrl_handler.error) {
992 mfc_err("v4l2_ctrl_handler_init failed\n");
993 return ctx->ctrl_handler.error;
994 }
995
996 for (i = 0; i < NUM_CTRLS; i++) {
997 if (IS_MFC51_PRIV(controls[i].id)) {
998 cfg.ops = &s5p_mfc_dec_ctrl_ops;
999 cfg.id = controls[i].id;
1000 cfg.min = controls[i].minimum;
1001 cfg.max = controls[i].maximum;
1002 cfg.def = controls[i].default_value;
1003 cfg.name = controls[i].name;
1004 cfg.type = controls[i].type;
1005
1006 cfg.step = controls[i].step;
1007 cfg.menu_skip_mask = 0;
1008
1009 ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->ctrl_handler,
1010 &cfg, NULL);
1011 } else {
1012 ctx->ctrls[i] = v4l2_ctrl_new_std(&ctx->ctrl_handler,
1013 &s5p_mfc_dec_ctrl_ops,
1014 controls[i].id, controls[i].minimum,
1015 controls[i].maximum, controls[i].step,
1016 controls[i].default_value);
1017 }
1018 if (ctx->ctrl_handler.error) {
1019 mfc_err("Adding control (%d) failed\n", i);
1020 return ctx->ctrl_handler.error;
1021 }
1022 if (controls[i].is_volatile && ctx->ctrls[i])
1023 ctx->ctrls[i]->is_volatile = 1;
1024 }
1025 return 0;
1026}
1027
1028void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx)
1029{
1030 int i;
1031
1032 v4l2_ctrl_handler_free(&ctx->ctrl_handler);
1033 for (i = 0; i < NUM_CTRLS; i++)
1034 ctx->ctrls[i] = NULL;
1035}
1036
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.h b/drivers/media/video/s5p-mfc/s5p_mfc_dec.h
new file mode 100644
index 000000000000..fb8b215db0e7
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.h
@@ -0,0 +1,23 @@
1/*
2 * linux/drivers/media/video/s5p-mfc/s5p_mfc_dec.h
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
#ifndef S5P_MFC_DEC_H_
#define S5P_MFC_DEC_H_

/* Decoder entry points consumed by the common s5p-mfc core. */
struct s5p_mfc_codec_ops *get_dec_codec_ops(void);
struct vb2_ops *get_dec_queue_ops(void);
const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void);
/* Default source/destination format (src selects the bitstream side). */
struct s5p_mfc_fmt *get_dec_def_fmt(bool src);
/* Register / unregister the decoder's v4l2 controls for a context. */
int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx);
void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx);

#endif /* S5P_MFC_DEC_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
new file mode 100644
index 000000000000..fee094a14f4c
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -0,0 +1,1829 @@
1/*
2 * linux/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
3 *
4 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * Jeongtae Park <jtp.park@samsung.com>
8 * Kamil Debski <k.debski@samsung.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#include <linux/clk.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <linux/module.h>
20#include <linux/platform_device.h>
21#include <linux/sched.h>
22#include <linux/version.h>
23#include <linux/videodev2.h>
24#include <linux/workqueue.h>
25#include <media/v4l2-ctrls.h>
26#include <media/videobuf2-core.h>
27#include "regs-mfc.h"
28#include "s5p_mfc_common.h"
29#include "s5p_mfc_debug.h"
30#include "s5p_mfc_enc.h"
31#include "s5p_mfc_intr.h"
32#include "s5p_mfc_opr.h"
33
34static struct s5p_mfc_fmt formats[] = {
35 {
36 .name = "4:2:0 2 Planes 64x32 Tiles",
37 .fourcc = V4L2_PIX_FMT_NV12MT,
38 .codec_mode = S5P_FIMV_CODEC_NONE,
39 .type = MFC_FMT_RAW,
40 .num_planes = 2,
41 },
42 {
43 .name = "4:2:0 2 Planes",
44 .fourcc = V4L2_PIX_FMT_NV12M,
45 .codec_mode = S5P_FIMV_CODEC_NONE,
46 .type = MFC_FMT_RAW,
47 .num_planes = 2,
48 },
49 {
50 .name = "H264 Encoded Stream",
51 .fourcc = V4L2_PIX_FMT_H264,
52 .codec_mode = S5P_FIMV_CODEC_H264_ENC,
53 .type = MFC_FMT_ENC,
54 .num_planes = 1,
55 },
56 {
57 .name = "MPEG4 Encoded Stream",
58 .fourcc = V4L2_PIX_FMT_MPEG4,
59 .codec_mode = S5P_FIMV_CODEC_MPEG4_ENC,
60 .type = MFC_FMT_ENC,
61 .num_planes = 1,
62 },
63 {
64 .name = "H264 Encoded Stream",
65 .fourcc = V4L2_PIX_FMT_H263,
66 .codec_mode = S5P_FIMV_CODEC_H263_ENC,
67 .type = MFC_FMT_ENC,
68 .num_planes = 1,
69 },
70};
71
72#define NUM_FORMATS ARRAY_SIZE(formats)
73static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
74{
75 unsigned int i;
76
77 for (i = 0; i < NUM_FORMATS; i++) {
78 if (formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
79 formats[i].type == t)
80 return &formats[i];
81 }
82 return NULL;
83}
84
85static struct mfc_control controls[] = {
86 {
87 .id = V4L2_CID_MPEG_VIDEO_GOP_SIZE,
88 .type = V4L2_CTRL_TYPE_INTEGER,
89 .minimum = 0,
90 .maximum = (1 << 16) - 1,
91 .step = 1,
92 .default_value = 0,
93 },
94 {
95 .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
96 .type = V4L2_CTRL_TYPE_MENU,
97 .minimum = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
98 .maximum = V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES,
99 .default_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
100 .menu_skip_mask = 0,
101 },
102 {
103 .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB,
104 .type = V4L2_CTRL_TYPE_INTEGER,
105 .minimum = 1,
106 .maximum = (1 << 16) - 1,
107 .step = 1,
108 .default_value = 1,
109 },
110 {
111 .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES,
112 .type = V4L2_CTRL_TYPE_INTEGER,
113 .minimum = 1900,
114 .maximum = (1 << 30) - 1,
115 .step = 1,
116 .default_value = 1900,
117 },
118 {
119 .id = V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB,
120 .type = V4L2_CTRL_TYPE_INTEGER,
121 .minimum = 0,
122 .maximum = (1 << 16) - 1,
123 .step = 1,
124 .default_value = 0,
125 },
126 {
127 .id = V4L2_CID_MPEG_MFC51_VIDEO_PADDING,
128 .type = V4L2_CTRL_TYPE_BOOLEAN,
129 .name = "Padding Control Enable",
130 .minimum = 0,
131 .maximum = 1,
132 .step = 1,
133 .default_value = 0,
134 },
135 {
136 .id = V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV,
137 .type = V4L2_CTRL_TYPE_INTEGER,
138 .name = "Padding Color YUV Value",
139 .minimum = 0,
140 .maximum = (1 << 25) - 1,
141 .step = 1,
142 .default_value = 0,
143 },
144 {
145 .id = V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE,
146 .type = V4L2_CTRL_TYPE_BOOLEAN,
147 .minimum = 0,
148 .maximum = 1,
149 .step = 1,
150 .default_value = 0,
151 },
152 {
153 .id = V4L2_CID_MPEG_VIDEO_BITRATE,
154 .type = V4L2_CTRL_TYPE_INTEGER,
155 .minimum = 1,
156 .maximum = (1 << 30) - 1,
157 .step = 1,
158 .default_value = 1,
159 },
160 {
161 .id = V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF,
162 .type = V4L2_CTRL_TYPE_INTEGER,
163 .name = "Rate Control Reaction Coeff.",
164 .minimum = 1,
165 .maximum = (1 << 16) - 1,
166 .step = 1,
167 .default_value = 1,
168 },
169 {
170 .id = V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE,
171 .type = V4L2_CTRL_TYPE_MENU,
172 .name = "Force frame type",
173 .minimum = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED,
174 .maximum = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_NOT_CODED,
175 .default_value = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED,
176 .menu_skip_mask = 0,
177 },
178 {
179 .id = V4L2_CID_MPEG_VIDEO_VBV_SIZE,
180 .type = V4L2_CTRL_TYPE_INTEGER,
181 .minimum = 0,
182 .maximum = (1 << 16) - 1,
183 .step = 1,
184 .default_value = 0,
185 },
186 {
187 .id = V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE,
188 .type = V4L2_CTRL_TYPE_INTEGER,
189 .minimum = 0,
190 .maximum = (1 << 16) - 1,
191 .step = 1,
192 .default_value = 0,
193 },
194 {
195 .id = V4L2_CID_MPEG_VIDEO_HEADER_MODE,
196 .type = V4L2_CTRL_TYPE_MENU,
197 .minimum = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
198 .maximum = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
199 .default_value = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
200 .menu_skip_mask = 0,
201 },
202 {
203 .id = V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE,
204 .type = V4L2_CTRL_TYPE_MENU,
205 .name = "Frame Skip Enable",
206 .minimum = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED,
207 .maximum = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT,
208 .menu_skip_mask = 0,
209 .default_value = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED,
210 },
211 {
212 .id = V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT,
213 .type = V4L2_CTRL_TYPE_BOOLEAN,
214 .name = "Fixed Target Bit Enable",
215 .minimum = 0,
216 .maximum = 1,
217 .default_value = 0,
218 .menu_skip_mask = 0,
219 },
220 {
221 .id = V4L2_CID_MPEG_VIDEO_B_FRAMES,
222 .type = V4L2_CTRL_TYPE_INTEGER,
223 .minimum = 0,
224 .maximum = 2,
225 .step = 1,
226 .default_value = 0,
227 },
228 {
229 .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
230 .type = V4L2_CTRL_TYPE_MENU,
231 .minimum = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
232 .maximum = V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
233 .default_value = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
234 .menu_skip_mask = ~(
235 (1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
236 (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
237 (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)
238 ),
239 },
240 {
241 .id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
242 .type = V4L2_CTRL_TYPE_MENU,
243 .minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
244 .maximum = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
245 .default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
246 .menu_skip_mask = ~(
247 (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
248 (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
249 (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
250 (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_1)
251 ),
252 },
253 {
254 .id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
255 .type = V4L2_CTRL_TYPE_MENU,
256 .minimum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
257 .maximum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
258 .default_value = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
259 .menu_skip_mask = 0,
260 },
261 {
262 .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
263 .type = V4L2_CTRL_TYPE_MENU,
264 .minimum = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED,
265 .maximum = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY,
266 .default_value = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED,
267 .menu_skip_mask = 0,
268 },
269 {
270 .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA,
271 .type = V4L2_CTRL_TYPE_INTEGER,
272 .minimum = -6,
273 .maximum = 6,
274 .step = 1,
275 .default_value = 0,
276 },
277 {
278 .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA,
279 .type = V4L2_CTRL_TYPE_INTEGER,
280 .minimum = -6,
281 .maximum = 6,
282 .step = 1,
283 .default_value = 0,
284 },
285 {
286 .id = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
287 .type = V4L2_CTRL_TYPE_MENU,
288 .minimum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
289 .maximum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
290 .default_value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
291 .menu_skip_mask = 0,
292 },
293 {
294 .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P,
295 .type = V4L2_CTRL_TYPE_INTEGER,
296 .name = "The Number of Ref. Pic for P",
297 .minimum = 1,
298 .maximum = 2,
299 .step = 1,
300 .default_value = 1,
301 },
302 {
303 .id = V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM,
304 .type = V4L2_CTRL_TYPE_BOOLEAN,
305 .minimum = 0,
306 .maximum = 1,
307 .step = 1,
308 .default_value = 0,
309 },
310 {
311 .id = V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE,
312 .type = V4L2_CTRL_TYPE_BOOLEAN,
313 .minimum = 0,
314 .maximum = 1,
315 .step = 1,
316 .default_value = 0,
317 },
318 {
319 .id = V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP,
320 .type = V4L2_CTRL_TYPE_INTEGER,
321 .minimum = 0,
322 .maximum = 51,
323 .step = 1,
324 .default_value = 1,
325 },
326 {
327 .id = V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
328 .type = V4L2_CTRL_TYPE_INTEGER,
329 .minimum = 0,
330 .maximum = 51,
331 .step = 1,
332 .default_value = 1,
333 },
334 {
335 .id = V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
336 .type = V4L2_CTRL_TYPE_INTEGER,
337 .minimum = 0,
338 .maximum = 51,
339 .step = 1,
340 .default_value = 1,
341 },
342 {
343 .id = V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP,
344 .type = V4L2_CTRL_TYPE_INTEGER,
345 .minimum = 0,
346 .maximum = 51,
347 .step = 1,
348 .default_value = 1,
349 },
350 {
351 .id = V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP,
352 .type = V4L2_CTRL_TYPE_INTEGER,
353 .minimum = 0,
354 .maximum = 51,
355 .step = 1,
356 .default_value = 1,
357 },
358 {
359 .id = V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP,
360 .type = V4L2_CTRL_TYPE_INTEGER,
361 .name = "H263 I-Frame QP value",
362 .minimum = 1,
363 .maximum = 31,
364 .step = 1,
365 .default_value = 1,
366 },
367 {
368 .id = V4L2_CID_MPEG_VIDEO_H263_MIN_QP,
369 .type = V4L2_CTRL_TYPE_INTEGER,
370 .name = "H263 Minimum QP value",
371 .minimum = 1,
372 .maximum = 31,
373 .step = 1,
374 .default_value = 1,
375 },
376 {
377 .id = V4L2_CID_MPEG_VIDEO_H263_MAX_QP,
378 .type = V4L2_CTRL_TYPE_INTEGER,
379 .name = "H263 Maximum QP value",
380 .minimum = 1,
381 .maximum = 31,
382 .step = 1,
383 .default_value = 1,
384 },
385 {
386 .id = V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP,
387 .type = V4L2_CTRL_TYPE_INTEGER,
388 .name = "H263 P frame QP value",
389 .minimum = 1,
390 .maximum = 31,
391 .step = 1,
392 .default_value = 1,
393 },
394 {
395 .id = V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP,
396 .type = V4L2_CTRL_TYPE_INTEGER,
397 .name = "H263 B frame QP value",
398 .minimum = 1,
399 .maximum = 31,
400 .step = 1,
401 .default_value = 1,
402 },
403 {
404 .id = V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP,
405 .type = V4L2_CTRL_TYPE_INTEGER,
406 .name = "MPEG4 I-Frame QP value",
407 .minimum = 1,
408 .maximum = 31,
409 .step = 1,
410 .default_value = 1,
411 },
412 {
413 .id = V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP,
414 .type = V4L2_CTRL_TYPE_INTEGER,
415 .name = "MPEG4 Minimum QP value",
416 .minimum = 1,
417 .maximum = 31,
418 .step = 1,
419 .default_value = 1,
420 },
421 {
422 .id = V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP,
423 .type = V4L2_CTRL_TYPE_INTEGER,
424 .name = "MPEG4 Maximum QP value",
425 .minimum = 0,
426 .maximum = 51,
427 .step = 1,
428 .default_value = 1,
429 },
430 {
431 .id = V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP,
432 .type = V4L2_CTRL_TYPE_INTEGER,
433 .name = "MPEG4 P frame QP value",
434 .minimum = 1,
435 .maximum = 31,
436 .step = 1,
437 .default_value = 1,
438 },
439 {
440 .id = V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP,
441 .type = V4L2_CTRL_TYPE_INTEGER,
442 .name = "MPEG4 B frame QP value",
443 .minimum = 1,
444 .maximum = 31,
445 .step = 1,
446 .default_value = 1,
447 },
448 {
449 .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK,
450 .type = V4L2_CTRL_TYPE_BOOLEAN,
451 .name = "H264 Dark Reg Adaptive RC",
452 .minimum = 0,
453 .maximum = 1,
454 .step = 1,
455 .default_value = 0,
456 },
457 {
458 .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH,
459 .type = V4L2_CTRL_TYPE_BOOLEAN,
460 .name = "H264 Smooth Reg Adaptive RC",
461 .minimum = 0,
462 .maximum = 1,
463 .step = 1,
464 .default_value = 0,
465 },
466 {
467 .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC,
468 .type = V4L2_CTRL_TYPE_BOOLEAN,
469 .name = "H264 Static Reg Adaptive RC",
470 .minimum = 0,
471 .maximum = 1,
472 .step = 1,
473 .default_value = 0,
474 },
475 {
476 .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY,
477 .type = V4L2_CTRL_TYPE_BOOLEAN,
478 .name = "H264 Activity Reg Adaptive RC",
479 .minimum = 0,
480 .maximum = 1,
481 .step = 1,
482 .default_value = 0,
483 },
484 {
485 .id = V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE,
486 .type = V4L2_CTRL_TYPE_BOOLEAN,
487 .minimum = 0,
488 .maximum = 1,
489 .step = 1,
490 .default_value = 0,
491 },
492 {
493 .id = V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC,
494 .type = V4L2_CTRL_TYPE_MENU,
495 .minimum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
496 .maximum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED,
497 .default_value = 0,
498 .menu_skip_mask = 0,
499 },
500 {
501 .id = V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH,
502 .type = V4L2_CTRL_TYPE_INTEGER,
503 .minimum = 0,
504 .maximum = (1 << 16) - 1,
505 .step = 1,
506 .default_value = 0,
507 },
508 {
509 .id = V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT,
510 .type = V4L2_CTRL_TYPE_INTEGER,
511 .minimum = 0,
512 .maximum = (1 << 16) - 1,
513 .step = 1,
514 .default_value = 0,
515 },
516 {
517 .id = V4L2_CID_MPEG_VIDEO_GOP_CLOSURE,
518 .type = V4L2_CTRL_TYPE_BOOLEAN,
519 .minimum = 0,
520 .maximum = 1,
521 .step = 1,
522 .default_value = 1,
523 },
524 {
525 .id = V4L2_CID_MPEG_VIDEO_H264_I_PERIOD,
526 .type = V4L2_CTRL_TYPE_INTEGER,
527 .minimum = 0,
528 .maximum = (1 << 16) - 1,
529 .step = 1,
530 .default_value = 0,
531 },
532 {
533 .id = V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
534 .type = V4L2_CTRL_TYPE_MENU,
535 .minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
536 .maximum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE,
537 .default_value = 0,
538 .menu_skip_mask = 0,
539 },
540 {
541 .id = V4L2_CID_MPEG_VIDEO_MPEG4_QPEL,
542 .type = V4L2_CTRL_TYPE_BOOLEAN,
543 .minimum = 0,
544 .maximum = 1,
545 .step = 1,
546 .default_value = 0,
547 },
548};
549
550#define NUM_CTRLS ARRAY_SIZE(controls)
551static const char * const *mfc51_get_menu(u32 id)
552{
553 static const char * const mfc51_video_frame_skip[] = {
554 "Disabled",
555 "Level Limit",
556 "VBV/CPB Limit",
557 NULL,
558 };
559 static const char * const mfc51_video_force_frame[] = {
560 "Disabled",
561 "I Frame",
562 "Not Coded",
563 NULL,
564 };
565 switch (id) {
566 case V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE:
567 return mfc51_video_frame_skip;
568 case V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE:
569 return mfc51_video_force_frame;
570 }
571 return NULL;
572}
573
574static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
575{
576 mfc_debug(2, "src=%d, dst=%d, state=%d\n",
577 ctx->src_queue_cnt, ctx->dst_queue_cnt, ctx->state);
578 /* context is ready to make header */
579 if (ctx->state == MFCINST_GOT_INST && ctx->dst_queue_cnt >= 1)
580 return 1;
581 /* context is ready to encode a frame */
582 if (ctx->state == MFCINST_RUNNING &&
583 ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
584 return 1;
585 /* context is ready to encode remain frames */
586 if (ctx->state == MFCINST_FINISHING &&
587 ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
588 return 1;
589 mfc_debug(2, "ctx is not ready\n");
590 return 0;
591}
592
593static void cleanup_ref_queue(struct s5p_mfc_ctx *ctx)
594{
595 struct s5p_mfc_buf *mb_entry;
596 unsigned long mb_y_addr, mb_c_addr;
597
598 /* move buffers in ref queue to src queue */
599 while (!list_empty(&ctx->ref_queue)) {
600 mb_entry = list_entry((&ctx->ref_queue)->next,
601 struct s5p_mfc_buf, list);
602 mb_y_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 0);
603 mb_c_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 1);
604 list_del(&mb_entry->list);
605 ctx->ref_queue_cnt--;
606 list_add_tail(&mb_entry->list, &ctx->src_queue);
607 ctx->src_queue_cnt++;
608 }
609 mfc_debug(2, "enc src count: %d, enc ref count: %d\n",
610 ctx->src_queue_cnt, ctx->ref_queue_cnt);
611 INIT_LIST_HEAD(&ctx->ref_queue);
612 ctx->ref_queue_cnt = 0;
613}
614
615static int enc_pre_seq_start(struct s5p_mfc_ctx *ctx)
616{
617 struct s5p_mfc_dev *dev = ctx->dev;
618 struct s5p_mfc_buf *dst_mb;
619 unsigned long dst_addr;
620 unsigned int dst_size;
621 unsigned long flags;
622
623 spin_lock_irqsave(&dev->irqlock, flags);
624 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
625 dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
626 dst_size = vb2_plane_size(dst_mb->b, 0);
627 s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
628 spin_unlock_irqrestore(&dev->irqlock, flags);
629 return 0;
630}
631
/*
 * After the sequence header was generated: in SEPARATE header mode return
 * the header-carrying capture buffer to userspace, then switch the context
 * to RUNNING and reschedule it.
 */
static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_enc_params *p = &ctx->enc_params;
	struct s5p_mfc_buf *dst_mb;
	unsigned long flags;

	if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) {
		/* The header occupies a capture buffer of its own: complete
		 * it with the header's byte count as payload. */
		spin_lock_irqsave(&dev->irqlock, flags);
		dst_mb = list_entry(ctx->dst_queue.next,
				struct s5p_mfc_buf, list);
		list_del(&dst_mb->list);
		ctx->dst_queue_cnt--;
		vb2_set_plane_payload(dst_mb->b, 0,
						s5p_mfc_get_enc_strm_size());
		vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE);
		spin_unlock_irqrestore(&dev->irqlock, flags);
	}
	ctx->state = MFCINST_RUNNING;
	/* Frame encoding can proceed as soon as buffers are available */
	if (s5p_mfc_ctx_ready(ctx)) {
		spin_lock_irqsave(&dev->condlock, flags);
		set_bit(ctx->num, &dev->ctx_work_bits);
		spin_unlock_irqrestore(&dev->condlock, flags);
	}
	s5p_mfc_try_run(dev);
	return 0;
}
659
660static int enc_pre_frame_start(struct s5p_mfc_ctx *ctx)
661{
662 struct s5p_mfc_dev *dev = ctx->dev;
663 struct s5p_mfc_buf *dst_mb;
664 struct s5p_mfc_buf *src_mb;
665 unsigned long flags;
666 unsigned long src_y_addr, src_c_addr, dst_addr;
667 unsigned int dst_size;
668
669 spin_lock_irqsave(&dev->irqlock, flags);
670 src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
671 src_y_addr = vb2_dma_contig_plane_paddr(src_mb->b, 0);
672 src_c_addr = vb2_dma_contig_plane_paddr(src_mb->b, 1);
673 s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr, src_c_addr);
674 spin_unlock_irqrestore(&dev->irqlock, flags);
675
676 spin_lock_irqsave(&dev->irqlock, flags);
677 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
678 dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
679 dst_size = vb2_plane_size(dst_mb->b, 0);
680 s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
681 spin_unlock_irqrestore(&dev->irqlock, flags);
682
683 return 0;
684}
685
/*
 * Complete a frame-encode run: recycle the source buffer that the
 * hardware just consumed, move the next used source buffer to the
 * reference queue, and return the produced bitstream buffer to
 * userspace.  Always returns 0.
 *
 * All queue manipulation happens under dev->irqlock.
 */
static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf *mb_entry;
	unsigned long enc_y_addr, enc_c_addr;
	unsigned long mb_y_addr, mb_c_addr;
	int slice_type;
	unsigned int strm_size;
	unsigned long flags;

	slice_type = s5p_mfc_get_enc_slice_type();
	strm_size = s5p_mfc_get_enc_strm_size();
	mfc_debug(2, "Encoded slice type: %d", slice_type);
	mfc_debug(2, "Encoded stream size: %d", strm_size);
	mfc_debug(2, "Display order: %d",
		  mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT));
	spin_lock_irqsave(&dev->irqlock, flags);
	/*
	 * A non-negative slice type means a frame was actually encoded;
	 * find the buffer the hardware used (matched by its luma/chroma
	 * DMA addresses) in either the source or the reference queue and
	 * hand it back to vb2.
	 */
	if (slice_type >= 0) {
		s5p_mfc_get_enc_frame_buffer(ctx, &enc_y_addr, &enc_c_addr);
		list_for_each_entry(mb_entry, &ctx->src_queue, list) {
			mb_y_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 0);
			mb_c_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 1);
			if ((enc_y_addr == mb_y_addr) &&
						(enc_c_addr == mb_c_addr)) {
				list_del(&mb_entry->list);
				ctx->src_queue_cnt--;
				vb2_buffer_done(mb_entry->b,
							VB2_BUF_STATE_DONE);
				break;
			}
		}
		list_for_each_entry(mb_entry, &ctx->ref_queue, list) {
			mb_y_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 0);
			mb_c_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 1);
			if ((enc_y_addr == mb_y_addr) &&
						(enc_c_addr == mb_c_addr)) {
				list_del(&mb_entry->list);
				ctx->ref_queue_cnt--;
				vb2_buffer_done(mb_entry->b,
							VB2_BUF_STATE_DONE);
				break;
			}
		}
	}
	/*
	 * Move the head of the source queue onto the reference queue once
	 * it has been consumed ("used") by the hardware, so it stays
	 * available as a reference frame.
	 */
	if ((ctx->src_queue_cnt > 0) && (ctx->state == MFCINST_RUNNING)) {
		mb_entry = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
							list);
		if (mb_entry->used) {
			list_del(&mb_entry->list);
			ctx->src_queue_cnt--;
			list_add_tail(&mb_entry->list, &ctx->ref_queue);
			ctx->ref_queue_cnt++;
		}
		mfc_debug(2, "enc src count: %d, enc ref count: %d\n",
			  ctx->src_queue_cnt, ctx->ref_queue_cnt);
	}
	if (strm_size > 0) {
		/* at least one more dest. buffers exist always  */
		mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf,
							list);
		list_del(&mb_entry->list);
		ctx->dst_queue_cnt--;
		/* Tag the returned buffer with the encoded frame type. */
		switch (slice_type) {
		case S5P_FIMV_ENC_SI_SLICE_TYPE_I:
			mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
			break;
		case S5P_FIMV_ENC_SI_SLICE_TYPE_P:
			mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
			break;
		case S5P_FIMV_ENC_SI_SLICE_TYPE_B:
			mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_BFRAME;
			break;
		}
		vb2_set_plane_payload(mb_entry->b, 0, strm_size);
		vb2_buffer_done(mb_entry->b, VB2_BUF_STATE_DONE);
	}
	spin_unlock_irqrestore(&dev->irqlock, flags);
	/* De-schedule the context when it has nothing left to work on. */
	if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0)) {
		spin_lock(&dev->condlock);
		clear_bit(ctx->num, &dev->ctx_work_bits);
		spin_unlock(&dev->condlock);
	}
	return 0;
}
770
/* Encoder hooks invoked by the common MFC code around seq/frame runs. */
static struct s5p_mfc_codec_ops encoder_codec_ops = {
	.pre_seq_start = enc_pre_seq_start,
	.post_seq_start = enc_post_seq_start,
	.pre_frame_start = enc_pre_frame_start,
	.post_frame_start = enc_post_frame_start,
};
777
778/* Query capabilities of the device */
779static int vidioc_querycap(struct file *file, void *priv,
780 struct v4l2_capability *cap)
781{
782 struct s5p_mfc_dev *dev = video_drvdata(file);
783
784 strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
785 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
786 cap->bus_info[0] = 0;
787 cap->version = KERNEL_VERSION(1, 0, 0);
788 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
789 | V4L2_CAP_VIDEO_OUTPUT
790 | V4L2_CAP_STREAMING;
791 return 0;
792}
793
794static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool mplane, bool out)
795{
796 struct s5p_mfc_fmt *fmt;
797 int i, j = 0;
798
799 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
800 if (mplane && formats[i].num_planes == 1)
801 continue;
802 else if (!mplane && formats[i].num_planes > 1)
803 continue;
804 if (out && formats[i].type != MFC_FMT_RAW)
805 continue;
806 else if (!out && formats[i].type != MFC_FMT_ENC)
807 continue;
808 if (j == f->index) {
809 fmt = &formats[i];
810 strlcpy(f->description, fmt->name,
811 sizeof(f->description));
812 f->pixelformat = fmt->fourcc;
813 return 0;
814 }
815 ++j;
816 }
817 return -EINVAL;
818}
819
820static int vidioc_enum_fmt_vid_cap(struct file *file, void *pirv,
821 struct v4l2_fmtdesc *f)
822{
823 return vidioc_enum_fmt(f, false, false);
824}
825
826static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *pirv,
827 struct v4l2_fmtdesc *f)
828{
829 return vidioc_enum_fmt(f, true, false);
830}
831
832static int vidioc_enum_fmt_vid_out(struct file *file, void *prov,
833 struct v4l2_fmtdesc *f)
834{
835 return vidioc_enum_fmt(f, false, true);
836}
837
838static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *prov,
839 struct v4l2_fmtdesc *f)
840{
841 return vidioc_enum_fmt(f, true, true);
842}
843
844static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
845{
846 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
847 struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
848
849 mfc_debug(2, "f->type = %d ctx->state = %d\n", f->type, ctx->state);
850 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
851 /* This is run on output (encoder dest) */
852 pix_fmt_mp->width = 0;
853 pix_fmt_mp->height = 0;
854 pix_fmt_mp->field = V4L2_FIELD_NONE;
855 pix_fmt_mp->pixelformat = ctx->dst_fmt->fourcc;
856 pix_fmt_mp->num_planes = ctx->dst_fmt->num_planes;
857
858 pix_fmt_mp->plane_fmt[0].bytesperline = ctx->enc_dst_buf_size;
859 pix_fmt_mp->plane_fmt[0].sizeimage = ctx->enc_dst_buf_size;
860 } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
861 /* This is run on capture (encoder src) */
862 pix_fmt_mp->width = ctx->img_width;
863 pix_fmt_mp->height = ctx->img_height;
864
865 pix_fmt_mp->field = V4L2_FIELD_NONE;
866 pix_fmt_mp->pixelformat = ctx->src_fmt->fourcc;
867 pix_fmt_mp->num_planes = ctx->src_fmt->num_planes;
868
869 pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
870 pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
871 pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
872 pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
873 } else {
874 mfc_err("invalid buf type\n");
875 return -EINVAL;
876 }
877 return 0;
878}
879
880static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
881{
882 struct s5p_mfc_fmt *fmt;
883 struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
884
885 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
886 fmt = find_format(f, MFC_FMT_ENC);
887 if (!fmt) {
888 mfc_err("failed to try output format\n");
889 return -EINVAL;
890 }
891
892 if (pix_fmt_mp->plane_fmt[0].sizeimage == 0) {
893 mfc_err("must be set encoding output size\n");
894 return -EINVAL;
895 }
896
897 pix_fmt_mp->plane_fmt[0].bytesperline =
898 pix_fmt_mp->plane_fmt[0].sizeimage;
899 } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
900 fmt = find_format(f, MFC_FMT_RAW);
901 if (!fmt) {
902 mfc_err("failed to try output format\n");
903 return -EINVAL;
904 }
905
906 if (fmt->num_planes != pix_fmt_mp->num_planes) {
907 mfc_err("failed to try output format\n");
908 return -EINVAL;
909 }
910 } else {
911 mfc_err("invalid buf type\n");
912 return -EINVAL;
913 }
914 return 0;
915}
916
917static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
918{
919 struct s5p_mfc_dev *dev = video_drvdata(file);
920 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
921 struct s5p_mfc_fmt *fmt;
922 struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
923 unsigned long flags;
924 int ret = 0;
925
926 ret = vidioc_try_fmt(file, priv, f);
927 if (ret)
928 return ret;
929 if (ctx->vq_src.streaming || ctx->vq_dst.streaming) {
930 v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__);
931 ret = -EBUSY;
932 goto out;
933 }
934 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
935 fmt = find_format(f, MFC_FMT_ENC);
936 if (!fmt) {
937 mfc_err("failed to set capture format\n");
938 return -EINVAL;
939 }
940 ctx->state = MFCINST_INIT;
941 ctx->dst_fmt = fmt;
942 ctx->codec_mode = ctx->dst_fmt->codec_mode;
943 ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage;
944 pix_fmt_mp->plane_fmt[0].bytesperline = 0;
945 ctx->dst_bufs_cnt = 0;
946 ctx->capture_state = QUEUE_FREE;
947 s5p_mfc_alloc_instance_buffer(ctx);
948 spin_lock_irqsave(&dev->condlock, flags);
949 set_bit(ctx->num, &dev->ctx_work_bits);
950 spin_unlock_irqrestore(&dev->condlock, flags);
951 s5p_mfc_clean_ctx_int_flags(ctx);
952 s5p_mfc_try_run(dev);
953 if (s5p_mfc_wait_for_done_ctx(ctx, \
954 S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET, 1)) {
955 /* Error or timeout */
956 mfc_err("Error getting instance from hardware\n");
957 s5p_mfc_release_instance_buffer(ctx);
958 ret = -EIO;
959 goto out;
960 }
961 mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
962 } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
963 fmt = find_format(f, MFC_FMT_RAW);
964 if (!fmt) {
965 mfc_err("failed to set output format\n");
966 return -EINVAL;
967 }
968 if (fmt->num_planes != pix_fmt_mp->num_planes) {
969 mfc_err("failed to set output format\n");
970 ret = -EINVAL;
971 goto out;
972 }
973 ctx->src_fmt = fmt;
974 ctx->img_width = pix_fmt_mp->width;
975 ctx->img_height = pix_fmt_mp->height;
976 mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode);
977 mfc_debug(2, "fmt - w: %d, h: %d, ctx - w: %d, h: %d\n",
978 pix_fmt_mp->width, pix_fmt_mp->height,
979 ctx->img_width, ctx->img_height);
980 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) {
981 ctx->buf_width = ALIGN(ctx->img_width,
982 S5P_FIMV_NV12M_HALIGN);
983 ctx->luma_size = ALIGN(ctx->img_width,
984 S5P_FIMV_NV12M_HALIGN) * ALIGN(ctx->img_height,
985 S5P_FIMV_NV12M_LVALIGN);
986 ctx->chroma_size = ALIGN(ctx->img_width,
987 S5P_FIMV_NV12M_HALIGN) * ALIGN((ctx->img_height
988 >> 1), S5P_FIMV_NV12M_CVALIGN);
989
990 ctx->luma_size = ALIGN(ctx->luma_size,
991 S5P_FIMV_NV12M_SALIGN);
992 ctx->chroma_size = ALIGN(ctx->chroma_size,
993 S5P_FIMV_NV12M_SALIGN);
994
995 pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
996 pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
997 pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
998 pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
999
1000 } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT) {
1001 ctx->buf_width = ALIGN(ctx->img_width,
1002 S5P_FIMV_NV12MT_HALIGN);
1003 ctx->luma_size = ALIGN(ctx->img_width,
1004 S5P_FIMV_NV12MT_HALIGN) * ALIGN(ctx->img_height,
1005 S5P_FIMV_NV12MT_VALIGN);
1006 ctx->chroma_size = ALIGN(ctx->img_width,
1007 S5P_FIMV_NV12MT_HALIGN) * ALIGN((ctx->img_height
1008 >> 1), S5P_FIMV_NV12MT_VALIGN);
1009 ctx->luma_size = ALIGN(ctx->luma_size,
1010 S5P_FIMV_NV12MT_SALIGN);
1011 ctx->chroma_size = ALIGN(ctx->chroma_size,
1012 S5P_FIMV_NV12MT_SALIGN);
1013
1014 pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
1015 pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
1016 pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
1017 pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
1018 }
1019 ctx->src_bufs_cnt = 0;
1020 ctx->output_state = QUEUE_FREE;
1021 } else {
1022 mfc_err("invalid buf type\n");
1023 return -EINVAL;
1024 }
1025out:
1026 mfc_debug_leave();
1027 return ret;
1028}
1029
1030static int vidioc_reqbufs(struct file *file, void *priv,
1031 struct v4l2_requestbuffers *reqbufs)
1032{
1033 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
1034 int ret = 0;
1035
1036 /* if memory is not mmp or userptr return error */
1037 if ((reqbufs->memory != V4L2_MEMORY_MMAP) &&
1038 (reqbufs->memory != V4L2_MEMORY_USERPTR))
1039 return -EINVAL;
1040 if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
1041 if (ctx->capture_state != QUEUE_FREE) {
1042 mfc_err("invalid capture state: %d\n",
1043 ctx->capture_state);
1044 return -EINVAL;
1045 }
1046 ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
1047 if (ret != 0) {
1048 mfc_err("error in vb2_reqbufs() for E(D)\n");
1049 return ret;
1050 }
1051 ctx->capture_state = QUEUE_BUFS_REQUESTED;
1052 ret = s5p_mfc_alloc_codec_buffers(ctx);
1053 if (ret) {
1054 mfc_err("Failed to allocate encoding buffers\n");
1055 reqbufs->count = 0;
1056 ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
1057 return -ENOMEM;
1058 }
1059 } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1060 if (ctx->output_state != QUEUE_FREE) {
1061 mfc_err("invalid output state: %d\n",
1062 ctx->output_state);
1063 return -EINVAL;
1064 }
1065 ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
1066 if (ret != 0) {
1067 mfc_err("error in vb2_reqbufs() for E(S)\n");
1068 return ret;
1069 }
1070 ctx->output_state = QUEUE_BUFS_REQUESTED;
1071 } else {
1072 mfc_err("invalid buf type\n");
1073 return -EINVAL;
1074 }
1075 return ret;
1076}
1077
1078static int vidioc_querybuf(struct file *file, void *priv,
1079 struct v4l2_buffer *buf)
1080{
1081 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
1082 int ret = 0;
1083
1084 /* if memory is not mmp or userptr return error */
1085 if ((buf->memory != V4L2_MEMORY_MMAP) &&
1086 (buf->memory != V4L2_MEMORY_USERPTR))
1087 return -EINVAL;
1088 if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
1089 if (ctx->state != MFCINST_GOT_INST) {
1090 mfc_err("invalid context state: %d\n", ctx->state);
1091 return -EINVAL;
1092 }
1093 ret = vb2_querybuf(&ctx->vq_dst, buf);
1094 if (ret != 0) {
1095 mfc_err("error in vb2_querybuf() for E(D)\n");
1096 return ret;
1097 }
1098 buf->m.planes[0].m.mem_offset += DST_QUEUE_OFF_BASE;
1099 } else if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1100 ret = vb2_querybuf(&ctx->vq_src, buf);
1101 if (ret != 0) {
1102 mfc_err("error in vb2_querybuf() for E(S)\n");
1103 return ret;
1104 }
1105 } else {
1106 mfc_err("invalid buf type\n");
1107 return -EINVAL;
1108 }
1109 return ret;
1110}
1111
1112/* Queue a buffer */
1113static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
1114{
1115 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
1116
1117 if (ctx->state == MFCINST_ERROR) {
1118 mfc_err("Call on QBUF after unrecoverable error\n");
1119 return -EIO;
1120 }
1121 if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
1122 return vb2_qbuf(&ctx->vq_src, buf);
1123 else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1124 return vb2_qbuf(&ctx->vq_dst, buf);
1125 return -EINVAL;
1126}
1127
1128/* Dequeue a buffer */
1129static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
1130{
1131 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
1132
1133 if (ctx->state == MFCINST_ERROR) {
1134 mfc_err("Call on DQBUF after unrecoverable error\n");
1135 return -EIO;
1136 }
1137 if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
1138 return vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK);
1139 else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1140 return vb2_dqbuf(&ctx->vq_dst, buf, file->f_flags & O_NONBLOCK);
1141 return -EINVAL;
1142}
1143
1144/* Stream on */
1145static int vidioc_streamon(struct file *file, void *priv,
1146 enum v4l2_buf_type type)
1147{
1148 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
1149
1150 if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
1151 return vb2_streamon(&ctx->vq_src, type);
1152 else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1153 return vb2_streamon(&ctx->vq_dst, type);
1154 return -EINVAL;
1155}
1156
1157/* Stream off, which equals to a pause */
1158static int vidioc_streamoff(struct file *file, void *priv,
1159 enum v4l2_buf_type type)
1160{
1161 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
1162
1163 if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
1164 return vb2_streamoff(&ctx->vq_src, type);
1165 else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1166 return vb2_streamoff(&ctx->vq_dst, type);
1167 return -EINVAL;
1168}
1169
1170static inline int h264_level(enum v4l2_mpeg_video_h264_level lvl)
1171{
1172 static unsigned int t[V4L2_MPEG_VIDEO_H264_LEVEL_4_0 + 1] = {
1173 /* V4L2_MPEG_VIDEO_H264_LEVEL_1_0 */ 10,
1174 /* V4L2_MPEG_VIDEO_H264_LEVEL_1B */ 9,
1175 /* V4L2_MPEG_VIDEO_H264_LEVEL_1_1 */ 11,
1176 /* V4L2_MPEG_VIDEO_H264_LEVEL_1_2 */ 12,
1177 /* V4L2_MPEG_VIDEO_H264_LEVEL_1_3 */ 13,
1178 /* V4L2_MPEG_VIDEO_H264_LEVEL_2_0 */ 20,
1179 /* V4L2_MPEG_VIDEO_H264_LEVEL_2_1 */ 21,
1180 /* V4L2_MPEG_VIDEO_H264_LEVEL_2_2 */ 22,
1181 /* V4L2_MPEG_VIDEO_H264_LEVEL_3_0 */ 30,
1182 /* V4L2_MPEG_VIDEO_H264_LEVEL_3_1 */ 31,
1183 /* V4L2_MPEG_VIDEO_H264_LEVEL_3_2 */ 32,
1184 /* V4L2_MPEG_VIDEO_H264_LEVEL_4_0 */ 40,
1185 };
1186 return t[lvl];
1187}
1188
1189static inline int mpeg4_level(enum v4l2_mpeg_video_mpeg4_level lvl)
1190{
1191 static unsigned int t[V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 + 1] = {
1192 /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_0 */ 0,
1193 /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B */ 9,
1194 /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_1 */ 1,
1195 /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_2 */ 2,
1196 /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_3 */ 3,
1197 /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B */ 7,
1198 /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_4 */ 4,
1199 /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 */ 5,
1200 };
1201 return t[lvl];
1202}
1203
1204static inline int vui_sar_idc(enum v4l2_mpeg_video_h264_vui_sar_idc sar)
1205{
1206 static unsigned int t[V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED + 1] = {
1207 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED */ 0,
1208 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1 */ 1,
1209 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11 */ 2,
1210 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11 */ 3,
1211 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11 */ 4,
1212 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33 */ 5,
1213 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11 */ 6,
1214 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11 */ 7,
1215 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11 */ 8,
1216 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33 */ 9,
1217 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11 */ 10,
1218 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11 */ 11,
1219 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33 */ 12,
1220 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99 */ 13,
1221 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3 */ 14,
1222 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2 */ 15,
1223 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1 */ 16,
1224 /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED */ 255,
1225 };
1226 return t[sar];
1227}
1228
/*
 * Apply a single V4L2 control to the per-context encoding parameters.
 * Values are only stored here; they are pushed to the hardware later.
 * Returns 0 or -EINVAL for unknown controls/values.
 */
static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_enc_params *p = &ctx->enc_params;
	int ret = 0;

	switch (ctrl->id) {
	case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
		p->gop_size = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
		p->slice_mode = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
		p->slice_mb = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
		/* Control is in bytes; hardware parameter is in bits. */
		p->slice_bit = ctrl->val * 8;
		break;
	case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
		p->intra_refresh_mb = ctrl->val;
		break;
	case V4L2_CID_MPEG_MFC51_VIDEO_PADDING:
		p->pad = ctrl->val;
		break;
	case V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV:
		/* Packed as 0x00LLCBCR: luma, Cb, Cr padding values. */
		p->pad_luma = (ctrl->val >> 16) & 0xff;
		p->pad_cb = (ctrl->val >> 8) & 0xff;
		p->pad_cr = (ctrl->val >> 0) & 0xff;
		break;
	case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
		p->rc_frame = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_BITRATE:
		p->rc_bitrate = ctrl->val;
		break;
	case V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF:
		p->rc_reaction_coeff = ctrl->val;
		break;
	case V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE:
		ctx->force_frame_type = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_VBV_SIZE:
		p->vbv_size = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
		p->codec.h264.cpb_size = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
		p->seq_hdr_mode = ctrl->val;
		break;
	case V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE:
		p->frame_skip_mode = ctrl->val;
		break;
	case V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT:
		p->fixed_target_bit = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_B_FRAMES:
		p->num_b_frame = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
		/* Translate the V4L2 profile enum to the firmware code. */
		switch (ctrl->val) {
		case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
			p->codec.h264.profile =
					S5P_FIMV_ENC_PROFILE_H264_MAIN;
			break;
		case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
			p->codec.h264.profile =
					S5P_FIMV_ENC_PROFILE_H264_HIGH;
			break;
		case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
			p->codec.h264.profile =
					S5P_FIMV_ENC_PROFILE_H264_BASELINE;
			break;
		default:
			ret = -EINVAL;
		}
		break;
	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
		/* Keep both the V4L2 enum and the firmware level number. */
		p->codec.h264.level_v4l2 = ctrl->val;
		p->codec.h264.level = h264_level(ctrl->val);
		if (p->codec.h264.level < 0) {
			mfc_err("Level number is wrong\n");
			ret = p->codec.h264.level;
		}
		break;
	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
		p->codec.mpeg4.level_v4l2 = ctrl->val;
		p->codec.mpeg4.level = mpeg4_level(ctrl->val);
		if (p->codec.mpeg4.level < 0) {
			mfc_err("Level number is wrong\n");
			ret = p->codec.mpeg4.level;
		}
		break;
	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
		p->codec.h264.loop_filter_mode = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
		p->codec.h264.loop_filter_alpha = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
		p->codec.h264.loop_filter_beta = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
		p->codec.h264.entropy_mode = ctrl->val;
		break;
	case V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P:
		p->codec.h264.num_ref_pic_4p = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
		p->codec.h264._8x8_transform = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
		p->codec.h264.rc_mb = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
		p->codec.h264.rc_frame_qp = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
		p->codec.h264.rc_min_qp = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
		p->codec.h264.rc_max_qp = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
		p->codec.h264.rc_p_frame_qp = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
		p->codec.h264.rc_b_frame_qp = ctrl->val;
		break;
	/* MPEG-4 and H.263 share the mpeg4 QP parameter block. */
	case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:
	case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP:
		p->codec.mpeg4.rc_frame_qp = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP:
	case V4L2_CID_MPEG_VIDEO_H263_MIN_QP:
		p->codec.mpeg4.rc_min_qp = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP:
	case V4L2_CID_MPEG_VIDEO_H263_MAX_QP:
		p->codec.mpeg4.rc_max_qp = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:
	case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:
		p->codec.mpeg4.rc_p_frame_qp = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP:
	case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:
		p->codec.mpeg4.rc_b_frame_qp = ctrl->val;
		break;
	case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK:
		p->codec.h264.rc_mb_dark = ctrl->val;
		break;
	case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH:
		p->codec.h264.rc_mb_smooth = ctrl->val;
		break;
	case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC:
		p->codec.h264.rc_mb_static = ctrl->val;
		break;
	case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY:
		p->codec.h264.rc_mb_activity = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
		p->codec.h264.vui_sar = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
		p->codec.h264.vui_sar_idc = vui_sar_idc(ctrl->val);
		break;
	case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH:
		p->codec.h264.vui_ext_sar_width = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT:
		p->codec.h264.vui_ext_sar_height = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
		/* Control expresses closure; parameter stores openness. */
		p->codec.h264.open_gop = !ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
		p->codec.h264.open_gop_size = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
		switch (ctrl->val) {
		case V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE:
			p->codec.mpeg4.profile =
				S5P_FIMV_ENC_PROFILE_MPEG4_SIMPLE;
			break;
		case V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE:
			p->codec.mpeg4.profile =
			S5P_FIMV_ENC_PROFILE_MPEG4_ADVANCED_SIMPLE;
			break;
		default:
			ret = -EINVAL;
		}
		break;
	case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
		p->codec.mpeg4.quarter_pixel = ctrl->val;
		break;
	default:
		v4l2_err(&dev->v4l2_dev, "Invalid control, id=%d, val=%d\n",
							ctrl->id, ctrl->val);
		ret = -EINVAL;
	}
	return ret;
}
1434
/* Control framework hooks for the encoder's controls. */
static const struct v4l2_ctrl_ops s5p_mfc_enc_ctrl_ops = {
	.s_ctrl = s5p_mfc_enc_s_ctrl,
};
1438
1439int vidioc_s_parm(struct file *file, void *priv, struct v4l2_streamparm *a)
1440{
1441 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
1442
1443 if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1444 ctx->enc_params.rc_framerate_num =
1445 a->parm.output.timeperframe.denominator;
1446 ctx->enc_params.rc_framerate_denom =
1447 a->parm.output.timeperframe.numerator;
1448 } else {
1449 mfc_err("Setting FPS is only possible for the output queue\n");
1450 return -EINVAL;
1451 }
1452 return 0;
1453}
1454
1455int vidioc_g_parm(struct file *file, void *priv, struct v4l2_streamparm *a)
1456{
1457 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
1458
1459 if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
1460 a->parm.output.timeperframe.denominator =
1461 ctx->enc_params.rc_framerate_num;
1462 a->parm.output.timeperframe.numerator =
1463 ctx->enc_params.rc_framerate_denom;
1464 } else {
1465 mfc_err("Setting FPS is only possible for the output queue\n");
1466 return -EINVAL;
1467 }
1468 return 0;
1469}
1470
/* V4L2 ioctl dispatch table for the encoder video device. */
static const struct v4l2_ioctl_ops s5p_mfc_enc_ioctl_ops = {
	.vidioc_querycap = vidioc_querycap,
	.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
	.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
	.vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
	.vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt,
	.vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt,
	.vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt,
	.vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt,
	.vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt,
	.vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt,
	.vidioc_reqbufs = vidioc_reqbufs,
	.vidioc_querybuf = vidioc_querybuf,
	.vidioc_qbuf = vidioc_qbuf,
	.vidioc_dqbuf = vidioc_dqbuf,
	.vidioc_streamon = vidioc_streamon,
	.vidioc_streamoff = vidioc_streamoff,
	.vidioc_s_parm = vidioc_s_parm,
	.vidioc_g_parm = vidioc_g_parm,
};
1492
1493static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
1494{
1495 int i;
1496
1497 if (!fmt)
1498 return -EINVAL;
1499 if (fmt->num_planes != vb->num_planes) {
1500 mfc_err("invalid plane number for the format\n");
1501 return -EINVAL;
1502 }
1503 for (i = 0; i < fmt->num_planes; i++) {
1504 if (!vb2_dma_contig_plane_paddr(vb, i)) {
1505 mfc_err("failed to get plane cookie\n");
1506 return -EINVAL;
1507 }
1508 mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx",
1509 vb->v4l2_buf.index, i,
1510 vb2_dma_contig_plane_paddr(vb, i));
1511 }
1512 return 0;
1513}
1514
1515static int s5p_mfc_queue_setup(struct vb2_queue *vq,
1516 unsigned int *buf_count, unsigned int *plane_count,
1517 unsigned long psize[], void *allocators[])
1518{
1519 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
1520
1521 if (ctx->state != MFCINST_GOT_INST) {
1522 mfc_err("inavlid state: %d\n", ctx->state);
1523 return -EINVAL;
1524 }
1525 if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
1526 if (ctx->dst_fmt)
1527 *plane_count = ctx->dst_fmt->num_planes;
1528 else
1529 *plane_count = MFC_ENC_CAP_PLANE_COUNT;
1530 if (*buf_count < 1)
1531 *buf_count = 1;
1532 if (*buf_count > MFC_MAX_BUFFERS)
1533 *buf_count = MFC_MAX_BUFFERS;
1534 psize[0] = ctx->enc_dst_buf_size;
1535 allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
1536 } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1537 if (ctx->src_fmt)
1538 *plane_count = ctx->src_fmt->num_planes;
1539 else
1540 *plane_count = MFC_ENC_OUT_PLANE_COUNT;
1541
1542 if (*buf_count < 1)
1543 *buf_count = 1;
1544 if (*buf_count > MFC_MAX_BUFFERS)
1545 *buf_count = MFC_MAX_BUFFERS;
1546 psize[0] = ctx->luma_size;
1547 psize[1] = ctx->chroma_size;
1548 allocators[0] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
1549 allocators[1] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
1550 } else {
1551 mfc_err("inavlid queue type: %d\n", vq->type);
1552 return -EINVAL;
1553 }
1554 return 0;
1555}
1556
1557static void s5p_mfc_unlock(struct vb2_queue *q)
1558{
1559 struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
1560 struct s5p_mfc_dev *dev = ctx->dev;
1561
1562 mutex_unlock(&dev->mfc_mutex);
1563}
1564
1565static void s5p_mfc_lock(struct vb2_queue *q)
1566{
1567 struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
1568 struct s5p_mfc_dev *dev = ctx->dev;
1569
1570 mutex_lock(&dev->mfc_mutex);
1571}
1572
1573static int s5p_mfc_buf_init(struct vb2_buffer *vb)
1574{
1575 struct vb2_queue *vq = vb->vb2_queue;
1576 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
1577 unsigned int i;
1578 int ret;
1579
1580 if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
1581 ret = check_vb_with_fmt(ctx->dst_fmt, vb);
1582 if (ret < 0)
1583 return ret;
1584 i = vb->v4l2_buf.index;
1585 ctx->dst_bufs[i].b = vb;
1586 ctx->dst_bufs[i].cookie.stream =
1587 vb2_dma_contig_plane_paddr(vb, 0);
1588 ctx->dst_bufs_cnt++;
1589 } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1590 ret = check_vb_with_fmt(ctx->src_fmt, vb);
1591 if (ret < 0)
1592 return ret;
1593 i = vb->v4l2_buf.index;
1594 ctx->src_bufs[i].b = vb;
1595 ctx->src_bufs[i].cookie.raw.luma =
1596 vb2_dma_contig_plane_paddr(vb, 0);
1597 ctx->src_bufs[i].cookie.raw.chroma =
1598 vb2_dma_contig_plane_paddr(vb, 1);
1599 ctx->src_bufs_cnt++;
1600 } else {
1601 mfc_err("inavlid queue type: %d\n", vq->type);
1602 return -EINVAL;
1603 }
1604 return 0;
1605}
1606
1607static int s5p_mfc_buf_prepare(struct vb2_buffer *vb)
1608{
1609 struct vb2_queue *vq = vb->vb2_queue;
1610 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
1611 int ret;
1612
1613 if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
1614 ret = check_vb_with_fmt(ctx->dst_fmt, vb);
1615 if (ret < 0)
1616 return ret;
1617 mfc_debug(2, "plane size: %ld, dst size: %d\n",
1618 vb2_plane_size(vb, 0), ctx->enc_dst_buf_size);
1619 if (vb2_plane_size(vb, 0) < ctx->enc_dst_buf_size) {
1620 mfc_err("plane size is too small for capture\n");
1621 return -EINVAL;
1622 }
1623 } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1624 ret = check_vb_with_fmt(ctx->src_fmt, vb);
1625 if (ret < 0)
1626 return ret;
1627 mfc_debug(2, "plane size: %ld, luma size: %d\n",
1628 vb2_plane_size(vb, 0), ctx->luma_size);
1629 mfc_debug(2, "plane size: %ld, chroma size: %d\n",
1630 vb2_plane_size(vb, 1), ctx->chroma_size);
1631 if (vb2_plane_size(vb, 0) < ctx->luma_size ||
1632 vb2_plane_size(vb, 1) < ctx->chroma_size) {
1633 mfc_err("plane size is too small for output\n");
1634 return -EINVAL;
1635 }
1636 } else {
1637 mfc_err("inavlid queue type: %d\n", vq->type);
1638 return -EINVAL;
1639 }
1640 return 0;
1641}
1642
/*
 * Start-streaming callback (vb2 .start_streaming).
 *
 * Pushes the current control values to the context, and if the context
 * has enough buffers to run (s5p_mfc_ctx_ready()), marks it runnable in
 * the device's work bitmap and kicks the scheduler.  Always returns 0.
 */
static int s5p_mfc_start_streaming(struct vb2_queue *q)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;

	v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
	/* If context is ready then schedule it to run */
	if (s5p_mfc_ctx_ready(ctx)) {
		spin_lock_irqsave(&dev->condlock, flags);
		set_bit(ctx->num, &dev->ctx_work_bits);
		spin_unlock_irqrestore(&dev->condlock, flags);
	}
	s5p_mfc_try_run(dev);
	return 0;
}
1659
/*
 * Stop-streaming callback (vb2 .stop_streaming).
 *
 * If the hardware is currently processing this context, aborts the run
 * and waits for the frame-done interrupt before tearing anything down.
 * Then, under irqlock, returns all queued buffers of the stopped queue
 * to vb2 and resets the per-queue bookkeeping.  Always returns 0.
 */
static int s5p_mfc_stop_streaming(struct vb2_queue *q)
{
	unsigned long flags;
	struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
	struct s5p_mfc_dev *dev = ctx->dev;

	/* Only wait if the hardware is actually busy with this context */
	if ((ctx->state == MFCINST_FINISHING ||
		ctx->state == MFCINST_RUNNING) &&
		dev->curr_ctx == ctx->num && dev->hw_lock) {
		ctx->state = MFCINST_ABORT;
		s5p_mfc_wait_for_done_ctx(ctx, S5P_FIMV_R2H_CMD_FRAME_DONE_RET,
					  0);
	}
	ctx->state = MFCINST_FINISHED;
	spin_lock_irqsave(&dev->irqlock, flags);
	if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
		INIT_LIST_HEAD(&ctx->dst_queue);
		ctx->dst_queue_cnt = 0;
	}
	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		/* Reference frames are no longer needed once output stops */
		cleanup_ref_queue(ctx);
		s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
		INIT_LIST_HEAD(&ctx->src_queue);
		ctx->src_queue_cnt = 0;
	}
	spin_unlock_irqrestore(&dev->irqlock, flags);
	return 0;
}
1689
/*
 * Buffer queue callback (vb2 .buf_queue).
 *
 * Adds the buffer to the appropriate per-context queue (dst for CAPTURE,
 * src for OUTPUT) under irqlock.  A zero-byte OUTPUT buffer is the EOS
 * marker: it switches the context to FINISHING, completes the buffer
 * immediately and drops the reference queue.  Finally, if the context
 * became runnable, it is marked in the work bitmap and the scheduler is
 * kicked.
 */
static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_queue *vq = vb->vb2_queue;
	struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;
	struct s5p_mfc_buf *mfc_buf;

	if (ctx->state == MFCINST_ERROR) {
		/* Device is in error state - fail the buffer right away */
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
		cleanup_ref_queue(ctx);
		return;
	}
	if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index];
		mfc_buf->used = 0;
		/* Mark destination as available for use by MFC */
		spin_lock_irqsave(&dev->irqlock, flags);
		list_add_tail(&mfc_buf->list, &ctx->dst_queue);
		ctx->dst_queue_cnt++;
		spin_unlock_irqrestore(&dev->irqlock, flags);
	} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index];
		mfc_buf->used = 0;
		spin_lock_irqsave(&dev->irqlock, flags);
		if (vb->v4l2_planes[0].bytesused == 0) {
			/* Empty buffer signals end of stream */
			mfc_debug(1, "change state to FINISHING\n");
			ctx->state = MFCINST_FINISHING;
			vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
			cleanup_ref_queue(ctx);
		} else {
			list_add_tail(&mfc_buf->list, &ctx->src_queue);
			ctx->src_queue_cnt++;
		}
		spin_unlock_irqrestore(&dev->irqlock, flags);
	} else {
		mfc_err("unsupported buffer type (%d)\n", vq->type);
	}
	if (s5p_mfc_ctx_ready(ctx)) {
		spin_lock_irqsave(&dev->condlock, flags);
		set_bit(ctx->num, &dev->ctx_work_bits);
		spin_unlock_irqrestore(&dev->condlock, flags);
	}
	s5p_mfc_try_run(dev);
}
1735
/* videobuf2 queue operations used by the encoder side of the driver */
static struct vb2_ops s5p_mfc_enc_qops = {
	.queue_setup		= s5p_mfc_queue_setup,
	.wait_prepare		= s5p_mfc_unlock,
	.wait_finish		= s5p_mfc_lock,
	.buf_init		= s5p_mfc_buf_init,
	.buf_prepare		= s5p_mfc_buf_prepare,
	.start_streaming	= s5p_mfc_start_streaming,
	.stop_streaming		= s5p_mfc_stop_streaming,
	.buf_queue		= s5p_mfc_buf_queue,
};
1746
/* Accessor for the encoder codec operations table */
struct s5p_mfc_codec_ops *get_enc_codec_ops(void)
{
	return &encoder_codec_ops;
}
1751
/* Accessor for the encoder vb2 queue operations table */
struct vb2_ops *get_enc_queue_ops(void)
{
	return &s5p_mfc_enc_qops;
}
1756
/* Accessor for the encoder V4L2 ioctl operations table */
const struct v4l2_ioctl_ops *get_enc_v4l2_ioctl_ops(void)
{
	return &s5p_mfc_enc_ioctl_ops;
}
1761
1762#define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2CLASS(x) == V4L2_CTRL_CLASS_MPEG) \
1763 && V4L2_CTRL_DRIVER_PRIV(x))
1764
1765int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
1766{
1767 struct v4l2_ctrl_config cfg;
1768 int i;
1769
1770 v4l2_ctrl_handler_init(&ctx->ctrl_handler, NUM_CTRLS);
1771 if (ctx->ctrl_handler.error) {
1772 mfc_err("v4l2_ctrl_handler_init failed\n");
1773 return ctx->ctrl_handler.error;
1774 }
1775 for (i = 0; i < NUM_CTRLS; i++) {
1776 if (IS_MFC51_PRIV(controls[i].id)) {
1777 cfg.ops = &s5p_mfc_enc_ctrl_ops;
1778 cfg.id = controls[i].id;
1779 cfg.min = controls[i].minimum;
1780 cfg.max = controls[i].maximum;
1781 cfg.def = controls[i].default_value;
1782 cfg.name = controls[i].name;
1783 cfg.type = controls[i].type;
1784 cfg.flags = 0;
1785
1786 if (cfg.type == V4L2_CTRL_TYPE_MENU) {
1787 cfg.step = 0;
1788 cfg.menu_skip_mask = cfg.menu_skip_mask;
1789 cfg.qmenu = mfc51_get_menu(cfg.id);
1790 } else {
1791 cfg.step = controls[i].step;
1792 cfg.menu_skip_mask = 0;
1793 }
1794 ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->ctrl_handler,
1795 &cfg, NULL);
1796 } else {
1797 if (controls[i].type == V4L2_CTRL_TYPE_MENU) {
1798 ctx->ctrls[i] = v4l2_ctrl_new_std_menu(
1799 &ctx->ctrl_handler,
1800 &s5p_mfc_enc_ctrl_ops, controls[i].id,
1801 controls[i].maximum, 0,
1802 controls[i].default_value);
1803 } else {
1804 ctx->ctrls[i] = v4l2_ctrl_new_std(
1805 &ctx->ctrl_handler,
1806 &s5p_mfc_enc_ctrl_ops, controls[i].id,
1807 controls[i].minimum,
1808 controls[i].maximum, controls[i].step,
1809 controls[i].default_value);
1810 }
1811 }
1812 if (ctx->ctrl_handler.error) {
1813 mfc_err("Adding control (%d) failed\n", i);
1814 return ctx->ctrl_handler.error;
1815 }
1816 if (controls[i].is_volatile && ctx->ctrls[i])
1817 ctx->ctrls[i]->is_volatile = 1;
1818 }
1819 return 0;
1820}
1821
/*
 * Free the context's control handler and clear the cached control
 * pointers (they are owned and freed by the handler).
 */
void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx)
{
	int i;

	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
	for (i = 0; i < NUM_CTRLS; i++)
		ctx->ctrls[i] = NULL;
}
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.h b/drivers/media/video/s5p-mfc/s5p_mfc_enc.h
new file mode 100644
index 000000000000..405bdd3ee083
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.h
@@ -0,0 +1,23 @@
1/*
2 * linux/drivers/media/video/s5p-mfc/s5p_mfc_enc.h
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#ifndef S5P_MFC_ENC_H_
14#define S5P_MFC_ENC_H_
15
16struct s5p_mfc_codec_ops *get_enc_codec_ops(void);
17struct vb2_ops *get_enc_queue_ops(void);
18const struct v4l2_ioctl_ops *get_enc_v4l2_ioctl_ops(void);
19struct s5p_mfc_fmt *get_enc_def_fmt(bool src);
20int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx);
21void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx);
22
23#endif /* S5P_MFC_ENC_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_intr.c b/drivers/media/video/s5p-mfc/s5p_mfc_intr.c
new file mode 100644
index 000000000000..8f2f8bf4da7f
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_intr.c
@@ -0,0 +1,92 @@
1/*
2 * drivers/media/video/samsung/mfc5/s5p_mfc_intr.c
3 *
4 * C file for Samsung MFC (Multi Function Codec - FIMV) driver
5 * This file contains functions used to wait for command completion.
6 *
7 * Kamil Debski, Copyright (C) 2011 Samsung Electronics Co., Ltd.
8 * http://www.samsung.com/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/delay.h>
16#include <linux/errno.h>
17#include <linux/io.h>
18#include <linux/sched.h>
19#include <linux/wait.h>
20#include "regs-mfc.h"
21#include "s5p_mfc_common.h"
22#include "s5p_mfc_debug.h"
23#include "s5p_mfc_intr.h"
24
25int s5p_mfc_wait_for_done_dev(struct s5p_mfc_dev *dev, int command)
26{
27 int ret;
28
29 ret = wait_event_interruptible_timeout(dev->queue,
30 (dev->int_cond && (dev->int_type == command
31 || dev->int_type == S5P_FIMV_R2H_CMD_ERR_RET)),
32 msecs_to_jiffies(MFC_INT_TIMEOUT));
33 if (ret == 0) {
34 mfc_err("Interrupt (dev->int_type:%d, command:%d) timed out\n",
35 dev->int_type, command);
36 return 1;
37 } else if (ret == -ERESTARTSYS) {
38 mfc_err("Interrupted by a signal\n");
39 return 1;
40 }
41 mfc_debug(1, "Finished waiting (dev->int_type:%d, command: %d)\n",
42 dev->int_type, command);
43 if (dev->int_type == S5P_FIMV_R2H_CMD_ERR_RET)
44 return 1;
45 return 0;
46}
47
/* Reset the device-level interrupt bookkeeping before issuing a command */
void s5p_mfc_clean_dev_int_flags(struct s5p_mfc_dev *dev)
{
	dev->int_cond = 0;
	dev->int_type = 0;
	dev->int_err = 0;
}
54
55int s5p_mfc_wait_for_done_ctx(struct s5p_mfc_ctx *ctx,
56 int command, int interrupt)
57{
58 int ret;
59
60 if (interrupt) {
61 ret = wait_event_interruptible_timeout(ctx->queue,
62 (ctx->int_cond && (ctx->int_type == command
63 || ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)),
64 msecs_to_jiffies(MFC_INT_TIMEOUT));
65 } else {
66 ret = wait_event_timeout(ctx->queue,
67 (ctx->int_cond && (ctx->int_type == command
68 || ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)),
69 msecs_to_jiffies(MFC_INT_TIMEOUT));
70 }
71 if (ret == 0) {
72 mfc_err("Interrupt (ctx->int_type:%d, command:%d) timed out\n",
73 ctx->int_type, command);
74 return 1;
75 } else if (ret == -ERESTARTSYS) {
76 mfc_err("Interrupted by a signal\n");
77 return 1;
78 }
79 mfc_debug(1, "Finished waiting (ctx->int_type:%d, command: %d)\n",
80 ctx->int_type, command);
81 if (ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)
82 return 1;
83 return 0;
84}
85
/* Reset the context-level interrupt bookkeeping before issuing a command */
void s5p_mfc_clean_ctx_int_flags(struct s5p_mfc_ctx *ctx)
{
	ctx->int_cond = 0;
	ctx->int_type = 0;
	ctx->int_err = 0;
}
92
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_intr.h b/drivers/media/video/s5p-mfc/s5p_mfc_intr.h
new file mode 100644
index 000000000000..122d7732f745
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_intr.h
@@ -0,0 +1,26 @@
1/*
2 * drivers/media/video/samsung/mfc5/s5p_mfc_intr.h
3 *
4 * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
5 * It contains waiting functions declarations.
6 *
7 * Kamil Debski, Copyright (C) 2011 Samsung Electronics
8 * http://www.samsung.com/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef S5P_MFC_INTR_H_
16#define S5P_MFC_INTR_H_
17
18#include "s5p_mfc_common.h"
19
20int s5p_mfc_wait_for_done_ctx(struct s5p_mfc_ctx *ctx,
21 int command, int interrupt);
22int s5p_mfc_wait_for_done_dev(struct s5p_mfc_dev *dev, int command);
23void s5p_mfc_clean_ctx_int_flags(struct s5p_mfc_ctx *ctx);
24void s5p_mfc_clean_dev_int_flags(struct s5p_mfc_dev *dev);
25
26#endif /* S5P_MFC_INTR_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_opr.c b/drivers/media/video/s5p-mfc/s5p_mfc_opr.c
new file mode 100644
index 000000000000..7b239168c199
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_opr.c
@@ -0,0 +1,1397 @@
1/*
2 * drivers/media/video/samsung/mfc5/s5p_mfc_opr.c
3 *
4 * Samsung MFC (Multi Function Codec - FIMV) driver
5 * This file contains hw related functions.
6 *
7 * Kamil Debski, Copyright (c) 2011 Samsung Electronics
8 * http://www.samsung.com/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include "regs-mfc.h"
16#include "s5p_mfc_cmd.h"
17#include "s5p_mfc_common.h"
18#include "s5p_mfc_ctrl.h"
19#include "s5p_mfc_debug.h"
20#include "s5p_mfc_intr.h"
21#include "s5p_mfc_opr.h"
22#include "s5p_mfc_pm.h"
23#include "s5p_mfc_shm.h"
24#include <asm/cacheflush.h>
25#include <linux/delay.h>
26#include <linux/dma-mapping.h>
27#include <linux/err.h>
28#include <linux/firmware.h>
29#include <linux/io.h>
30#include <linux/jiffies.h>
31#include <linux/mm.h>
32#include <linux/sched.h>
33
34#define OFFSETA(x) (((x) - dev->bank1) >> MFC_OFFSET_SHIFT)
35#define OFFSETB(x) (((x) - dev->bank2) >> MFC_OFFSET_SHIFT)
36
37/* Allocate temporary buffers for decoding */
38int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx)
39{
40 void *desc_virt;
41 struct s5p_mfc_dev *dev = ctx->dev;
42
43 ctx->desc_buf = vb2_dma_contig_memops.alloc(
44 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], DESC_BUF_SIZE);
45 if (IS_ERR_VALUE((int)ctx->desc_buf)) {
46 ctx->desc_buf = 0;
47 mfc_err("Allocating DESC buffer failed\n");
48 return -ENOMEM;
49 }
50 ctx->desc_phys = s5p_mfc_mem_cookie(
51 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->desc_buf);
52 BUG_ON(ctx->desc_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
53 desc_virt = vb2_dma_contig_memops.vaddr(ctx->desc_buf);
54 if (desc_virt == NULL) {
55 vb2_dma_contig_memops.put(ctx->desc_buf);
56 ctx->desc_phys = 0;
57 ctx->desc_buf = 0;
58 mfc_err("Remapping DESC buffer failed\n");
59 return -ENOMEM;
60 }
61 memset(desc_virt, 0, DESC_BUF_SIZE);
62 wmb();
63 return 0;
64}
65
/* Release the decoder descriptor buffer, if one was allocated.
 * Safe to call multiple times: desc_phys/desc_buf are cleared. */
void s5p_mfc_release_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
{
	if (ctx->desc_phys) {
		vb2_dma_contig_memops.put(ctx->desc_buf);
		ctx->desc_phys = 0;
		ctx->desc_buf = 0;
	}
}
75
76/* Allocate codec buffers */
77int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx)
78{
79 struct s5p_mfc_dev *dev = ctx->dev;
80 unsigned int enc_ref_y_size = 0;
81 unsigned int enc_ref_c_size = 0;
82 unsigned int guard_width, guard_height;
83
84 if (ctx->type == MFCINST_DECODER) {
85 mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n",
86 ctx->luma_size, ctx->chroma_size, ctx->mv_size);
87 mfc_debug(2, "Totals bufs: %d\n", ctx->total_dpb_count);
88 } else if (ctx->type == MFCINST_ENCODER) {
89 enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
90 * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
91 enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
92
93 if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC) {
94 enc_ref_c_size = ALIGN(ctx->img_width,
95 S5P_FIMV_NV12MT_HALIGN)
96 * ALIGN(ctx->img_height >> 1,
97 S5P_FIMV_NV12MT_VALIGN);
98 enc_ref_c_size = ALIGN(enc_ref_c_size,
99 S5P_FIMV_NV12MT_SALIGN);
100 } else {
101 guard_width = ALIGN(ctx->img_width + 16,
102 S5P_FIMV_NV12MT_HALIGN);
103 guard_height = ALIGN((ctx->img_height >> 1) + 4,
104 S5P_FIMV_NV12MT_VALIGN);
105 enc_ref_c_size = ALIGN(guard_width * guard_height,
106 S5P_FIMV_NV12MT_SALIGN);
107 }
108 mfc_debug(2, "recon luma size: %d chroma size: %d\n",
109 enc_ref_y_size, enc_ref_c_size);
110 } else {
111 return -EINVAL;
112 }
113 /* Codecs have different memory requirements */
114 switch (ctx->codec_mode) {
115 case S5P_FIMV_CODEC_H264_DEC:
116 ctx->bank1_size =
117 ALIGN(S5P_FIMV_DEC_NB_IP_SIZE +
118 S5P_FIMV_DEC_VERT_NB_MV_SIZE,
119 S5P_FIMV_DEC_BUF_ALIGN);
120 ctx->bank2_size = ctx->total_dpb_count * ctx->mv_size;
121 break;
122 case S5P_FIMV_CODEC_MPEG4_DEC:
123 ctx->bank1_size =
124 ALIGN(S5P_FIMV_DEC_NB_DCAC_SIZE +
125 S5P_FIMV_DEC_UPNB_MV_SIZE +
126 S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
127 S5P_FIMV_DEC_STX_PARSER_SIZE +
128 S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE,
129 S5P_FIMV_DEC_BUF_ALIGN);
130 ctx->bank2_size = 0;
131 break;
132 case S5P_FIMV_CODEC_VC1RCV_DEC:
133 case S5P_FIMV_CODEC_VC1_DEC:
134 ctx->bank1_size =
135 ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
136 S5P_FIMV_DEC_UPNB_MV_SIZE +
137 S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
138 S5P_FIMV_DEC_NB_DCAC_SIZE +
139 3 * S5P_FIMV_DEC_VC1_BITPLANE_SIZE,
140 S5P_FIMV_DEC_BUF_ALIGN);
141 ctx->bank2_size = 0;
142 break;
143 case S5P_FIMV_CODEC_MPEG2_DEC:
144 ctx->bank1_size = 0;
145 ctx->bank2_size = 0;
146 break;
147 case S5P_FIMV_CODEC_H263_DEC:
148 ctx->bank1_size =
149 ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
150 S5P_FIMV_DEC_UPNB_MV_SIZE +
151 S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
152 S5P_FIMV_DEC_NB_DCAC_SIZE,
153 S5P_FIMV_DEC_BUF_ALIGN);
154 ctx->bank2_size = 0;
155 break;
156 case S5P_FIMV_CODEC_H264_ENC:
157 ctx->bank1_size = (enc_ref_y_size * 2) +
158 S5P_FIMV_ENC_UPMV_SIZE +
159 S5P_FIMV_ENC_COLFLG_SIZE +
160 S5P_FIMV_ENC_INTRAMD_SIZE +
161 S5P_FIMV_ENC_NBORINFO_SIZE;
162 ctx->bank2_size = (enc_ref_y_size * 2) +
163 (enc_ref_c_size * 4) +
164 S5P_FIMV_ENC_INTRAPRED_SIZE;
165 break;
166 case S5P_FIMV_CODEC_MPEG4_ENC:
167 ctx->bank1_size = (enc_ref_y_size * 2) +
168 S5P_FIMV_ENC_UPMV_SIZE +
169 S5P_FIMV_ENC_COLFLG_SIZE +
170 S5P_FIMV_ENC_ACDCCOEF_SIZE;
171 ctx->bank2_size = (enc_ref_y_size * 2) +
172 (enc_ref_c_size * 4);
173 break;
174 case S5P_FIMV_CODEC_H263_ENC:
175 ctx->bank1_size = (enc_ref_y_size * 2) +
176 S5P_FIMV_ENC_UPMV_SIZE +
177 S5P_FIMV_ENC_ACDCCOEF_SIZE;
178 ctx->bank2_size = (enc_ref_y_size * 2) +
179 (enc_ref_c_size * 4);
180 break;
181 default:
182 break;
183 }
184 /* Allocate only if memory from bank 1 is necessary */
185 if (ctx->bank1_size > 0) {
186 ctx->bank1_buf = vb2_dma_contig_memops.alloc(
187 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
188 if (IS_ERR(ctx->bank1_buf)) {
189 ctx->bank1_buf = 0;
190 printk(KERN_ERR
191 "Buf alloc for decoding failed (port A)\n");
192 return -ENOMEM;
193 }
194 ctx->bank1_phys = s5p_mfc_mem_cookie(
195 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_buf);
196 BUG_ON(ctx->bank1_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
197 }
198 /* Allocate only if memory from bank 2 is necessary */
199 if (ctx->bank2_size > 0) {
200 ctx->bank2_buf = vb2_dma_contig_memops.alloc(
201 dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_size);
202 if (IS_ERR(ctx->bank2_buf)) {
203 ctx->bank2_buf = 0;
204 mfc_err("Buf alloc for decoding failed (port B)\n");
205 return -ENOMEM;
206 }
207 ctx->bank2_phys = s5p_mfc_mem_cookie(
208 dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_buf);
209 BUG_ON(ctx->bank2_phys & ((1 << MFC_BANK2_ALIGN_ORDER) - 1));
210 }
211 return 0;
212}
213
214/* Release buffers allocated for codec */
215void s5p_mfc_release_codec_buffers(struct s5p_mfc_ctx *ctx)
216{
217 if (ctx->bank1_buf) {
218 vb2_dma_contig_memops.put(ctx->bank1_buf);
219 ctx->bank1_buf = 0;
220 ctx->bank1_phys = 0;
221 ctx->bank1_size = 0;
222 }
223 if (ctx->bank2_buf) {
224 vb2_dma_contig_memops.put(ctx->bank2_buf);
225 ctx->bank2_buf = 0;
226 ctx->bank2_phys = 0;
227 ctx->bank2_size = 0;
228 }
229}
230
231/* Allocate memory for instance data buffer */
232int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx)
233{
234 void *context_virt;
235 struct s5p_mfc_dev *dev = ctx->dev;
236
237 if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC ||
238 ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC)
239 ctx->ctx_size = MFC_H264_CTX_BUF_SIZE;
240 else
241 ctx->ctx_size = MFC_CTX_BUF_SIZE;
242 ctx->ctx_buf = vb2_dma_contig_memops.alloc(
243 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx_size);
244 if (IS_ERR(ctx->ctx_buf)) {
245 mfc_err("Allocating context buffer failed\n");
246 ctx->ctx_phys = 0;
247 ctx->ctx_buf = 0;
248 return -ENOMEM;
249 }
250 ctx->ctx_phys = s5p_mfc_mem_cookie(
251 dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx_buf);
252 BUG_ON(ctx->ctx_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
253 ctx->ctx_ofs = OFFSETA(ctx->ctx_phys);
254 context_virt = vb2_dma_contig_memops.vaddr(ctx->ctx_buf);
255 if (context_virt == NULL) {
256 mfc_err("Remapping instance buffer failed\n");
257 vb2_dma_contig_memops.put(ctx->ctx_buf);
258 ctx->ctx_phys = 0;
259 ctx->ctx_buf = 0;
260 return -ENOMEM;
261 }
262 /* Zero content of the allocated memory */
263 memset(context_virt, 0, ctx->ctx_size);
264 wmb();
265 if (s5p_mfc_init_shm(ctx) < 0) {
266 vb2_dma_contig_memops.put(ctx->ctx_buf);
267 ctx->ctx_phys = 0;
268 ctx->ctx_buf = 0;
269 return -ENOMEM;
270 }
271 return 0;
272}
273
/* Release the instance-context buffer and the shared-memory allocation.
 * Idempotent: pointers are cleared after freeing. */
void s5p_mfc_release_instance_buffer(struct s5p_mfc_ctx *ctx)
{
	if (ctx->ctx_buf) {
		vb2_dma_contig_memops.put(ctx->ctx_buf);
		ctx->ctx_phys = 0;
		ctx->ctx_buf = 0;
	}
	if (ctx->shm_alloc) {
		vb2_dma_contig_memops.put(ctx->shm_alloc);
		ctx->shm_alloc = 0;
		ctx->shm = 0;
	}
}
288
/* Program the decoder descriptor buffer address (as a bank-1 offset)
 * and size into the channel-0 registers. */
void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	mfc_write(dev, OFFSETA(ctx->desc_phys), S5P_FIMV_SI_CH0_DESC_ADR);
	mfc_write(dev, DESC_BUF_SIZE, S5P_FIMV_SI_CH0_DESC_SIZE);
}
297
/* Program the host<->firmware shared-memory offset for channel 0 */
void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	mfc_write(dev, ctx->shm_ofs, S5P_FIMV_SI_CH0_HOST_WR_ADR);
}
304
/*
 * Program the decoder's compressed-stream (CPB) buffer registers.
 * @buf_addr: bank-1 physical address of the stream buffer
 *            (NOTE(review): declared int, not unsigned long like the
 *            encoder variant - looks inconsistent; confirm with callers)
 * @start_num_byte: parse start offset, written into shared memory
 * @buf_size: size of the frame data in the buffer
 * Always returns 0.
 */
int s5p_mfc_set_dec_stream_buffer(struct s5p_mfc_ctx *ctx, int buf_addr,
		  unsigned int start_num_byte, unsigned int buf_size)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	mfc_write(dev, OFFSETA(buf_addr), S5P_FIMV_SI_CH0_SB_ST_ADR);
	mfc_write(dev, ctx->dec_src_buf_size, S5P_FIMV_SI_CH0_CPB_SIZE);
	mfc_write(dev, buf_size, S5P_FIMV_SI_CH0_SB_FRM_SIZE);
	s5p_mfc_write_shm(ctx, start_num_byte, START_BYTE_NUM);
	return 0;
}
317
/*
 * Program all decoder frame buffers into the hardware.
 *
 * Carves codec-specific auxiliary structures out of the bank-1 (and, for
 * H264, bank-2) allocations made by s5p_mfc_alloc_codec_buffers(), then
 * programs the luma/chroma DPB addresses for every destination buffer
 * and (H264 only) the per-DPB motion-vector buffers.  Finally writes the
 * plane sizes to shared memory and issues the INIT_BUFS command.
 *
 * Returns 0 on success, -EINVAL for an unknown codec, -ENOMEM if the
 * pre-allocated banks turned out too small (sizes are tracked as signed
 * so a shortfall shows up as negative).
 */
int s5p_mfc_set_dec_frame_buffer(struct s5p_mfc_ctx *ctx)
{
	unsigned int frame_size, i;
	unsigned int frame_size_ch, frame_size_mv;
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned int dpb;
	size_t buf_addr1, buf_addr2;
	int buf_size1, buf_size2;

	buf_addr1 = ctx->bank1_phys;
	buf_size1 = ctx->bank1_size;
	buf_addr2 = ctx->bank2_phys;
	buf_size2 = ctx->bank2_size;
	/* Update only the DPB-count field of the control register */
	dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
						~S5P_FIMV_DPB_COUNT_MASK;
	mfc_write(dev, ctx->total_dpb_count | dpb,
						S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
	s5p_mfc_set_shared_buffer(ctx);
	/* Carve codec-specific auxiliary buffers out of bank 1 */
	switch (ctx->codec_mode) {
	case S5P_FIMV_CODEC_H264_DEC:
		mfc_write(dev, OFFSETA(buf_addr1),
						S5P_FIMV_H264_VERT_NB_MV_ADR);
		buf_addr1 += S5P_FIMV_DEC_VERT_NB_MV_SIZE;
		buf_size1 -= S5P_FIMV_DEC_VERT_NB_MV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_NB_IP_ADR);
		buf_addr1 += S5P_FIMV_DEC_NB_IP_SIZE;
		buf_size1 -= S5P_FIMV_DEC_NB_IP_SIZE;
		break;
	case S5P_FIMV_CODEC_MPEG4_DEC:
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_NB_DCAC_ADR);
		buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
		buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_NB_MV_ADR);
		buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
		buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SA_MV_ADR);
		buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
		buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SP_ADR);
		buf_addr1 += S5P_FIMV_DEC_STX_PARSER_SIZE;
		buf_size1 -= S5P_FIMV_DEC_STX_PARSER_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_OT_LINE_ADR);
		buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
		buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
		break;
	case S5P_FIMV_CODEC_H263_DEC:
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_OT_LINE_ADR);
		buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
		buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_NB_MV_ADR);
		buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
		buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_SA_MV_ADR);
		buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
		buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_NB_DCAC_ADR);
		buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
		buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
		break;
	case S5P_FIMV_CODEC_VC1_DEC:
	case S5P_FIMV_CODEC_VC1RCV_DEC:
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_NB_DCAC_ADR);
		buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
		buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_OT_LINE_ADR);
		buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
		buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_UP_NB_MV_ADR);
		buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
		buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_SA_MV_ADR);
		buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
		buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE3_ADR);
		buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
		buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE2_ADR);
		buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
		buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE1_ADR);
		buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
		buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
		break;
	case S5P_FIMV_CODEC_MPEG2_DEC:
		/* MPEG2 needs no auxiliary buffers */
		break;
	default:
		mfc_err("Unknown codec for decoding (%x)\n",
			ctx->codec_mode);
		return -EINVAL;
		break;
	}
	frame_size = ctx->luma_size;
	frame_size_ch = ctx->chroma_size;
	frame_size_mv = ctx->mv_size;
	mfc_debug(2, "Frm size: %d ch: %d mv: %d\n", frame_size, frame_size_ch,
								frame_size_mv);
	for (i = 0; i < ctx->total_dpb_count; i++) {
		/* Bank2 */
		/* NOTE(review): luma goes through OFFSETB (bank 2) but
		 * chroma through OFFSETA (bank 1) - presumably the DPB
		 * planes live in different banks; confirm against the
		 * MFC memory map before changing. */
		mfc_debug(2, "Luma %d: %x\n", i,
					ctx->dst_bufs[i].cookie.raw.luma);
		mfc_write(dev, OFFSETB(ctx->dst_bufs[i].cookie.raw.luma),
						S5P_FIMV_DEC_LUMA_ADR + i * 4);
		mfc_debug(2, "\tChroma %d: %x\n", i,
					ctx->dst_bufs[i].cookie.raw.chroma);
		mfc_write(dev, OFFSETA(ctx->dst_bufs[i].cookie.raw.chroma),
					       S5P_FIMV_DEC_CHROMA_ADR + i * 4);
		if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC) {
			mfc_debug(2, "\tBuf2: %x, size: %d\n",
							buf_addr2, buf_size2);
			mfc_write(dev, OFFSETB(buf_addr2),
						S5P_FIMV_H264_MV_ADR + i * 4);
			buf_addr2 += frame_size_mv;
			buf_size2 -= frame_size_mv;
		}
	}
	mfc_debug(2, "Buf1: %u, buf_size1: %d\n", buf_addr1, buf_size1);
	mfc_debug(2, "Buf 1/2 size after: %d/%d (frames %d)\n",
			buf_size1,  buf_size2, ctx->total_dpb_count);
	if (buf_size1 < 0 || buf_size2 < 0) {
		mfc_debug(2, "Not enough memory has been allocated\n");
		return -ENOMEM;
	}
	s5p_mfc_write_shm(ctx, frame_size, ALLOC_LUMA_DPB_SIZE);
	s5p_mfc_write_shm(ctx, frame_size_ch, ALLOC_CHROMA_DPB_SIZE);
	if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC)
		s5p_mfc_write_shm(ctx, frame_size_mv, ALLOC_MV_SIZE);
	/* Kick off buffer initialisation in the firmware */
	mfc_write(dev, ((S5P_FIMV_CH_INIT_BUFS & S5P_FIMV_CH_MASK)
				<< S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
						S5P_FIMV_SI_CH0_INST_ID);
	return 0;
}
450
/* Program the encoder's output (stream) buffer address and size.
 * @addr is a bank-1 physical address.  Always returns 0. */
int s5p_mfc_set_enc_stream_buffer(struct s5p_mfc_ctx *ctx,
		unsigned long addr, unsigned int size)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	mfc_write(dev, OFFSETA(addr), S5P_FIMV_ENC_SI_CH0_SB_ADR);
	mfc_write(dev, size, S5P_FIMV_ENC_SI_CH0_SB_SIZE);
	return 0;
}
461
/* Program the current raw frame's luma/chroma addresses (bank-2 physical
 * addresses) as the encoder's input frame. */
void s5p_mfc_set_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
		unsigned long y_addr, unsigned long c_addr)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	mfc_write(dev, OFFSETB(y_addr), S5P_FIMV_ENC_SI_CH0_CUR_Y_ADR);
	mfc_write(dev, OFFSETB(c_addr), S5P_FIMV_ENC_SI_CH0_CUR_C_ADR);
}
470
/* Read back the luma/chroma addresses of the frame the hardware just
 * encoded, converting register offsets to bank-2 physical addresses. */
void s5p_mfc_get_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
		unsigned long *y_addr, unsigned long *c_addr)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	*y_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_Y_ADDR)
							<< MFC_OFFSET_SHIFT);
	*c_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_C_ADDR)
							<< MFC_OFFSET_SHIFT);
}
481
/*
 * Program the encoder's reference-frame and auxiliary buffers.
 *
 * Recomputes the reconstructed luma/chroma plane sizes (same formulas as
 * s5p_mfc_alloc_codec_buffers()), then carves reference frames and
 * codec-specific auxiliary structures out of the bank-1/bank-2
 * allocations and writes their offsets to the hardware: 2 luma refs per
 * bank, 4 chroma refs in bank 2, plus per-codec MV/flag/coefficient
 * areas in bank 1.
 *
 * Returns 0 on success, -EINVAL for an unknown encoder codec.
 *
 * NOTE(review): buf_size1/buf_size2 are size_t but printed with %d in
 * the mfc_debug() calls - harmless on 32-bit ARM, but %zu would be the
 * correct specifier; confirm before changing debug output.
 */
int s5p_mfc_set_enc_ref_buffer(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	size_t buf_addr1, buf_addr2;
	size_t buf_size1, buf_size2;
	unsigned int enc_ref_y_size, enc_ref_c_size;
	unsigned int guard_width, guard_height;
	int i;

	buf_addr1 = ctx->bank1_phys;
	buf_size1 = ctx->bank1_size;
	buf_addr2 = ctx->bank2_phys;
	buf_size2 = ctx->bank2_size;
	enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
		* ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
	enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
	if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC) {
		enc_ref_c_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
			* ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN);
		enc_ref_c_size = ALIGN(enc_ref_c_size, S5P_FIMV_NV12MT_SALIGN);
	} else {
		/* Non-H264 codecs need a guard band around the chroma ref */
		guard_width = ALIGN(ctx->img_width + 16,
					S5P_FIMV_NV12MT_HALIGN);
		guard_height = ALIGN((ctx->img_height >> 1) + 4,
					S5P_FIMV_NV12MT_VALIGN);
		enc_ref_c_size = ALIGN(guard_width * guard_height,
					S5P_FIMV_NV12MT_SALIGN);
	}
	mfc_debug(2, "buf_size1: %d, buf_size2: %d\n", buf_size1, buf_size2);
	switch (ctx->codec_mode) {
	case S5P_FIMV_CODEC_H264_ENC:
		/* Luma refs 0/1 in bank 1, refs 2/3 in bank 2 */
		for (i = 0; i < 2; i++) {
			mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
			buf_addr1 += enc_ref_y_size;
			buf_size1 -= enc_ref_y_size;

			mfc_write(dev, OFFSETB(buf_addr2),
				S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
			buf_addr2 += enc_ref_y_size;
			buf_size2 -= enc_ref_y_size;
		}
		/* All four chroma refs in bank 2 */
		for (i = 0; i < 4; i++) {
			mfc_write(dev, OFFSETB(buf_addr2),
				S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
			buf_addr2 += enc_ref_c_size;
			buf_size2 -= enc_ref_c_size;
		}
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_UP_MV_ADR);
		buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
		buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_H264_COZERO_FLAG_ADR);
		buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
		buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_H264_UP_INTRA_MD_ADR);
		buf_addr1 += S5P_FIMV_ENC_INTRAMD_SIZE;
		buf_size1 -= S5P_FIMV_ENC_INTRAMD_SIZE;
		mfc_write(dev, OFFSETB(buf_addr2),
				S5P_FIMV_H264_UP_INTRA_PRED_ADR);
		buf_addr2 += S5P_FIMV_ENC_INTRAPRED_SIZE;
		buf_size2 -= S5P_FIMV_ENC_INTRAPRED_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_H264_NBOR_INFO_ADR);
		buf_addr1 += S5P_FIMV_ENC_NBORINFO_SIZE;
		buf_size1 -= S5P_FIMV_ENC_NBORINFO_SIZE;
		mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
			buf_size1, buf_size2);
		break;
	case S5P_FIMV_CODEC_MPEG4_ENC:
		for (i = 0; i < 2; i++) {
			mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
			buf_addr1 += enc_ref_y_size;
			buf_size1 -= enc_ref_y_size;
			mfc_write(dev, OFFSETB(buf_addr2),
				S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
			buf_addr2 += enc_ref_y_size;
			buf_size2 -= enc_ref_y_size;
		}
		for (i = 0; i < 4; i++) {
			mfc_write(dev, OFFSETB(buf_addr2),
				S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
			buf_addr2 += enc_ref_c_size;
			buf_size2 -= enc_ref_c_size;
		}
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_MV_ADR);
		buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
		buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_MPEG4_COZERO_FLAG_ADR);
		buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
		buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_MPEG4_ACDC_COEF_ADR);
		buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
		buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
		mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
			buf_size1, buf_size2);
		break;
	case S5P_FIMV_CODEC_H263_ENC:
		for (i = 0; i < 2; i++) {
			mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
			buf_addr1 += enc_ref_y_size;
			buf_size1 -= enc_ref_y_size;
			mfc_write(dev, OFFSETB(buf_addr2),
				S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
			buf_addr2 += enc_ref_y_size;
			buf_size2 -= enc_ref_y_size;
		}
		for (i = 0; i < 4; i++) {
			mfc_write(dev, OFFSETB(buf_addr2),
				S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
			buf_addr2 += enc_ref_c_size;
			buf_size2 -= enc_ref_c_size;
		}
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_MV_ADR);
		buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
		buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_ACDC_COEF_ADR);
		buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
		buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
		mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
			buf_size1, buf_size2);
		break;
	default:
		mfc_err("Unknown codec set for encoding: %d\n",
			ctx->codec_mode);
		return -EINVAL;
	}
	return 0;
}
617
618static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
619{
620 struct s5p_mfc_dev *dev = ctx->dev;
621 struct s5p_mfc_enc_params *p = &ctx->enc_params;
622 unsigned int reg;
623 unsigned int shm;
624
625 /* width */
626 mfc_write(dev, ctx->img_width, S5P_FIMV_ENC_HSIZE_PX);
627 /* height */
628 mfc_write(dev, ctx->img_height, S5P_FIMV_ENC_VSIZE_PX);
629 /* pictype : enable, IDR period */
630 reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
631 reg |= (1 << 18);
632 reg &= ~(0xFFFF);
633 reg |= p->gop_size;
634 mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
635 mfc_write(dev, 0, S5P_FIMV_ENC_B_RECON_WRITE_ON);
636 /* multi-slice control */
637 /* multi-slice MB number or bit size */
638 mfc_write(dev, p->slice_mode, S5P_FIMV_ENC_MSLICE_CTRL);
639 if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
640 mfc_write(dev, p->slice_mb, S5P_FIMV_ENC_MSLICE_MB);
641 } else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
642 mfc_write(dev, p->slice_bit, S5P_FIMV_ENC_MSLICE_BIT);
643 } else {
644 mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_MB);
645 mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_BIT);
646 }
647 /* cyclic intra refresh */
648 mfc_write(dev, p->intra_refresh_mb, S5P_FIMV_ENC_CIR_CTRL);
649 /* memory structure cur. frame */
650 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
651 mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
652 else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
653 mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
654 /* padding control & value */
655 reg = mfc_read(dev, S5P_FIMV_ENC_PADDING_CTRL);
656 if (p->pad) {
657 /** enable */
658 reg |= (1 << 31);
659 /** cr value */
660 reg &= ~(0xFF << 16);
661 reg |= (p->pad_cr << 16);
662 /** cb value */
663 reg &= ~(0xFF << 8);
664 reg |= (p->pad_cb << 8);
665 /** y value */
666 reg &= ~(0xFF);
667 reg |= (p->pad_luma);
668 } else {
669 /** disable & all value clear */
670 reg = 0;
671 }
672 mfc_write(dev, reg, S5P_FIMV_ENC_PADDING_CTRL);
673 /* rate control config. */
674 reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
675 /** frame-level rate control */
676 reg &= ~(0x1 << 9);
677 reg |= (p->rc_frame << 9);
678 mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
679 /* bit rate */
680 if (p->rc_frame)
681 mfc_write(dev, p->rc_bitrate,
682 S5P_FIMV_ENC_RC_BIT_RATE);
683 else
684 mfc_write(dev, 0, S5P_FIMV_ENC_RC_BIT_RATE);
685 /* reaction coefficient */
686 if (p->rc_frame)
687 mfc_write(dev, p->rc_reaction_coeff, S5P_FIMV_ENC_RC_RPARA);
688 shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
689 /* seq header ctrl */
690 shm &= ~(0x1 << 3);
691 shm |= (p->seq_hdr_mode << 3);
692 /* frame skip mode */
693 shm &= ~(0x3 << 1);
694 shm |= (p->frame_skip_mode << 1);
695 s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
696 /* fixed target bit */
697 s5p_mfc_write_shm(ctx, p->fixed_target_bit, RC_CONTROL_CONFIG);
698 return 0;
699}
700
701static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
702{
703 struct s5p_mfc_dev *dev = ctx->dev;
704 struct s5p_mfc_enc_params *p = &ctx->enc_params;
705 struct s5p_mfc_h264_enc_params *p_264 = &p->codec.h264;
706 unsigned int reg;
707 unsigned int shm;
708
709 s5p_mfc_set_enc_params(ctx);
710 /* pictype : number of B */
711 reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
712 /* num_b_frame - 0 ~ 2 */
713 reg &= ~(0x3 << 16);
714 reg |= (p->num_b_frame << 16);
715 mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
716 /* profile & level */
717 reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
718 /* level */
719 reg &= ~(0xFF << 8);
720 reg |= (p_264->level << 8);
721 /* profile - 0 ~ 2 */
722 reg &= ~(0x3F);
723 reg |= p_264->profile;
724 mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
725 /* interlace */
726 mfc_write(dev, p->interlace, S5P_FIMV_ENC_PIC_STRUCT);
727 /* height */
728 if (p->interlace)
729 mfc_write(dev, ctx->img_height >> 1, S5P_FIMV_ENC_VSIZE_PX);
730 /* loopfilter ctrl */
731 mfc_write(dev, p_264->loop_filter_mode, S5P_FIMV_ENC_LF_CTRL);
732 /* loopfilter alpha offset */
733 if (p_264->loop_filter_alpha < 0) {
734 reg = 0x10;
735 reg |= (0xFF - p_264->loop_filter_alpha) + 1;
736 } else {
737 reg = 0x00;
738 reg |= (p_264->loop_filter_alpha & 0xF);
739 }
740 mfc_write(dev, reg, S5P_FIMV_ENC_ALPHA_OFF);
741 /* loopfilter beta offset */
742 if (p_264->loop_filter_beta < 0) {
743 reg = 0x10;
744 reg |= (0xFF - p_264->loop_filter_beta) + 1;
745 } else {
746 reg = 0x00;
747 reg |= (p_264->loop_filter_beta & 0xF);
748 }
749 mfc_write(dev, reg, S5P_FIMV_ENC_BETA_OFF);
750 /* entropy coding mode */
751 if (p_264->entropy_mode == V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC)
752 mfc_write(dev, 1, S5P_FIMV_ENC_H264_ENTROPY_MODE);
753 else
754 mfc_write(dev, 0, S5P_FIMV_ENC_H264_ENTROPY_MODE);
755 /* number of ref. picture */
756 reg = mfc_read(dev, S5P_FIMV_ENC_H264_NUM_OF_REF);
757 /* num of ref. pictures of P */
758 reg &= ~(0x3 << 5);
759 reg |= (p_264->num_ref_pic_4p << 5);
760 /* max number of ref. pictures */
761 reg &= ~(0x1F);
762 reg |= p_264->max_ref_pic;
763 mfc_write(dev, reg, S5P_FIMV_ENC_H264_NUM_OF_REF);
764 /* 8x8 transform enable */
765 mfc_write(dev, p_264->_8x8_transform, S5P_FIMV_ENC_H264_TRANS_FLAG);
766 /* rate control config. */
767 reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
768 /* macroblock level rate control */
769 reg &= ~(0x1 << 8);
770 reg |= (p_264->rc_mb << 8);
771 /* frame QP */
772 reg &= ~(0x3F);
773 reg |= p_264->rc_frame_qp;
774 mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
775 /* frame rate */
776 if (p->rc_frame && p->rc_framerate_denom)
777 mfc_write(dev, p->rc_framerate_num * 1000
778 / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
779 else
780 mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
781 /* max & min value of QP */
782 reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
783 /* max QP */
784 reg &= ~(0x3F << 8);
785 reg |= (p_264->rc_max_qp << 8);
786 /* min QP */
787 reg &= ~(0x3F);
788 reg |= p_264->rc_min_qp;
789 mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
790 /* macroblock adaptive scaling features */
791 if (p_264->rc_mb) {
792 reg = mfc_read(dev, S5P_FIMV_ENC_RC_MB_CTRL);
793 /* dark region */
794 reg &= ~(0x1 << 3);
795 reg |= (p_264->rc_mb_dark << 3);
796 /* smooth region */
797 reg &= ~(0x1 << 2);
798 reg |= (p_264->rc_mb_smooth << 2);
799 /* static region */
800 reg &= ~(0x1 << 1);
801 reg |= (p_264->rc_mb_static << 1);
802 /* high activity region */
803 reg &= ~(0x1);
804 reg |= p_264->rc_mb_activity;
805 mfc_write(dev, reg, S5P_FIMV_ENC_RC_MB_CTRL);
806 }
807 if (!p->rc_frame &&
808 !p_264->rc_mb) {
809 shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
810 shm &= ~(0xFFF);
811 shm |= ((p_264->rc_b_frame_qp & 0x3F) << 6);
812 shm |= (p_264->rc_p_frame_qp & 0x3F);
813 s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
814 }
815 /* extended encoder ctrl */
816 shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
817 /* AR VUI control */
818 shm &= ~(0x1 << 15);
819 shm |= (p_264->vui_sar << 1);
820 s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
821 if (p_264->vui_sar) {
822 /* aspect ration IDC */
823 shm = s5p_mfc_read_shm(ctx, SAMPLE_ASPECT_RATIO_IDC);
824 shm &= ~(0xFF);
825 shm |= p_264->vui_sar_idc;
826 s5p_mfc_write_shm(ctx, shm, SAMPLE_ASPECT_RATIO_IDC);
827 if (p_264->vui_sar_idc == 0xFF) {
828 /* sample AR info */
829 shm = s5p_mfc_read_shm(ctx, EXTENDED_SAR);
830 shm &= ~(0xFFFFFFFF);
831 shm |= p_264->vui_ext_sar_width << 16;
832 shm |= p_264->vui_ext_sar_height;
833 s5p_mfc_write_shm(ctx, shm, EXTENDED_SAR);
834 }
835 }
836 /* intra picture period for H.264 */
837 shm = s5p_mfc_read_shm(ctx, H264_I_PERIOD);
838 /* control */
839 shm &= ~(0x1 << 16);
840 shm |= (p_264->open_gop << 16);
841 /* value */
842 if (p_264->open_gop) {
843 shm &= ~(0xFFFF);
844 shm |= p_264->open_gop_size;
845 }
846 s5p_mfc_write_shm(ctx, shm, H264_I_PERIOD);
847 /* extended encoder ctrl */
848 shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
849 /* vbv buffer size */
850 if (p->frame_skip_mode ==
851 V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
852 shm &= ~(0xFFFF << 16);
853 shm |= (p_264->cpb_size << 16);
854 }
855 s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
856 return 0;
857}
858
859static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
860{
861 struct s5p_mfc_dev *dev = ctx->dev;
862 struct s5p_mfc_enc_params *p = &ctx->enc_params;
863 struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
864 unsigned int reg;
865 unsigned int shm;
866 unsigned int framerate;
867
868 s5p_mfc_set_enc_params(ctx);
869 /* pictype : number of B */
870 reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
871 /* num_b_frame - 0 ~ 2 */
872 reg &= ~(0x3 << 16);
873 reg |= (p->num_b_frame << 16);
874 mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
875 /* profile & level */
876 reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
877 /* level */
878 reg &= ~(0xFF << 8);
879 reg |= (p_mpeg4->level << 8);
880 /* profile - 0 ~ 2 */
881 reg &= ~(0x3F);
882 reg |= p_mpeg4->profile;
883 mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
884 /* quarter_pixel */
885 mfc_write(dev, p_mpeg4->quarter_pixel, S5P_FIMV_ENC_MPEG4_QUART_PXL);
886 /* qp */
887 if (!p->rc_frame) {
888 shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
889 shm &= ~(0xFFF);
890 shm |= ((p_mpeg4->rc_b_frame_qp & 0x3F) << 6);
891 shm |= (p_mpeg4->rc_p_frame_qp & 0x3F);
892 s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
893 }
894 /* frame rate */
895 if (p->rc_frame) {
896 if (p->rc_framerate_denom > 0) {
897 framerate = p->rc_framerate_num * 1000 /
898 p->rc_framerate_denom;
899 mfc_write(dev, framerate,
900 S5P_FIMV_ENC_RC_FRAME_RATE);
901 shm = s5p_mfc_read_shm(ctx, RC_VOP_TIMING);
902 shm &= ~(0xFFFFFFFF);
903 shm |= (1 << 31);
904 shm |= ((p->rc_framerate_num & 0x7FFF) << 16);
905 shm |= (p->rc_framerate_denom & 0xFFFF);
906 s5p_mfc_write_shm(ctx, shm, RC_VOP_TIMING);
907 }
908 } else {
909 mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
910 }
911 /* rate control config. */
912 reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
913 /* frame QP */
914 reg &= ~(0x3F);
915 reg |= p_mpeg4->rc_frame_qp;
916 mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
917 /* max & min value of QP */
918 reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
919 /* max QP */
920 reg &= ~(0x3F << 8);
921 reg |= (p_mpeg4->rc_max_qp << 8);
922 /* min QP */
923 reg &= ~(0x3F);
924 reg |= p_mpeg4->rc_min_qp;
925 mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
926 /* extended encoder ctrl */
927 shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
928 /* vbv buffer size */
929 if (p->frame_skip_mode ==
930 V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
931 shm &= ~(0xFFFF << 16);
932 shm |= (p->vbv_size << 16);
933 }
934 s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
935 return 0;
936}
937
938static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
939{
940 struct s5p_mfc_dev *dev = ctx->dev;
941 struct s5p_mfc_enc_params *p = &ctx->enc_params;
942 struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
943 unsigned int reg;
944 unsigned int shm;
945
946 s5p_mfc_set_enc_params(ctx);
947 /* qp */
948 if (!p->rc_frame) {
949 shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
950 shm &= ~(0xFFF);
951 shm |= (p_h263->rc_p_frame_qp & 0x3F);
952 s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
953 }
954 /* frame rate */
955 if (p->rc_frame && p->rc_framerate_denom)
956 mfc_write(dev, p->rc_framerate_num * 1000
957 / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
958 else
959 mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
960 /* rate control config. */
961 reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
962 /* frame QP */
963 reg &= ~(0x3F);
964 reg |= p_h263->rc_frame_qp;
965 mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
966 /* max & min value of QP */
967 reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
968 /* max QP */
969 reg &= ~(0x3F << 8);
970 reg |= (p_h263->rc_max_qp << 8);
971 /* min QP */
972 reg &= ~(0x3F);
973 reg |= p_h263->rc_min_qp;
974 mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
975 /* extended encoder ctrl */
976 shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
977 /* vbv buffer size */
978 if (p->frame_skip_mode ==
979 V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
980 shm &= ~(0xFFFF << 16);
981 shm |= (p->vbv_size << 16);
982 }
983 s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
984 return 0;
985}
986
987/* Initialize decoding */
988int s5p_mfc_init_decode(struct s5p_mfc_ctx *ctx)
989{
990 struct s5p_mfc_dev *dev = ctx->dev;
991
992 s5p_mfc_set_shared_buffer(ctx);
993 /* Setup loop filter, for decoding this is only valid for MPEG4 */
994 if (ctx->codec_mode == S5P_FIMV_CODEC_MPEG4_DEC)
995 mfc_write(dev, ctx->loop_filter_mpeg4, S5P_FIMV_ENC_LF_CTRL);
996 else
997 mfc_write(dev, 0, S5P_FIMV_ENC_LF_CTRL);
998 mfc_write(dev, ((ctx->slice_interface & S5P_FIMV_SLICE_INT_MASK) <<
999 S5P_FIMV_SLICE_INT_SHIFT) | (ctx->display_delay_enable <<
1000 S5P_FIMV_DDELAY_ENA_SHIFT) | ((ctx->display_delay &
1001 S5P_FIMV_DDELAY_VAL_MASK) << S5P_FIMV_DDELAY_VAL_SHIFT),
1002 S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
1003 mfc_write(dev,
1004 ((S5P_FIMV_CH_SEQ_HEADER & S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT)
1005 | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
1006 return 0;
1007}
1008
1009static void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
1010{
1011 struct s5p_mfc_dev *dev = ctx->dev;
1012 unsigned int dpb;
1013
1014 if (flush)
1015 dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) | (
1016 S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
1017 else
1018 dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
1019 ~(S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
1020 mfc_write(dev, dpb, S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
1021}
1022
1023/* Decode a single frame */
1024int s5p_mfc_decode_one_frame(struct s5p_mfc_ctx *ctx,
1025 enum s5p_mfc_decode_arg last_frame)
1026{
1027 struct s5p_mfc_dev *dev = ctx->dev;
1028
1029 mfc_write(dev, ctx->dec_dst_flag, S5P_FIMV_SI_CH0_RELEASE_BUF);
1030 s5p_mfc_set_shared_buffer(ctx);
1031 s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag);
1032 /* Issue different commands to instance basing on whether it
1033 * is the last frame or not. */
1034 switch (last_frame) {
1035 case MFC_DEC_FRAME:
1036 mfc_write(dev, ((S5P_FIMV_CH_FRAME_START & S5P_FIMV_CH_MASK) <<
1037 S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
1038 break;
1039 case MFC_DEC_LAST_FRAME:
1040 mfc_write(dev, ((S5P_FIMV_CH_LAST_FRAME & S5P_FIMV_CH_MASK) <<
1041 S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
1042 break;
1043 case MFC_DEC_RES_CHANGE:
1044 mfc_write(dev, ((S5P_FIMV_CH_FRAME_START_REALLOC &
1045 S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
1046 S5P_FIMV_SI_CH0_INST_ID);
1047 break;
1048 }
1049 mfc_debug(2, "Decoding a usual frame\n");
1050 return 0;
1051}
1052
1053int s5p_mfc_init_encode(struct s5p_mfc_ctx *ctx)
1054{
1055 struct s5p_mfc_dev *dev = ctx->dev;
1056
1057 if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC)
1058 s5p_mfc_set_enc_params_h264(ctx);
1059 else if (ctx->codec_mode == S5P_FIMV_CODEC_MPEG4_ENC)
1060 s5p_mfc_set_enc_params_mpeg4(ctx);
1061 else if (ctx->codec_mode == S5P_FIMV_CODEC_H263_ENC)
1062 s5p_mfc_set_enc_params_h263(ctx);
1063 else {
1064 mfc_err("Unknown codec for encoding (%x)\n",
1065 ctx->codec_mode);
1066 return -EINVAL;
1067 }
1068 s5p_mfc_set_shared_buffer(ctx);
1069 mfc_write(dev, ((S5P_FIMV_CH_SEQ_HEADER << 16) & 0x70000) |
1070 (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
1071 return 0;
1072}
1073
1074/* Encode a single frame */
1075int s5p_mfc_encode_one_frame(struct s5p_mfc_ctx *ctx)
1076{
1077 struct s5p_mfc_dev *dev = ctx->dev;
1078 /* memory structure cur. frame */
1079 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
1080 mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
1081 else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
1082 mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
1083 s5p_mfc_set_shared_buffer(ctx);
1084 mfc_write(dev, (S5P_FIMV_CH_FRAME_START << 16 & 0x70000) |
1085 (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
1086 return 0;
1087}
1088
1089static int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev)
1090{
1091 unsigned long flags;
1092 int new_ctx;
1093 int cnt;
1094
1095 spin_lock_irqsave(&dev->condlock, flags);
1096 new_ctx = (dev->curr_ctx + 1) % MFC_NUM_CONTEXTS;
1097 cnt = 0;
1098 while (!test_bit(new_ctx, &dev->ctx_work_bits)) {
1099 new_ctx = (new_ctx + 1) % MFC_NUM_CONTEXTS;
1100 if (++cnt > MFC_NUM_CONTEXTS) {
1101 /* No contexts to run */
1102 spin_unlock_irqrestore(&dev->condlock, flags);
1103 return -EAGAIN;
1104 }
1105 }
1106 spin_unlock_irqrestore(&dev->condlock, flags);
1107 return new_ctx;
1108}
1109
/* Handle a mid-stream resolution change: hand the hardware an empty
 * stream buffer and issue a decode-with-reallocation command so it can
 * rebuild its frame buffers for the new resolution. */
static void s5p_mfc_run_res_change(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	s5p_mfc_set_dec_stream_buffer(ctx, 0, 0, 0);
	dev->curr_ctx = ctx->num;
	s5p_mfc_clean_ctx_int_flags(ctx);
	s5p_mfc_decode_one_frame(ctx, MFC_DEC_RES_CHANGE);
}
1119
1120static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
1121{
1122 struct s5p_mfc_dev *dev = ctx->dev;
1123 struct s5p_mfc_buf *temp_vb;
1124 unsigned long flags;
1125 unsigned int index;
1126
1127 spin_lock_irqsave(&dev->irqlock, flags);
1128 /* Frames are being decoded */
1129 if (list_empty(&ctx->src_queue)) {
1130 mfc_debug(2, "No src buffers\n");
1131 spin_unlock_irqrestore(&dev->irqlock, flags);
1132 return -EAGAIN;
1133 }
1134 /* Get the next source buffer */
1135 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1136 temp_vb->used = 1;
1137 s5p_mfc_set_dec_stream_buffer(ctx,
1138 vb2_dma_contig_plane_paddr(temp_vb->b, 0), ctx->consumed_stream,
1139 temp_vb->b->v4l2_planes[0].bytesused);
1140 spin_unlock_irqrestore(&dev->irqlock, flags);
1141 index = temp_vb->b->v4l2_buf.index;
1142 dev->curr_ctx = ctx->num;
1143 s5p_mfc_clean_ctx_int_flags(ctx);
1144 if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
1145 last_frame = MFC_DEC_LAST_FRAME;
1146 mfc_debug(2, "Setting ctx->state to FINISHING\n");
1147 ctx->state = MFCINST_FINISHING;
1148 }
1149 s5p_mfc_decode_one_frame(ctx, last_frame);
1150 return 0;
1151}
1152
1153static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
1154{
1155 struct s5p_mfc_dev *dev = ctx->dev;
1156 unsigned long flags;
1157 struct s5p_mfc_buf *dst_mb;
1158 struct s5p_mfc_buf *src_mb;
1159 unsigned long src_y_addr, src_c_addr, dst_addr;
1160 unsigned int dst_size;
1161
1162 spin_lock_irqsave(&dev->irqlock, flags);
1163 if (list_empty(&ctx->src_queue)) {
1164 mfc_debug(2, "no src buffers\n");
1165 spin_unlock_irqrestore(&dev->irqlock, flags);
1166 return -EAGAIN;
1167 }
1168 if (list_empty(&ctx->dst_queue)) {
1169 mfc_debug(2, "no dst buffers\n");
1170 spin_unlock_irqrestore(&dev->irqlock, flags);
1171 return -EAGAIN;
1172 }
1173 src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1174 src_mb->used = 1;
1175 src_y_addr = vb2_dma_contig_plane_paddr(src_mb->b, 0);
1176 src_c_addr = vb2_dma_contig_plane_paddr(src_mb->b, 1);
1177 s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr, src_c_addr);
1178 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
1179 dst_mb->used = 1;
1180 dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
1181 dst_size = vb2_plane_size(dst_mb->b, 0);
1182 s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
1183 spin_unlock_irqrestore(&dev->irqlock, flags);
1184 dev->curr_ctx = ctx->num;
1185 s5p_mfc_clean_ctx_int_flags(ctx);
1186 s5p_mfc_encode_one_frame(ctx);
1187 return 0;
1188}
1189
/* Start decoder initialization (header parsing) using the first queued
 * source buffer as the header bitstream.
 * NOTE(review): unlike s5p_mfc_run_init_dec_buffers(), this does not
 * check src_queue for emptiness before list_entry() -- presumably the
 * caller guarantees a queued buffer; confirm against the state machine. */
static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;
	struct s5p_mfc_buf *temp_vb;

	/* Initializing decoding - parsing header */
	spin_lock_irqsave(&dev->irqlock, flags);
	mfc_debug(2, "Preparing to init decoding\n");
	temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
	s5p_mfc_set_dec_desc_buffer(ctx);
	mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
	s5p_mfc_set_dec_stream_buffer(ctx,
		vb2_dma_contig_plane_paddr(temp_vb->b, 0),
		0, temp_vb->b->v4l2_planes[0].bytesused);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	dev->curr_ctx = ctx->num;
	s5p_mfc_clean_ctx_int_flags(ctx);
	s5p_mfc_init_decode(ctx);
}
1210
1211static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
1212{
1213 struct s5p_mfc_dev *dev = ctx->dev;
1214 unsigned long flags;
1215 struct s5p_mfc_buf *dst_mb;
1216 unsigned long dst_addr;
1217 unsigned int dst_size;
1218
1219 s5p_mfc_set_enc_ref_buffer(ctx);
1220 spin_lock_irqsave(&dev->irqlock, flags);
1221 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
1222 dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
1223 dst_size = vb2_plane_size(dst_mb->b, 0);
1224 s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
1225 spin_unlock_irqrestore(&dev->irqlock, flags);
1226 dev->curr_ctx = ctx->num;
1227 s5p_mfc_clean_ctx_int_flags(ctx);
1228 s5p_mfc_init_encode(ctx);
1229}
1230
1231static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
1232{
1233 struct s5p_mfc_dev *dev = ctx->dev;
1234 unsigned long flags;
1235 struct s5p_mfc_buf *temp_vb;
1236 int ret;
1237
1238 /*
1239 * Header was parsed now starting processing
1240 * First set the output frame buffers
1241 */
1242 if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
1243 mfc_err("It seems that not all destionation buffers were "
1244 "mmaped\nMFC requires that all destination are mmaped "
1245 "before starting processing\n");
1246 return -EAGAIN;
1247 }
1248 spin_lock_irqsave(&dev->irqlock, flags);
1249 if (list_empty(&ctx->src_queue)) {
1250 mfc_err("Header has been deallocated in the middle of"
1251 " initialization\n");
1252 spin_unlock_irqrestore(&dev->irqlock, flags);
1253 return -EIO;
1254 }
1255 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1256 mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
1257 s5p_mfc_set_dec_stream_buffer(ctx,
1258 vb2_dma_contig_plane_paddr(temp_vb->b, 0),
1259 0, temp_vb->b->v4l2_planes[0].bytesused);
1260 spin_unlock_irqrestore(&dev->irqlock, flags);
1261 dev->curr_ctx = ctx->num;
1262 s5p_mfc_clean_ctx_int_flags(ctx);
1263 ret = s5p_mfc_set_dec_frame_buffer(ctx);
1264 if (ret) {
1265 mfc_err("Failed to alloc frame mem\n");
1266 ctx->state = MFCINST_ERROR;
1267 }
1268 return ret;
1269}
1270
/* Try running an operation on hardware.
 * Scheduler entry point: takes the hardware lock, picks the next
 * context with pending work and dispatches one operation based on the
 * context's type and state.  On dispatch failure the lock and the
 * clock reference are released here, since no completion interrupt
 * will arrive to do it. */
void s5p_mfc_try_run(struct s5p_mfc_dev *dev)
{
	struct s5p_mfc_ctx *ctx;
	int new_ctx;
	unsigned int ret = 0;

	if (test_bit(0, &dev->enter_suspend)) {
		mfc_debug(1, "Entering suspend so do not schedule any jobs\n");
		return;
	}
	/* Check whether hardware is not running */
	if (test_and_set_bit(0, &dev->hw_lock) != 0) {
		/* This is perfectly ok, the scheduled ctx should wait */
		mfc_debug(1, "Couldn't lock HW\n");
		return;
	}
	/* Choose the context to run */
	new_ctx = s5p_mfc_get_new_ctx(dev);
	if (new_ctx < 0) {
		/* No contexts to run */
		if (test_and_clear_bit(0, &dev->hw_lock) == 0) {
			mfc_err("Failed to unlock hardware\n");
			return;
		}
		mfc_debug(1, "No ctx is scheduled to be run\n");
		return;
	}
	ctx = dev->ctx[new_ctx];
	/* Got context to run in ctx */
	/*
	 * Last frame has already been sent to MFC.
	 * Now obtaining frames from MFC buffer
	 */
	/* Clock stays on until the matching interrupt handler (or the
	 * error path below) turns it off again */
	s5p_mfc_clock_on();
	if (ctx->type == MFCINST_DECODER) {
		s5p_mfc_set_dec_desc_buffer(ctx);
		/* Dispatch one decoder operation for the current state */
		switch (ctx->state) {
		case MFCINST_FINISHING:
			s5p_mfc_run_dec_frame(ctx, MFC_DEC_LAST_FRAME);
			break;
		case MFCINST_RUNNING:
			ret = s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
			break;
		case MFCINST_INIT:
			s5p_mfc_clean_ctx_int_flags(ctx);
			ret = s5p_mfc_open_inst_cmd(ctx);
			break;
		case MFCINST_RETURN_INST:
			s5p_mfc_clean_ctx_int_flags(ctx);
			ret = s5p_mfc_close_inst_cmd(ctx);
			break;
		case MFCINST_GOT_INST:
			s5p_mfc_run_init_dec(ctx);
			break;
		case MFCINST_HEAD_PARSED:
			ret = s5p_mfc_run_init_dec_buffers(ctx);
			mfc_debug(1, "head parsed\n");
			break;
		case MFCINST_RES_CHANGE_INIT:
			s5p_mfc_run_res_change(ctx);
			break;
		case MFCINST_RES_CHANGE_FLUSH:
			s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
			break;
		case MFCINST_RES_CHANGE_END:
			mfc_debug(2, "Finished remaining frames after resolution change\n");
			ctx->capture_state = QUEUE_FREE;
			mfc_debug(2, "Will re-init the codec\n");
			s5p_mfc_run_init_dec(ctx);
			break;
		default:
			ret = -EAGAIN;
		}
	} else if (ctx->type == MFCINST_ENCODER) {
		/* Dispatch one encoder operation for the current state */
		switch (ctx->state) {
		case MFCINST_FINISHING:
		case MFCINST_RUNNING:
			ret = s5p_mfc_run_enc_frame(ctx);
			break;
		case MFCINST_INIT:
			s5p_mfc_clean_ctx_int_flags(ctx);
			ret = s5p_mfc_open_inst_cmd(ctx);
			break;
		case MFCINST_RETURN_INST:
			s5p_mfc_clean_ctx_int_flags(ctx);
			ret = s5p_mfc_close_inst_cmd(ctx);
			break;
		case MFCINST_GOT_INST:
			s5p_mfc_run_init_enc(ctx);
			break;
		default:
			ret = -EAGAIN;
		}
	} else {
		mfc_err("Invalid context type: %d\n", ctx->type);
		ret = -EAGAIN;
	}

	if (ret) {
		/* Free hardware lock */
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			mfc_err("Failed to unlock hardware\n");

		/* This is indeed important, as no operation has been
		 * scheduled, reduce the clock count as no one will
		 * ever do this, because no interrupt related to this try_run
		 * will ever come from hardware. */
		s5p_mfc_clock_off();
	}
}
1382
1383
1384void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq)
1385{
1386 struct s5p_mfc_buf *b;
1387 int i;
1388
1389 while (!list_empty(lh)) {
1390 b = list_entry(lh->next, struct s5p_mfc_buf, list);
1391 for (i = 0; i < b->b->num_planes; i++)
1392 vb2_set_plane_payload(b->b, i, 0);
1393 vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR);
1394 list_del(&b->list);
1395 }
1396}
1397
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_opr.h b/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
new file mode 100644
index 000000000000..db83836e6a9f
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
@@ -0,0 +1,91 @@
1/*
2 * drivers/media/video/samsung/mfc5/s5p_mfc_opr.h
3 *
4 * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
5 * Contains declarations of hw related functions.
6 *
7 * Kamil Debski, Copyright (C) 2011 Samsung Electronics
8 * http://www.samsung.com/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef S5P_MFC_OPR_H_
16#define S5P_MFC_OPR_H_
17
18#include "s5p_mfc_common.h"
19
20int s5p_mfc_init_decode(struct s5p_mfc_ctx *ctx);
21int s5p_mfc_init_encode(struct s5p_mfc_ctx *mfc_ctx);
22
23/* Decoding functions */
24int s5p_mfc_set_dec_frame_buffer(struct s5p_mfc_ctx *ctx);
25int s5p_mfc_set_dec_stream_buffer(struct s5p_mfc_ctx *ctx, int buf_addr,
26 unsigned int start_num_byte,
27 unsigned int buf_size);
28
29/* Encoding functions */
30void s5p_mfc_set_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
31 unsigned long y_addr, unsigned long c_addr);
32int s5p_mfc_set_enc_stream_buffer(struct s5p_mfc_ctx *ctx,
33 unsigned long addr, unsigned int size);
34void s5p_mfc_get_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
35 unsigned long *y_addr, unsigned long *c_addr);
36int s5p_mfc_set_enc_ref_buffer(struct s5p_mfc_ctx *mfc_ctx);
37
38int s5p_mfc_decode_one_frame(struct s5p_mfc_ctx *ctx,
39 enum s5p_mfc_decode_arg last_frame);
40int s5p_mfc_encode_one_frame(struct s5p_mfc_ctx *mfc_ctx);
41
42/* Memory allocation */
43int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx);
44void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx);
45void s5p_mfc_release_dec_desc_buffer(struct s5p_mfc_ctx *ctx);
46
47int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx);
48void s5p_mfc_release_codec_buffers(struct s5p_mfc_ctx *ctx);
49
50int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx);
51void s5p_mfc_release_instance_buffer(struct s5p_mfc_ctx *ctx);
52
53void s5p_mfc_try_run(struct s5p_mfc_dev *dev);
54void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq);
55
/*
 * Register accessor macros.
 * NOTE(review): each macro expands a reference to a local variable
 * named 'dev' (struct s5p_mfc_dev *) which must be in scope at every
 * call site -- confirm this hidden dependency when adding new users.
 */
#define s5p_mfc_get_dspl_y_adr()	(readl(dev->regs_base + \
					S5P_FIMV_SI_DISPLAY_Y_ADR) << \
					MFC_OFFSET_SHIFT)
#define s5p_mfc_get_dec_y_adr()		(readl(dev->regs_base + \
					S5P_FIMV_SI_DISPLAY_Y_ADR) << \
					MFC_OFFSET_SHIFT)
#define s5p_mfc_get_dspl_status()	readl(dev->regs_base + \
					S5P_FIMV_SI_DISPLAY_STATUS)
#define s5p_mfc_get_frame_type()	(readl(dev->regs_base + \
					S5P_FIMV_DECODE_FRAME_TYPE) \
					& S5P_FIMV_DECODE_FRAME_MASK)
#define s5p_mfc_get_consumed_stream()	readl(dev->regs_base + \
					S5P_FIMV_SI_CONSUMED_BYTES)
#define s5p_mfc_get_int_reason()	(readl(dev->regs_base + \
					S5P_FIMV_RISC2HOST_CMD) & \
					S5P_FIMV_RISC2HOST_CMD_MASK)
#define s5p_mfc_get_int_err()		readl(dev->regs_base + \
					S5P_FIMV_RISC2HOST_ARG2)
#define s5p_mfc_err_dec(x)		(((x) & S5P_FIMV_ERR_DEC_MASK) >> \
					S5P_FIMV_ERR_DEC_SHIFT)
#define s5p_mfc_err_dspl(x)		(((x) & S5P_FIMV_ERR_DSPL_MASK) >> \
					S5P_FIMV_ERR_DSPL_SHIFT)
#define s5p_mfc_get_img_width()		readl(dev->regs_base + \
					S5P_FIMV_SI_HRESOL)
#define s5p_mfc_get_img_height()	readl(dev->regs_base + \
					S5P_FIMV_SI_VRESOL)
#define s5p_mfc_get_dpb_count()		readl(dev->regs_base + \
					S5P_FIMV_SI_BUF_NUMBER)
#define s5p_mfc_get_inst_no()		readl(dev->regs_base + \
					S5P_FIMV_RISC2HOST_ARG1)
#define s5p_mfc_get_enc_strm_size()	readl(dev->regs_base + \
					S5P_FIMV_ENC_SI_STRM_SIZE)
#define s5p_mfc_get_enc_slice_type()	readl(dev->regs_base + \
					S5P_FIMV_ENC_SI_SLICE_TYPE)
90
91#endif /* S5P_MFC_OPR_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_pm.c b/drivers/media/video/s5p-mfc/s5p_mfc_pm.c
new file mode 100644
index 000000000000..f6a3035c4fb7
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_pm.c
@@ -0,0 +1,117 @@
1/*
2 * linux/drivers/media/video/s5p-mfc/s5p_mfc_pm.c
3 *
4 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include <linux/clk.h>
14#include <linux/err.h>
15#include <linux/platform_device.h>
16#ifdef CONFIG_PM_RUNTIME
17#include <linux/pm_runtime.h>
18#endif
19#include "s5p_mfc_common.h"
20#include "s5p_mfc_debug.h"
21#include "s5p_mfc_pm.h"
22
23#define MFC_CLKNAME "sclk_mfc"
24#define MFC_GATE_CLK_NAME "mfc"
25
26#define CLK_DEBUG
27
28static struct s5p_mfc_pm *pm;
29static struct s5p_mfc_dev *p_dev;
30
31#ifdef CLK_DEBUG
32atomic_t clk_ref;
33#endif
34
35int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
36{
37 int ret = 0;
38
39 pm = &dev->pm;
40 p_dev = dev;
41 pm->clock_gate = clk_get(&dev->plat_dev->dev, MFC_GATE_CLK_NAME);
42 if (IS_ERR(pm->clock_gate)) {
43 mfc_err("Failed to get clock-gating control\n");
44 ret = -ENOENT;
45 goto err_g_ip_clk;
46 }
47 pm->clock = clk_get(&dev->plat_dev->dev, MFC_CLKNAME);
48 if (IS_ERR(pm->clock)) {
49 mfc_err("Failed to get MFC clock\n");
50 ret = -ENOENT;
51 goto err_g_ip_clk_2;
52 }
53 atomic_set(&pm->power, 0);
54#ifdef CONFIG_PM_RUNTIME
55 pm->device = &dev->plat_dev->dev;
56 pm_runtime_enable(pm->device);
57#endif
58#ifdef CLK_DEBUG
59 atomic_set(&clk_ref, 0);
60#endif
61 return 0;
62err_g_ip_clk_2:
63 clk_put(pm->clock_gate);
64err_g_ip_clk:
65 return ret;
66}
67
68void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
69{
70 clk_put(pm->clock_gate);
71 clk_put(pm->clock);
72#ifdef CONFIG_PM_RUNTIME
73 pm_runtime_disable(pm->device);
74#endif
75}
76
77int s5p_mfc_clock_on(void)
78{
79 int ret;
80#ifdef CLK_DEBUG
81 atomic_inc(&clk_ref);
82 mfc_debug(3, "+ %d", atomic_read(&clk_ref));
83#endif
84 ret = clk_enable(pm->clock_gate);
85 return ret;
86}
87
/* s5p_mfc_clock_off() - disable the MFC gating clock (pairs with clock_on). */
void s5p_mfc_clock_off(void)
{
#ifdef CLK_DEBUG
	/* balance the debug reference counter */
	atomic_dec(&clk_ref);
	mfc_debug(3, "- %d", atomic_read(&clk_ref));
#endif
	clk_disable(pm->clock_gate);
}
96
/*
 * s5p_mfc_power_on() - power the MFC block up.
 *
 * With runtime PM this takes a synchronous PM reference and returns its
 * result; otherwise it only records the power state in pm->power.
 */
int s5p_mfc_power_on(void)
{
#ifdef CONFIG_PM_RUNTIME
	return pm_runtime_get_sync(pm->device);
#else
	atomic_set(&pm->power, 1);
	return 0;
#endif
}
106
/*
 * s5p_mfc_power_off() - power the MFC block down.
 *
 * With runtime PM this drops the synchronous PM reference and returns
 * its result; otherwise it only records the power state in pm->power.
 */
int s5p_mfc_power_off(void)
{
#ifdef CONFIG_PM_RUNTIME
	return pm_runtime_put_sync(pm->device);
#else
	atomic_set(&pm->power, 0);
	return 0;
#endif
}
116
117
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_pm.h b/drivers/media/video/s5p-mfc/s5p_mfc_pm.h
new file mode 100644
index 000000000000..5107914f27e4
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_pm.h
@@ -0,0 +1,24 @@
1/*
2 * linux/drivers/media/video/s5p-mfc/s5p_mfc_pm.h
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#ifndef S5P_MFC_PM_H_
14#define S5P_MFC_PM_H_
15
16int s5p_mfc_init_pm(struct s5p_mfc_dev *dev);
17void s5p_mfc_final_pm(struct s5p_mfc_dev *dev);
18
19int s5p_mfc_clock_on(void);
20void s5p_mfc_clock_off(void);
21int s5p_mfc_power_on(void);
22int s5p_mfc_power_off(void);
23
24#endif /* S5P_MFC_PM_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_shm.c b/drivers/media/video/s5p-mfc/s5p_mfc_shm.c
new file mode 100644
index 000000000000..91fdbac8c37a
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_shm.c
@@ -0,0 +1,47 @@
1/*
2 * linux/drivers/media/video/s5p-mfc/s5p_mfc_shm.c
3 *
4 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#ifdef CONFIG_ARCH_EXYNOS4
14#include <linux/dma-mapping.h>
15#endif
16#include <linux/io.h>
17#include "s5p_mfc_common.h"
18#include "s5p_mfc_debug.h"
19
/*
 * s5p_mfc_init_shm() - allocate and map the context's shared-memory
 * buffer used to exchange parameters with the MFC firmware.
 *
 * On success ctx->shm_alloc, ctx->shm and ctx->shm_ofs are set and the
 * buffer is zeroed; returns 0.  On failure returns a negative errno with
 * the context fields cleared.
 */
int s5p_mfc_init_shm(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	void *shm_alloc_ctx = dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];

	ctx->shm_alloc = vb2_dma_contig_memops.alloc(shm_alloc_ctx,
							SHARED_BUF_SIZE);
	if (IS_ERR(ctx->shm_alloc)) {
		mfc_err("failed to allocate shared memory\n");
		return PTR_ERR(ctx->shm_alloc);
	}
	/* shm_ofs only keeps the offset from base (port a) */
	ctx->shm_ofs = s5p_mfc_mem_cookie(shm_alloc_ctx, ctx->shm_alloc)
							- dev->bank1;
	/* the firmware requires bank-aligned buffers */
	BUG_ON(ctx->shm_ofs & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
	ctx->shm = vb2_dma_contig_memops.vaddr(ctx->shm_alloc);
	if (!ctx->shm) {
		/* undo the allocation and leave the context clean */
		vb2_dma_contig_memops.put(ctx->shm_alloc);
		ctx->shm_ofs = 0;
		ctx->shm_alloc = NULL;
		mfc_err("failed to virt addr of shared memory\n");
		return -ENOMEM;
	}
	memset((void *)ctx->shm, 0, SHARED_BUF_SIZE);
	/* make sure the cleared buffer is visible to the device */
	wmb();
	return 0;
}
47
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_shm.h b/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
new file mode 100644
index 000000000000..764eac6bcc4c
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
@@ -0,0 +1,91 @@
1/*
2 * linux/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
3 *
4 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#ifndef S5P_MFC_SHM_H_
14#define S5P_MFC_SHM_H_
15
/*
 * Byte offsets into the firmware shared-memory buffer.
 * Comment markers: D = decoder, E = encoder, C = common.
 * NOTE(review): "EXTENEDED_DECODE_STATUS" is a typo in the identifier;
 * left unchanged because other files may reference it.
 */
enum MFC_SHM_OFS
{
	EXTENEDED_DECODE_STATUS	=	0x00,	/* D */
	SET_FRAME_TAG		=	0x04, /* D */
	GET_FRAME_TAG_TOP	=	0x08, /* D */
	GET_FRAME_TAG_BOT	=	0x0C, /* D */
	PIC_TIME_TOP		=	0x10, /* D */
	PIC_TIME_BOT		=	0x14, /* D */
	START_BYTE_NUM		=	0x18, /* D */

	CROP_INFO_H		=	0x20, /* D */
	CROP_INFO_V		=	0x24, /* D */
	EXT_ENC_CONTROL		=	0x28,	/* E */
	ENC_PARAM_CHANGE	=	0x2C,	/* E */
	RC_VOP_TIMING		=	0x30,	/* E, MPEG4 */
	HEC_PERIOD		=	0x34,	/* E, MPEG4 */
	METADATA_ENABLE		=	0x38, /* C */
	METADATA_STATUS		=	0x3C, /* C */
	METADATA_DISPLAY_INDEX	=	0x40,	/* C */
	EXT_METADATA_START_ADDR	=	0x44, /* C */
	PUT_EXTRADATA		=	0x48, /* C */
	EXTRADATA_ADDR		=	0x4C, /* C */

	ALLOC_LUMA_DPB_SIZE	=	0x64,	/* D */
	ALLOC_CHROMA_DPB_SIZE	=	0x68,	/* D */
	ALLOC_MV_SIZE		=	0x6C,	/* D */
	P_B_FRAME_QP		=	0x70,	/* E */
	SAMPLE_ASPECT_RATIO_IDC	=	0x74, /* E, H.264, depend on
				ASPECT_RATIO_VUI_ENABLE in EXT_ENC_CONTROL */
	EXTENDED_SAR		=	0x78, /* E, H.264, depend on
				ASPECT_RATIO_VUI_ENABLE in EXT_ENC_CONTROL */
	DISP_PIC_PROFILE	=	0x7C, /* D */
	FLUSH_CMD_TYPE		=	0x80, /* C */
	FLUSH_CMD_INBUF1	=	0x84, /* C */
	FLUSH_CMD_INBUF2	=	0x88, /* C */
	FLUSH_CMD_OUTBUF	=	0x8C, /* E */
	NEW_RC_BIT_RATE		=	0x90, /* E, format as RC_BIT_RATE(0xC5A8)
				depend on RC_BIT_RATE_CHANGE in ENC_PARAM_CHANGE */
	NEW_RC_FRAME_RATE	=	0x94, /* E, format as RC_FRAME_RATE(0xD0D0)
				depend on RC_FRAME_RATE_CHANGE in ENC_PARAM_CHANGE */
	NEW_I_PERIOD		=	0x98, /* E, format as I_FRM_CTRL(0xC504)
				depend on I_PERIOD_CHANGE in ENC_PARAM_CHANGE */
	H264_I_PERIOD		=	0x9C, /* E, H.264, open GOP */
	RC_CONTROL_CONFIG	=	0xA0, /* E */
	BATCH_INPUT_ADDR	=	0xA4, /* E */
	BATCH_OUTPUT_ADDR	=	0xA8, /* E */
	BATCH_OUTPUT_SIZE	=	0xAC, /* E */
	MIN_LUMA_DPB_SIZE	=	0xB0, /* D */
	DEVICE_FORMAT_ID	=	0xB4, /* C */
	H264_POC_TYPE		=	0xB8, /* D */
	MIN_CHROMA_DPB_SIZE	=	0xBC, /* D */
	DISP_PIC_FRAME_TYPE	=	0xC0, /* D */
	FREE_LUMA_DPB		=	0xC4, /* D, VC1 MPEG4 */
	ASPECT_RATIO_INFO	=	0xC8, /* D, MPEG4 */
	EXTENDED_PAR		=	0xCC, /* D, MPEG4 */
	DBG_HISTORY_INPUT0	=	0xD0, /* C */
	DBG_HISTORY_INPUT1	=	0xD4, /* C */
	DBG_HISTORY_OUTPUT	=	0xD8, /* C */
	HIERARCHICAL_P_QP	=	0xE0, /* E, H.264 */
};
76
77int s5p_mfc_init_shm(struct s5p_mfc_ctx *ctx);
78
/*
 * Write a 32-bit value at byte offset @ofs in the context's shared
 * memory and flush it so the MFC firmware observes the update.
 */
#define s5p_mfc_write_shm(ctx, x, ofs)		\
	do {					\
		writel(x, (ctx->shm + ofs));	\
		wmb();				\
	} while (0)

/* Read a 32-bit shared-memory value, ordered after prior device writes. */
static inline u32 s5p_mfc_read_shm(struct s5p_mfc_ctx *ctx, unsigned int ofs)
{
	rmb();
	return readl(ctx->shm + ofs);
}
90
91#endif /* S5P_MFC_SHM_H_ */
diff --git a/drivers/media/video/s5p-tv/Kconfig b/drivers/media/video/s5p-tv/Kconfig
new file mode 100644
index 000000000000..9c37dee7bc59
--- /dev/null
+++ b/drivers/media/video/s5p-tv/Kconfig
@@ -0,0 +1,76 @@
1# drivers/media/video/s5p-tv/Kconfig
2#
3# Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4# http://www.samsung.com/
5# Tomasz Stanislawski <t.stanislaws@samsung.com>
6#
7# Licensed under GPL
8
9config VIDEO_SAMSUNG_S5P_TV
10 bool "Samsung TV driver for S5P platform (experimental)"
11 depends on PLAT_S5P
12 depends on EXPERIMENTAL
13 default n
14 ---help---
15 Say Y here to enable selecting the TV output devices for
16 Samsung S5P platform.
17
18if VIDEO_SAMSUNG_S5P_TV
19
20config VIDEO_SAMSUNG_S5P_HDMI
21 tristate "Samsung HDMI Driver"
22 depends on VIDEO_V4L2
23 depends on VIDEO_SAMSUNG_S5P_TV
24 select VIDEO_SAMSUNG_S5P_HDMIPHY
25 help
26 Say Y here if you want support for the HDMI output
27 interface in S5P Samsung SoC. The driver can be compiled
28 as module. It is an auxiliary driver, that exposes a V4L2
29 subdev for use by other drivers. This driver requires
30 hdmiphy driver to work correctly.
31
32config VIDEO_SAMSUNG_S5P_HDMI_DEBUG
33 bool "Enable debug for HDMI Driver"
34 depends on VIDEO_SAMSUNG_S5P_HDMI
35 default n
36 help
37 Enables debugging for HDMI driver.
38
39config VIDEO_SAMSUNG_S5P_HDMIPHY
40 tristate "Samsung HDMIPHY Driver"
41 depends on VIDEO_DEV && VIDEO_V4L2 && I2C
42 depends on VIDEO_SAMSUNG_S5P_TV
43 help
44 Say Y here if you want support for the physical HDMI
45 interface in S5P Samsung SoC. The driver can be compiled
46 as module. It is an I2C driver, that exposes a V4L2
47 subdev for use by other drivers.
48
49config VIDEO_SAMSUNG_S5P_SDO
50 tristate "Samsung Analog TV Driver"
51 depends on VIDEO_DEV && VIDEO_V4L2
52 depends on VIDEO_SAMSUNG_S5P_TV
53 help
54 Say Y here if you want support for the analog TV output
55 interface in S5P Samsung SoC. The driver can be compiled
56 as module. It is an auxiliary driver, that exposes a V4L2
57 subdev for use by other drivers. This driver requires
58 hdmiphy driver to work correctly.
59
60config VIDEO_SAMSUNG_S5P_MIXER
61 tristate "Samsung Mixer and Video Processor Driver"
62 depends on VIDEO_DEV && VIDEO_V4L2
63 depends on VIDEO_SAMSUNG_S5P_TV
64 select VIDEOBUF2_DMA_CONTIG
65 help
	  Say Y here if you want support for the Mixer in Samsung S5P SoCs.
	  This device produces image data for one of the output interfaces.
68
69config VIDEO_SAMSUNG_S5P_MIXER_DEBUG
70 bool "Enable debug for Mixer Driver"
71 depends on VIDEO_SAMSUNG_S5P_MIXER
72 default n
73 help
74 Enables debugging for Mixer driver.
75
76endif # VIDEO_SAMSUNG_S5P_TV
diff --git a/drivers/media/video/s5p-tv/Makefile b/drivers/media/video/s5p-tv/Makefile
new file mode 100644
index 000000000000..37e4c17663b4
--- /dev/null
+++ b/drivers/media/video/s5p-tv/Makefile
@@ -0,0 +1,17 @@
1# drivers/media/video/samsung/tvout/Makefile
2#
3# Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4# http://www.samsung.com/
5# Tomasz Stanislawski <t.stanislaws@samsung.com>
6#
7# Licensed under GPL
8
9obj-$(CONFIG_VIDEO_SAMSUNG_S5P_HDMIPHY) += s5p-hdmiphy.o
10s5p-hdmiphy-y += hdmiphy_drv.o
11obj-$(CONFIG_VIDEO_SAMSUNG_S5P_HDMI) += s5p-hdmi.o
12s5p-hdmi-y += hdmi_drv.o
13obj-$(CONFIG_VIDEO_SAMSUNG_S5P_SDO) += s5p-sdo.o
14s5p-sdo-y += sdo_drv.o
15obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MIXER) += s5p-mixer.o
16s5p-mixer-y += mixer_drv.o mixer_video.o mixer_reg.o mixer_grp_layer.o mixer_vp_layer.o
17
diff --git a/drivers/media/video/s5p-tv/hdmi_drv.c b/drivers/media/video/s5p-tv/hdmi_drv.c
new file mode 100644
index 000000000000..06d6663f4594
--- /dev/null
+++ b/drivers/media/video/s5p-tv/hdmi_drv.c
@@ -0,0 +1,1042 @@
1/*
2 * Samsung HDMI interface driver
3 *
4 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
5 *
6 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version.
12 */
13
14#ifdef CONFIG_VIDEO_SAMSUNG_S5P_HDMI_DEBUG
15#define DEBUG
16#endif
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/io.h>
21#include <linux/i2c.h>
22#include <linux/platform_device.h>
23#include <media/v4l2-subdev.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/irq.h>
27#include <linux/delay.h>
28#include <linux/bug.h>
29#include <linux/pm_runtime.h>
30#include <linux/clk.h>
31#include <linux/regulator/consumer.h>
32
33#include <media/v4l2-common.h>
34#include <media/v4l2-dev.h>
35#include <media/v4l2-device.h>
36
37#include "regs-hdmi.h"
38
39MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
40MODULE_DESCRIPTION("Samsung HDMI");
41MODULE_LICENSE("GPL");
42
43/* default preset configured on probe */
44#define HDMI_DEFAULT_PRESET V4L2_DV_1080P60
45
/* Clocks and regulators backing the HDMI block. */
struct hdmi_resources {
	struct clk *hdmi;
	struct clk *sclk_hdmi;		/* muxed special clock (see streamon/off) */
	struct clk *sclk_pixel;		/* sclk_hdmi parent while configuring */
	struct clk *sclk_hdmiphy;	/* sclk_hdmi parent while streaming */
	struct clk *hdmiphy;		/* PHY clock, gated on power-on */
	struct regulator_bulk_data *regul_bulk;	/* HDMI power supplies */
	int regul_count;		/* number of entries in regul_bulk */
};
55
/* Driver state for a single HDMI output instance. */
struct hdmi_device {
	/** base address of HDMI registers */
	void __iomem *regs;
	/** HDMI interrupt */
	unsigned int irq;
	/** pointer to device parent */
	struct device *dev;
	/** subdev generated by HDMI device */
	struct v4l2_subdev sd;
	/** V4L2 device structure */
	struct v4l2_device v4l2_dev;
	/** subdev of HDMIPHY interface */
	struct v4l2_subdev *phy_sd;
	/** configuration of current graphic mode */
	const struct hdmi_preset_conf *cur_conf;
	/** current preset */
	u32 cur_preset;
	/** other resources */
	struct hdmi_resources res;
};
76
/* Per-SoC variant data: the I2C bus number hosting the HDMIPHY chip. */
struct hdmi_driver_data {
	int hdmiphy_bus;
};
80
/*
 * Raw byte image of the timing-generator (HDMI_TG_*) registers,
 * low/high byte pairs, written verbatim by hdmi_timing_apply().
 */
struct hdmi_tg_regs {
	u8 cmd;
	u8 h_fsz_l;
	u8 h_fsz_h;
	u8 hact_st_l;
	u8 hact_st_h;
	u8 hact_sz_l;
	u8 hact_sz_h;
	u8 v_fsz_l;
	u8 v_fsz_h;
	u8 vsync_l;
	u8 vsync_h;
	u8 vsync2_l;
	u8 vsync2_h;
	u8 vact_st_l;
	u8 vact_st_h;
	u8 vact_sz_l;
	u8 vact_sz_h;
	u8 field_chg_l;
	u8 field_chg_h;
	u8 vact_st2_l;
	u8 vact_st2_h;
	u8 vsync_top_hdmi_l;
	u8 vsync_top_hdmi_h;
	u8 vsync_bot_hdmi_l;
	u8 vsync_bot_hdmi_h;
	u8 field_top_hdmi_l;
	u8 field_top_hdmi_h;
	u8 field_bot_hdmi_l;
	u8 field_bot_hdmi_h;
};
112
/*
 * Raw byte image of the core timing registers; element [i] is written to
 * the *_0/_1/_2 register of the same name by hdmi_timing_apply().
 */
struct hdmi_core_regs {
	u8 h_blank[2];
	u8 v_blank[3];
	u8 h_v_line[3];
	u8 vsync_pol[1];
	u8 int_pro_mode[1];
	u8 v_blank_f[3];
	u8 h_sync_gen[3];
	u8 v_sync_gen1[3];
	u8 v_sync_gen2[3];
	u8 v_sync_gen3[3];
};
125
/* Full register + media-bus-format description of one DV preset. */
struct hdmi_preset_conf {
	struct hdmi_core_regs core;
	struct hdmi_tg_regs tg;
	struct v4l2_mbus_framefmt mbus_fmt;
};
131
/* I2C module and id for HDMIPHY */
static struct i2c_board_info hdmiphy_info = {
	I2C_BOARD_INFO("hdmiphy", 0x38),	/* fixed PHY slave address */
};
136
/* [0] = s5pv210, [1] = exynos4; indexed via hdmi_driver_types below. */
static struct hdmi_driver_data hdmi_driver_data[] = {
	{ .hdmiphy_bus = 3 },
	{ .hdmiphy_bus = 8 },
};
141
/*
 * Supported platform-device names, each carrying its variant data.
 * NOTE(review): no MODULE_DEVICE_TABLE(platform, ...) is visible in this
 * chunk — confirm module autoloading is not required.
 */
static struct platform_device_id hdmi_driver_types[] = {
	{
		.name = "s5pv210-hdmi",
		.driver_data = (unsigned long)&hdmi_driver_data[0],
	}, {
		.name = "exynos4-hdmi",
		.driver_data = (unsigned long)&hdmi_driver_data[1],
	}, {
		/* end node */
	}
};
153
154static const struct v4l2_subdev_ops hdmi_sd_ops;
155
/* Convert an embedded v4l2_subdev back to its hdmi_device container. */
static struct hdmi_device *sd_to_hdmi_dev(struct v4l2_subdev *sd)
{
	return container_of(sd, struct hdmi_device, sd);
}
160
/* Write the full 32-bit @value to HDMI register @reg_id. */
static inline
void hdmi_write(struct hdmi_device *hdev, u32 reg_id, u32 value)
{
	writel(value, hdev->regs + reg_id);
}
166
/*
 * Read-modify-write of register @reg_id: only the bits selected by
 * @mask take their value from @value; all other bits are preserved.
 */
static inline
void hdmi_write_mask(struct hdmi_device *hdev, u32 reg_id, u32 value, u32 mask)
{
	u32 old = readl(hdev->regs + reg_id);
	value = (value & mask) | (old & ~mask);
	writel(value, hdev->regs + reg_id);
}
174
/* Write a single byte to HDMI register @reg_id. */
static inline
void hdmi_writeb(struct hdmi_device *hdev, u32 reg_id, u8 value)
{
	writeb(value, hdev->regs + reg_id);
}
180
/* Read the 32-bit value of HDMI register @reg_id. */
static inline u32 hdmi_read(struct hdmi_device *hdev, u32 reg_id)
{
	return readl(hdev->regs + reg_id);
}
185
186static irqreturn_t hdmi_irq_handler(int irq, void *dev_data)
187{
188 struct hdmi_device *hdev = dev_data;
189 u32 intc_flag;
190
191 (void)irq;
192 intc_flag = hdmi_read(hdev, HDMI_INTC_FLAG);
193 /* clearing flags for HPD plug/unplug */
194 if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
195 printk(KERN_INFO "unplugged\n");
196 hdmi_write_mask(hdev, HDMI_INTC_FLAG, ~0,
197 HDMI_INTC_FLAG_HPD_UNPLUG);
198 }
199 if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
200 printk(KERN_INFO "plugged\n");
201 hdmi_write_mask(hdev, HDMI_INTC_FLAG, ~0,
202 HDMI_INTC_FLAG_HPD_PLUG);
203 }
204
205 return IRQ_HANDLED;
206}
207
/*
 * One-time register initialization after a core reset: interrupt
 * enables, HDMI (vs DVI) mode, bluescreen setup and AVI infoframes.
 */
static void hdmi_reg_init(struct hdmi_device *hdev)
{
	/* enable HPD interrupts */
	hdmi_write_mask(hdev, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
		HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
	/* choose HDMI mode */
	hdmi_write_mask(hdev, HDMI_MODE_SEL,
		HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
	/* disable bluescreen */
	hdmi_write_mask(hdev, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
	/* choose a recognizable debug color for the (disabled) bluescreen */
	hdmi_writeb(hdev, HDMI_BLUE_SCREEN_0, 0x12);
	hdmi_writeb(hdev, HDMI_BLUE_SCREEN_1, 0x34);
	hdmi_writeb(hdev, HDMI_BLUE_SCREEN_2, 0x56);
	/* enable AVI packet every vsync, fixes purple line problem */
	hdmi_writeb(hdev, HDMI_AVI_CON, 0x02);
	/* force YUV444, look to CEA-861-D, table 7 for more detail */
	hdmi_writeb(hdev, HDMI_AVI_BYTE(0), 2 << 5);
	hdmi_write_mask(hdev, HDMI_CON_1, 2, 3 << 5);
}
228
/*
 * Program all core and timing-generator registers from the preset's
 * precomputed byte images.  Purely mechanical: each struct field maps
 * 1:1 to the register of the same name.
 */
static void hdmi_timing_apply(struct hdmi_device *hdev,
	const struct hdmi_preset_conf *conf)
{
	const struct hdmi_core_regs *core = &conf->core;
	const struct hdmi_tg_regs *tg = &conf->tg;

	/* setting core registers */
	hdmi_writeb(hdev, HDMI_H_BLANK_0, core->h_blank[0]);
	hdmi_writeb(hdev, HDMI_H_BLANK_1, core->h_blank[1]);
	hdmi_writeb(hdev, HDMI_V_BLANK_0, core->v_blank[0]);
	hdmi_writeb(hdev, HDMI_V_BLANK_1, core->v_blank[1]);
	hdmi_writeb(hdev, HDMI_V_BLANK_2, core->v_blank[2]);
	hdmi_writeb(hdev, HDMI_H_V_LINE_0, core->h_v_line[0]);
	hdmi_writeb(hdev, HDMI_H_V_LINE_1, core->h_v_line[1]);
	hdmi_writeb(hdev, HDMI_H_V_LINE_2, core->h_v_line[2]);
	hdmi_writeb(hdev, HDMI_VSYNC_POL, core->vsync_pol[0]);
	hdmi_writeb(hdev, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
	hdmi_writeb(hdev, HDMI_V_BLANK_F_0, core->v_blank_f[0]);
	hdmi_writeb(hdev, HDMI_V_BLANK_F_1, core->v_blank_f[1]);
	hdmi_writeb(hdev, HDMI_V_BLANK_F_2, core->v_blank_f[2]);
	hdmi_writeb(hdev, HDMI_H_SYNC_GEN_0, core->h_sync_gen[0]);
	hdmi_writeb(hdev, HDMI_H_SYNC_GEN_1, core->h_sync_gen[1]);
	hdmi_writeb(hdev, HDMI_H_SYNC_GEN_2, core->h_sync_gen[2]);
	hdmi_writeb(hdev, HDMI_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
	hdmi_writeb(hdev, HDMI_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
	hdmi_writeb(hdev, HDMI_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
	hdmi_writeb(hdev, HDMI_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
	hdmi_writeb(hdev, HDMI_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
	hdmi_writeb(hdev, HDMI_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
	hdmi_writeb(hdev, HDMI_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
	hdmi_writeb(hdev, HDMI_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
	hdmi_writeb(hdev, HDMI_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
	/* Timing generator registers */
	hdmi_writeb(hdev, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
	hdmi_writeb(hdev, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
	hdmi_writeb(hdev, HDMI_TG_HACT_ST_L, tg->hact_st_l);
	hdmi_writeb(hdev, HDMI_TG_HACT_ST_H, tg->hact_st_h);
	hdmi_writeb(hdev, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
	hdmi_writeb(hdev, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
	hdmi_writeb(hdev, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
	hdmi_writeb(hdev, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
	hdmi_writeb(hdev, HDMI_TG_VSYNC_L, tg->vsync_l);
	hdmi_writeb(hdev, HDMI_TG_VSYNC_H, tg->vsync_h);
	hdmi_writeb(hdev, HDMI_TG_VSYNC2_L, tg->vsync2_l);
	hdmi_writeb(hdev, HDMI_TG_VSYNC2_H, tg->vsync2_h);
	hdmi_writeb(hdev, HDMI_TG_VACT_ST_L, tg->vact_st_l);
	hdmi_writeb(hdev, HDMI_TG_VACT_ST_H, tg->vact_st_h);
	hdmi_writeb(hdev, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
	hdmi_writeb(hdev, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
	hdmi_writeb(hdev, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
	hdmi_writeb(hdev, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
	hdmi_writeb(hdev, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
	hdmi_writeb(hdev, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
	hdmi_writeb(hdev, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
	hdmi_writeb(hdev, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
	hdmi_writeb(hdev, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
	hdmi_writeb(hdev, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
	hdmi_writeb(hdev, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
	hdmi_writeb(hdev, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
	hdmi_writeb(hdev, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
	hdmi_writeb(hdev, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
}
291
/*
 * Apply the currently selected preset: reset the PHY, push the preset to
 * the PHY subdev, reset the HDMI core, then reprogram it.
 *
 * Returns 0 on success or the PHY subdev's error.
 * NOTE(review): the mdelay(10) calls busy-wait; if this path is always
 * called from sleepable context, msleep() would be preferable — confirm.
 */
static int hdmi_conf_apply(struct hdmi_device *hdmi_dev)
{
	struct device *dev = hdmi_dev->dev;
	const struct hdmi_preset_conf *conf = hdmi_dev->cur_conf;
	struct v4l2_dv_preset preset;
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	/* reset hdmiphy */
	hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
	mdelay(10);
	hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
	mdelay(10);

	/* configure presets */
	preset.preset = hdmi_dev->cur_preset;
	ret = v4l2_subdev_call(hdmi_dev->phy_sd, video, s_dv_preset, &preset);
	if (ret) {
		dev_err(dev, "failed to set preset (%u)\n", preset.preset);
		return ret;
	}

	/* resetting HDMI core */
	hdmi_write_mask(hdmi_dev, HDMI_CORE_RSTOUT, 0, HDMI_CORE_SW_RSTOUT);
	mdelay(10);
	hdmi_write_mask(hdmi_dev, HDMI_CORE_RSTOUT, ~0, HDMI_CORE_SW_RSTOUT);
	mdelay(10);

	hdmi_reg_init(hdmi_dev);

	/* setting core registers */
	hdmi_timing_apply(hdmi_dev, conf);

	return 0;
}
328
/*
 * Dump all interesting HDMI registers at dev_dbg level, tagged with
 * @prefix to identify the call site.  Debug aid only; no side effects
 * beyond the register reads.
 */
static void hdmi_dumpregs(struct hdmi_device *hdev, char *prefix)
{
#define DUMPREG(reg_id) \
	dev_dbg(hdev->dev, "%s:" #reg_id " = %08x\n", prefix, \
		readl(hdev->regs + reg_id))

	dev_dbg(hdev->dev, "%s: ---- CONTROL REGISTERS ----\n", prefix);
	DUMPREG(HDMI_INTC_FLAG);
	DUMPREG(HDMI_INTC_CON);
	DUMPREG(HDMI_HPD_STATUS);
	DUMPREG(HDMI_PHY_RSTOUT);
	DUMPREG(HDMI_PHY_VPLL);
	DUMPREG(HDMI_PHY_CMU);
	DUMPREG(HDMI_CORE_RSTOUT);

	dev_dbg(hdev->dev, "%s: ---- CORE REGISTERS ----\n", prefix);
	DUMPREG(HDMI_CON_0);
	DUMPREG(HDMI_CON_1);
	DUMPREG(HDMI_CON_2);
	DUMPREG(HDMI_SYS_STATUS);
	DUMPREG(HDMI_PHY_STATUS);
	DUMPREG(HDMI_STATUS_EN);
	DUMPREG(HDMI_HPD);
	DUMPREG(HDMI_MODE_SEL);
	DUMPREG(HDMI_HPD_GEN);
	DUMPREG(HDMI_DC_CONTROL);
	DUMPREG(HDMI_VIDEO_PATTERN_GEN);

	dev_dbg(hdev->dev, "%s: ---- CORE SYNC REGISTERS ----\n", prefix);
	DUMPREG(HDMI_H_BLANK_0);
	DUMPREG(HDMI_H_BLANK_1);
	DUMPREG(HDMI_V_BLANK_0);
	DUMPREG(HDMI_V_BLANK_1);
	DUMPREG(HDMI_V_BLANK_2);
	DUMPREG(HDMI_H_V_LINE_0);
	DUMPREG(HDMI_H_V_LINE_1);
	DUMPREG(HDMI_H_V_LINE_2);
	DUMPREG(HDMI_VSYNC_POL);
	DUMPREG(HDMI_INT_PRO_MODE);
	DUMPREG(HDMI_V_BLANK_F_0);
	DUMPREG(HDMI_V_BLANK_F_1);
	DUMPREG(HDMI_V_BLANK_F_2);
	DUMPREG(HDMI_H_SYNC_GEN_0);
	DUMPREG(HDMI_H_SYNC_GEN_1);
	DUMPREG(HDMI_H_SYNC_GEN_2);
	DUMPREG(HDMI_V_SYNC_GEN_1_0);
	DUMPREG(HDMI_V_SYNC_GEN_1_1);
	DUMPREG(HDMI_V_SYNC_GEN_1_2);
	DUMPREG(HDMI_V_SYNC_GEN_2_0);
	DUMPREG(HDMI_V_SYNC_GEN_2_1);
	DUMPREG(HDMI_V_SYNC_GEN_2_2);
	DUMPREG(HDMI_V_SYNC_GEN_3_0);
	DUMPREG(HDMI_V_SYNC_GEN_3_1);
	DUMPREG(HDMI_V_SYNC_GEN_3_2);

	dev_dbg(hdev->dev, "%s: ---- TG REGISTERS ----\n", prefix);
	DUMPREG(HDMI_TG_CMD);
	DUMPREG(HDMI_TG_H_FSZ_L);
	DUMPREG(HDMI_TG_H_FSZ_H);
	DUMPREG(HDMI_TG_HACT_ST_L);
	DUMPREG(HDMI_TG_HACT_ST_H);
	DUMPREG(HDMI_TG_HACT_SZ_L);
	DUMPREG(HDMI_TG_HACT_SZ_H);
	DUMPREG(HDMI_TG_V_FSZ_L);
	DUMPREG(HDMI_TG_V_FSZ_H);
	DUMPREG(HDMI_TG_VSYNC_L);
	DUMPREG(HDMI_TG_VSYNC_H);
	DUMPREG(HDMI_TG_VSYNC2_L);
	DUMPREG(HDMI_TG_VSYNC2_H);
	DUMPREG(HDMI_TG_VACT_ST_L);
	DUMPREG(HDMI_TG_VACT_ST_H);
	DUMPREG(HDMI_TG_VACT_SZ_L);
	DUMPREG(HDMI_TG_VACT_SZ_H);
	DUMPREG(HDMI_TG_FIELD_CHG_L);
	DUMPREG(HDMI_TG_FIELD_CHG_H);
	DUMPREG(HDMI_TG_VACT_ST2_L);
	DUMPREG(HDMI_TG_VACT_ST2_H);
	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
#undef DUMPREG
}
416
/* Raw register preset for 720x480 progressive output. */
static const struct hdmi_preset_conf hdmi_conf_480p = {
	.core = {
		.h_blank = {0x8a, 0x00},
		.v_blank = {0x0d, 0x6a, 0x01},
		.h_v_line = {0x0d, 0xa2, 0x35},
		.vsync_pol = {0x01},
		.int_pro_mode = {0x00},
		.v_blank_f = {0x00, 0x00, 0x00},
		.h_sync_gen = {0x0e, 0x30, 0x11},
		.v_sync_gen1 = {0x0f, 0x90, 0x00},
		/* other don't care */
	},
	.tg = {
		0x00, /* cmd */
		0x5a, 0x03, /* h_fsz */
		0x8a, 0x00, 0xd0, 0x02, /* hact */
		0x0d, 0x02, /* v_fsz */
		0x01, 0x00, 0x33, 0x02, /* vsync */
		0x2d, 0x00, 0xe0, 0x01, /* vact */
		0x33, 0x02, /* field_chg */
		0x49, 0x02, /* vact_st2 */
		0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
		0x01, 0x00, 0x33, 0x02, /* field top/bot */
	},
	.mbus_fmt = {
		.width = 720,
		.height = 480,
		.code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
		.field = V4L2_FIELD_NONE,
	},
};
448
/* Raw register preset for 1280x720 progressive output (60 Hz class). */
static const struct hdmi_preset_conf hdmi_conf_720p60 = {
	.core = {
		.h_blank = {0x72, 0x01},
		.v_blank = {0xee, 0xf2, 0x00},
		.h_v_line = {0xee, 0x22, 0x67},
		.vsync_pol = {0x00},
		.int_pro_mode = {0x00},
		.v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
		.h_sync_gen = {0x6c, 0x50, 0x02},
		.v_sync_gen1 = {0x0a, 0x50, 0x00},
		/* other don't care */
	},
	.tg = {
		0x00, /* cmd */
		0x72, 0x06, /* h_fsz */
		0x72, 0x01, 0x00, 0x05, /* hact */
		0xee, 0x02, /* v_fsz */
		0x01, 0x00, 0x33, 0x02, /* vsync */
		0x1e, 0x00, 0xd0, 0x02, /* vact */
		0x33, 0x02, /* field_chg */
		0x49, 0x02, /* vact_st2 */
		0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
		0x01, 0x00, 0x33, 0x02, /* field top/bot */
	},
	.mbus_fmt = {
		.width = 1280,
		.height = 720,
		.code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
		.field = V4L2_FIELD_NONE,
	},
};
480
/* Raw register preset for 1920x1080 progressive output at 50 Hz. */
static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
	.core = {
		.h_blank = {0xd0, 0x02},
		.v_blank = {0x65, 0x6c, 0x01},
		.h_v_line = {0x65, 0x04, 0xa5},
		.vsync_pol = {0x00},
		.int_pro_mode = {0x00},
		.v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
		.h_sync_gen = {0x0e, 0xea, 0x08},
		.v_sync_gen1 = {0x09, 0x40, 0x00},
		/* other don't care */
	},
	.tg = {
		0x00, /* cmd */
		0x98, 0x08, /* h_fsz */
		0x18, 0x01, 0x80, 0x07, /* hact */
		0x65, 0x04, /* v_fsz */
		0x01, 0x00, 0x33, 0x02, /* vsync */
		0x2d, 0x00, 0x38, 0x04, /* vact */
		0x33, 0x02, /* field_chg */
		0x49, 0x02, /* vact_st2 */
		0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
		0x01, 0x00, 0x33, 0x02, /* field top/bot */
	},
	.mbus_fmt = {
		.width = 1920,
		.height = 1080,
		.code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
		.field = V4L2_FIELD_NONE,
	},
};
512
/* Raw register preset for 1920x1080 progressive output (60 Hz class). */
static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
	.core = {
		.h_blank = {0x18, 0x01},
		.v_blank = {0x65, 0x6c, 0x01},
		.h_v_line = {0x65, 0x84, 0x89},
		.vsync_pol = {0x00},
		.int_pro_mode = {0x00},
		.v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
		.h_sync_gen = {0x56, 0x08, 0x02},
		.v_sync_gen1 = {0x09, 0x40, 0x00},
		/* other don't care */
	},
	.tg = {
		0x00, /* cmd */
		0x98, 0x08, /* h_fsz */
		0x18, 0x01, 0x80, 0x07, /* hact */
		0x65, 0x04, /* v_fsz */
		0x01, 0x00, 0x33, 0x02, /* vsync */
		0x2d, 0x00, 0x38, 0x04, /* vact */
		0x33, 0x02, /* field_chg */
		0x48, 0x02, /* vact_st2 */
		0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
		0x01, 0x00, 0x33, 0x02, /* field top/bot */
	},
	.mbus_fmt = {
		.width = 1920,
		.height = 1080,
		.code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
		.field = V4L2_FIELD_NONE,
	},
};
544
/*
 * Preset-id to register-configuration lookup table.
 * NOTE(review): V4L2_DV_1080P30 maps to the 1080p60 register set and
 * V4L2_DV_720P59_94 to the 720p60 set — confirm these shared timings
 * are intentional for the near-identical rates.
 */
static const struct {
	u32 preset;
	const struct hdmi_preset_conf *conf;
} hdmi_conf[] = {
	{ V4L2_DV_480P59_94, &hdmi_conf_480p },
	{ V4L2_DV_720P59_94, &hdmi_conf_720p60 },
	{ V4L2_DV_1080P50, &hdmi_conf_1080p50 },
	{ V4L2_DV_1080P30, &hdmi_conf_1080p60 },
	{ V4L2_DV_1080P60, &hdmi_conf_1080p60 },
};
555
556static const struct hdmi_preset_conf *hdmi_preset2conf(u32 preset)
557{
558 int i;
559
560 for (i = 0; i < ARRAY_SIZE(hdmi_conf); ++i)
561 if (hdmi_conf[i].preset == preset)
562 return hdmi_conf[i].conf;
563 return NULL;
564}
565
/*
 * Start HDMI output: power the PHY, wait (up to ~100 ms, polling) for
 * its PLL to lock, switch sclk_hdmi to the PHY clock, then enable the
 * HDMI core and timing generator.
 *
 * Returns 0 on success, the PHY error, or -EIO if the PLL never locks.
 */
static int hdmi_streamon(struct hdmi_device *hdev)
{
	struct device *dev = hdev->dev;
	struct hdmi_resources *res = &hdev->res;
	int ret, tries;

	dev_dbg(dev, "%s\n", __func__);

	ret = v4l2_subdev_call(hdev->phy_sd, video, s_stream, 1);
	if (ret)
		return ret;

	/* waiting for HDMIPHY's PLL to get to steady state */
	for (tries = 100; tries; --tries) {
		u32 val = hdmi_read(hdev, HDMI_PHY_STATUS);
		if (val & HDMI_PHY_STATUS_READY)
			break;
		mdelay(1);
	}
	/* steady state not achieved */
	if (tries == 0) {
		dev_err(dev, "hdmiphy's pll could not reach steady state.\n");
		/* undo the PHY start before bailing out */
		v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
		hdmi_dumpregs(hdev, "s_stream");
		return -EIO;
	}

	/* hdmiphy clock is used for HDMI in streaming mode */
	clk_disable(res->sclk_hdmi);
	clk_set_parent(res->sclk_hdmi, res->sclk_hdmiphy);
	clk_enable(res->sclk_hdmi);

	/* enable HDMI and timing generator */
	hdmi_write_mask(hdev, HDMI_CON_0, ~0, HDMI_EN);
	hdmi_write_mask(hdev, HDMI_TG_CMD, ~0, HDMI_TG_EN);
	hdmi_dumpregs(hdev, "streamon");
	return 0;
}
604
/*
 * Stop HDMI output: disable the core and timing generator, switch
 * sclk_hdmi back to the pixel clock, and stop the PHY.  Always returns 0.
 */
static int hdmi_streamoff(struct hdmi_device *hdev)
{
	struct device *dev = hdev->dev;
	struct hdmi_resources *res = &hdev->res;

	dev_dbg(dev, "%s\n", __func__);

	hdmi_write_mask(hdev, HDMI_CON_0, 0, HDMI_EN);
	hdmi_write_mask(hdev, HDMI_TG_CMD, 0, HDMI_TG_EN);

	/* pixel(vpll) clock is used for HDMI in config mode */
	clk_disable(res->sclk_hdmi);
	clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
	clk_enable(res->sclk_hdmi);

	v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);

	hdmi_dumpregs(hdev, "streamoff");
	return 0;
}
625
626static int hdmi_s_stream(struct v4l2_subdev *sd, int enable)
627{
628 struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
629 struct device *dev = hdev->dev;
630
631 dev_dbg(dev, "%s(%d)\n", __func__, enable);
632 if (enable)
633 return hdmi_streamon(hdev);
634 return hdmi_streamoff(hdev);
635}
636
/*
 * Power-up sequence: regulators, PHY clock, clock mux, HDMI clock.
 * NOTE(review): regulator_bulk_enable() returns an error code that is
 * ignored here; a failure would leave the block unpowered silently.
 */
static void hdmi_resource_poweron(struct hdmi_resources *res)
{
	/* turn HDMI power on */
	regulator_bulk_enable(res->regul_count, res->regul_bulk);
	/* power-on hdmi physical interface */
	clk_enable(res->hdmiphy);
	/* use VPP as parent clock; HDMIPHY is not working yet */
	clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
	/* turn clocks on */
	clk_enable(res->sclk_hdmi);
}
648
/* Power down in reverse order of hdmi_resource_poweron(). */
static void hdmi_resource_poweroff(struct hdmi_resources *res)
{
	/* turn clocks off */
	clk_disable(res->sclk_hdmi);
	/* power-off hdmiphy */
	clk_disable(res->hdmiphy);
	/* turn HDMI power off */
	regulator_bulk_disable(res->regul_count, res->regul_bulk);
}
658
659static int hdmi_s_power(struct v4l2_subdev *sd, int on)
660{
661 struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
662 int ret;
663
664 if (on)
665 ret = pm_runtime_get_sync(hdev->dev);
666 else
667 ret = pm_runtime_put_sync(hdev->dev);
668 /* only values < 0 indicate errors */
669 return IS_ERR_VALUE(ret) ? ret : 0;
670}
671
672static int hdmi_s_dv_preset(struct v4l2_subdev *sd,
673 struct v4l2_dv_preset *preset)
674{
675 struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
676 struct device *dev = hdev->dev;
677 const struct hdmi_preset_conf *conf;
678
679 conf = hdmi_preset2conf(preset->preset);
680 if (conf == NULL) {
681 dev_err(dev, "preset (%u) not supported\n", preset->preset);
682 return -EINVAL;
683 }
684 hdev->cur_conf = conf;
685 hdev->cur_preset = preset->preset;
686 return 0;
687}
688
689static int hdmi_g_dv_preset(struct v4l2_subdev *sd,
690 struct v4l2_dv_preset *preset)
691{
692 memset(preset, 0, sizeof(*preset));
693 preset->preset = sd_to_hdmi_dev(sd)->cur_preset;
694 return 0;
695}
696
697static int hdmi_g_mbus_fmt(struct v4l2_subdev *sd,
698 struct v4l2_mbus_framefmt *fmt)
699{
700 struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
701 struct device *dev = hdev->dev;
702
703 dev_dbg(dev, "%s\n", __func__);
704 if (!hdev->cur_conf)
705 return -EINVAL;
706 *fmt = hdev->cur_conf->mbus_fmt;
707 return 0;
708}
709
/*
 * v4l2 subdev enum_dv_presets handler: fill @preset with the description
 * of the entry at @preset->index in the hdmi_conf table; -EINVAL past
 * the end of the table.
 */
static int hdmi_enum_dv_presets(struct v4l2_subdev *sd,
	struct v4l2_dv_enum_preset *preset)
{
	if (preset->index >= ARRAY_SIZE(hdmi_conf))
		return -EINVAL;
	return v4l_fill_dv_preset_info(hdmi_conf[preset->index].preset, preset);
}
717
/* core subdev operations: power management only */
static const struct v4l2_subdev_core_ops hdmi_sd_core_ops = {
	.s_power = hdmi_s_power,
};

/* video subdev operations: DV preset handling and streaming control */
static const struct v4l2_subdev_video_ops hdmi_sd_video_ops = {
	.s_dv_preset = hdmi_s_dv_preset,
	.g_dv_preset = hdmi_g_dv_preset,
	.enum_dv_presets = hdmi_enum_dv_presets,
	.g_mbus_fmt = hdmi_g_mbus_fmt,
	.s_stream = hdmi_s_stream,
};

/* subdev operations exported to the bridge (mixer) driver */
static const struct v4l2_subdev_ops hdmi_sd_ops = {
	.core = &hdmi_sd_core_ops,
	.video = &hdmi_sd_video_ops,
};
734
/* runtime PM suspend handler: power down clocks and regulators */
static int hdmi_runtime_suspend(struct device *dev)
{
	struct v4l2_subdev *sd = dev_get_drvdata(dev);
	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);

	dev_dbg(dev, "%s\n", __func__);
	hdmi_resource_poweroff(&hdev->res);
	return 0;
}
744
745static int hdmi_runtime_resume(struct device *dev)
746{
747 struct v4l2_subdev *sd = dev_get_drvdata(dev);
748 struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
749 int ret = 0;
750
751 dev_dbg(dev, "%s\n", __func__);
752
753 hdmi_resource_poweron(&hdev->res);
754
755 ret = hdmi_conf_apply(hdev);
756 if (ret)
757 goto fail;
758
759 dev_dbg(dev, "poweron succeed\n");
760
761 return 0;
762
763fail:
764 hdmi_resource_poweroff(&hdev->res);
765 dev_err(dev, "poweron failed\n");
766
767 return ret;
768}
769
/* runtime PM only; system sleep hooks are not provided by this driver */
static const struct dev_pm_ops hdmi_pm_ops = {
	.runtime_suspend = hdmi_runtime_suspend,
	.runtime_resume = hdmi_runtime_resume,
};
774
/*
 * Release clocks and regulators acquired by hdmi_resources_init().
 *
 * Safe on a partially initialized state: every clock is put only when it
 * was obtained successfully and regulators are freed only when
 * regul_count is non-zero.  The trailing memset() makes repeated calls
 * harmless.
 */
static void hdmi_resources_cleanup(struct hdmi_device *hdev)
{
	struct hdmi_resources *res = &hdev->res;

	dev_dbg(hdev->dev, "HDMI resource cleanup\n");
	/* put clocks, power */
	if (res->regul_count)
		regulator_bulk_free(res->regul_count, res->regul_bulk);
	/* kfree is NULL-safe */
	kfree(res->regul_bulk);
	if (!IS_ERR_OR_NULL(res->hdmiphy))
		clk_put(res->hdmiphy);
	if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
		clk_put(res->sclk_hdmiphy);
	if (!IS_ERR_OR_NULL(res->sclk_pixel))
		clk_put(res->sclk_pixel);
	if (!IS_ERR_OR_NULL(res->sclk_hdmi))
		clk_put(res->sclk_hdmi);
	if (!IS_ERR_OR_NULL(res->hdmi))
		clk_put(res->hdmi);
	memset(res, 0, sizeof *res);
}
797
/*
 * Acquire all clocks and regulators used by the HDMI block.
 *
 * On any failure the partially acquired resources are released via
 * hdmi_resources_cleanup().  Note that every failure is collapsed into
 * -ENODEV; the specific cause is only visible in the log.
 */
static int hdmi_resources_init(struct hdmi_device *hdev)
{
	struct device *dev = hdev->dev;
	struct hdmi_resources *res = &hdev->res;
	/* regulator supply names consumed by this device */
	static char *supply[] = {
		"hdmi-en",
		"vdd",
		"vdd_osc",
		"vdd_pll",
	};
	int i, ret;

	dev_dbg(dev, "HDMI resource init\n");

	/* start from a clean state so cleanup can tell what was acquired */
	memset(res, 0, sizeof *res);
	/* get clocks, power */

	res->hdmi = clk_get(dev, "hdmi");
	if (IS_ERR_OR_NULL(res->hdmi)) {
		dev_err(dev, "failed to get clock 'hdmi'\n");
		goto fail;
	}
	res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
	if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
		dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
		goto fail;
	}
	res->sclk_pixel = clk_get(dev, "sclk_pixel");
	if (IS_ERR_OR_NULL(res->sclk_pixel)) {
		dev_err(dev, "failed to get clock 'sclk_pixel'\n");
		goto fail;
	}
	res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
	if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
		dev_err(dev, "failed to get clock 'sclk_hdmiphy'\n");
		goto fail;
	}
	res->hdmiphy = clk_get(dev, "hdmiphy");
	if (IS_ERR_OR_NULL(res->hdmiphy)) {
		dev_err(dev, "failed to get clock 'hdmiphy'\n");
		goto fail;
	}
	res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
		sizeof res->regul_bulk[0], GFP_KERNEL);
	if (!res->regul_bulk) {
		dev_err(dev, "failed to get memory for regulators\n");
		goto fail;
	}
	for (i = 0; i < ARRAY_SIZE(supply); ++i) {
		res->regul_bulk[i].supply = supply[i];
		res->regul_bulk[i].consumer = NULL;
	}

	ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
	if (ret) {
		dev_err(dev, "failed to get regulators\n");
		goto fail;
	}
	/* regul_count != 0 marks the regulators as owned for cleanup */
	res->regul_count = ARRAY_SIZE(supply);

	return 0;
fail:
	dev_err(dev, "HDMI resource init - failed\n");
	hdmi_resources_cleanup(hdev);
	return -ENODEV;
}
864
865static int __devinit hdmi_probe(struct platform_device *pdev)
866{
867 struct device *dev = &pdev->dev;
868 struct resource *res;
869 struct i2c_adapter *phy_adapter;
870 struct v4l2_subdev *sd;
871 struct hdmi_device *hdmi_dev = NULL;
872 struct hdmi_driver_data *drv_data;
873 int ret;
874
875 dev_dbg(dev, "probe start\n");
876
877 hdmi_dev = kzalloc(sizeof(*hdmi_dev), GFP_KERNEL);
878 if (!hdmi_dev) {
879 dev_err(dev, "out of memory\n");
880 ret = -ENOMEM;
881 goto fail;
882 }
883
884 hdmi_dev->dev = dev;
885
886 ret = hdmi_resources_init(hdmi_dev);
887 if (ret)
888 goto fail_hdev;
889
890 /* mapping HDMI registers */
891 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
892 if (res == NULL) {
893 dev_err(dev, "get memory resource failed.\n");
894 ret = -ENXIO;
895 goto fail_init;
896 }
897
898 hdmi_dev->regs = ioremap(res->start, resource_size(res));
899 if (hdmi_dev->regs == NULL) {
900 dev_err(dev, "register mapping failed.\n");
901 ret = -ENXIO;
902 goto fail_hdev;
903 }
904
905 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
906 if (res == NULL) {
907 dev_err(dev, "get interrupt resource failed.\n");
908 ret = -ENXIO;
909 goto fail_regs;
910 }
911
912 ret = request_irq(res->start, hdmi_irq_handler, 0, "hdmi", hdmi_dev);
913 if (ret) {
914 dev_err(dev, "request interrupt failed.\n");
915 goto fail_regs;
916 }
917 hdmi_dev->irq = res->start;
918
919 /* setting v4l2 name to prevent WARN_ON in v4l2_device_register */
920 strlcpy(hdmi_dev->v4l2_dev.name, dev_name(dev),
921 sizeof(hdmi_dev->v4l2_dev.name));
922 /* passing NULL owner prevents driver from erasing drvdata */
923 ret = v4l2_device_register(NULL, &hdmi_dev->v4l2_dev);
924 if (ret) {
925 dev_err(dev, "could not register v4l2 device.\n");
926 goto fail_irq;
927 }
928
929 drv_data = (struct hdmi_driver_data *)
930 platform_get_device_id(pdev)->driver_data;
931 phy_adapter = i2c_get_adapter(drv_data->hdmiphy_bus);
932 if (phy_adapter == NULL) {
933 dev_err(dev, "adapter request failed\n");
934 ret = -ENXIO;
935 goto fail_vdev;
936 }
937
938 hdmi_dev->phy_sd = v4l2_i2c_new_subdev_board(&hdmi_dev->v4l2_dev,
939 phy_adapter, &hdmiphy_info, NULL);
940 /* on failure or not adapter is no longer useful */
941 i2c_put_adapter(phy_adapter);
942 if (hdmi_dev->phy_sd == NULL) {
943 dev_err(dev, "missing subdev for hdmiphy\n");
944 ret = -ENODEV;
945 goto fail_vdev;
946 }
947
948 clk_enable(hdmi_dev->res.hdmi);
949
950 pm_runtime_enable(dev);
951
952 sd = &hdmi_dev->sd;
953 v4l2_subdev_init(sd, &hdmi_sd_ops);
954 sd->owner = THIS_MODULE;
955
956 strlcpy(sd->name, "s5p-hdmi", sizeof sd->name);
957 hdmi_dev->cur_preset = HDMI_DEFAULT_PRESET;
958 /* FIXME: missing fail preset is not supported */
959 hdmi_dev->cur_conf = hdmi_preset2conf(hdmi_dev->cur_preset);
960
961 /* storing subdev for call that have only access to struct device */
962 dev_set_drvdata(dev, sd);
963
964 dev_info(dev, "probe sucessful\n");
965
966 return 0;
967
968fail_vdev:
969 v4l2_device_unregister(&hdmi_dev->v4l2_dev);
970
971fail_irq:
972 free_irq(hdmi_dev->irq, hdmi_dev);
973
974fail_regs:
975 iounmap(hdmi_dev->regs);
976
977fail_init:
978 hdmi_resources_cleanup(hdmi_dev);
979
980fail_hdev:
981 kfree(hdmi_dev);
982
983fail:
984 dev_err(dev, "probe failed\n");
985 return ret;
986}
987
988static int __devexit hdmi_remove(struct platform_device *pdev)
989{
990 struct device *dev = &pdev->dev;
991 struct v4l2_subdev *sd = dev_get_drvdata(dev);
992 struct hdmi_device *hdmi_dev = sd_to_hdmi_dev(sd);
993
994 pm_runtime_disable(dev);
995 clk_disable(hdmi_dev->res.hdmi);
996 v4l2_device_unregister(&hdmi_dev->v4l2_dev);
997 disable_irq(hdmi_dev->irq);
998 free_irq(hdmi_dev->irq, hdmi_dev);
999 iounmap(hdmi_dev->regs);
1000 hdmi_resources_cleanup(hdmi_dev);
1001 kfree(hdmi_dev);
1002 dev_info(dev, "remove sucessful\n");
1003
1004 return 0;
1005}
1006
/*
 * NOTE(review): __refdata presumably suppresses section-mismatch
 * warnings for the __devinit/__devexit references below — confirm.
 */
static struct platform_driver hdmi_driver __refdata = {
	.probe = hdmi_probe,
	.remove = __devexit_p(hdmi_remove),
	.id_table = hdmi_driver_types,
	.driver = {
		.name = "s5p-hdmi",
		.owner = THIS_MODULE,
		.pm = &hdmi_pm_ops,
	}
};
1017
1018/* D R I V E R I N I T I A L I Z A T I O N */
1019
/* module load: print the banner and register the platform driver */
static int __init hdmi_init(void)
{
	int ret;
	/* banner is only used here, so it may live in .init.data */
	static const char banner[] __initdata = KERN_INFO \
		"Samsung HDMI output driver, "
		"(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
	printk(banner);

	ret = platform_driver_register(&hdmi_driver);
	if (ret)
		printk(KERN_ERR "HDMI platform driver register failed\n");

	return ret;
}
module_init(hdmi_init);

/* module unload: unregister the platform driver */
static void __exit hdmi_exit(void)
{
	platform_driver_unregister(&hdmi_driver);
}
module_exit(hdmi_exit);
1041
1042
diff --git a/drivers/media/video/s5p-tv/hdmiphy_drv.c b/drivers/media/video/s5p-tv/hdmiphy_drv.c
new file mode 100644
index 000000000000..6693f4aff108
--- /dev/null
+++ b/drivers/media/video/s5p-tv/hdmiphy_drv.c
@@ -0,0 +1,188 @@
1/*
2 * Samsung HDMI Physical interface driver
3 *
4 * Copyright (C) 2010-2011 Samsung Electronics Co.Ltd
5 * Author: Tomasz Stanislawski <t.stanislaws@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#include <linux/module.h>
14#include <linux/i2c.h>
15#include <linux/slab.h>
16#include <linux/clk.h>
17#include <linux/io.h>
18#include <linux/interrupt.h>
19#include <linux/irq.h>
20#include <linux/err.h>
21
22#include <media/v4l2-subdev.h>
23
24MODULE_AUTHOR("Tomasz Stanislawski <t.stanislaws@samsung.com>");
25MODULE_DESCRIPTION("Samsung HDMI Physical interface driver");
26MODULE_LICENSE("GPL");
27
/* maps a DV preset to the 32-byte PHY register block sent over I2C */
struct hdmiphy_conf {
	u32 preset;
	const u8 *data;
};

/* PHY settings for a 27 MHz pixel clock (480p) */
static const u8 hdmiphy_conf27[32] = {
	0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
	0x6B, 0x10, 0x02, 0x51, 0xDf, 0xF2, 0x54, 0x87,
	0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
	0x22, 0x40, 0xe3, 0x26, 0x00, 0x00, 0x00, 0x00,
};

/* PHY settings for a 74.175 MHz pixel clock (720p59.94/1080p30) */
static const u8 hdmiphy_conf74_175[32] = {
	0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
	0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
	0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
	0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
};

/* PHY settings for a 74.25 MHz pixel clock (720p60) */
static const u8 hdmiphy_conf74_25[32] = {
	0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
	0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
	0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xe0,
	0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
};

/* PHY settings for a 148.5 MHz pixel clock (1080p50/60) */
static const u8 hdmiphy_conf148_5[32] = {
	0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
	0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
	0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
	0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
};

/* preset -> configuration lookup table used by hdmiphy_preset2conf() */
static const struct hdmiphy_conf hdmiphy_conf[] = {
	{ V4L2_DV_480P59_94, hdmiphy_conf27 },
	{ V4L2_DV_1080P30, hdmiphy_conf74_175 },
	{ V4L2_DV_720P59_94, hdmiphy_conf74_175 },
	{ V4L2_DV_720P60, hdmiphy_conf74_25 },
	{ V4L2_DV_1080P50, hdmiphy_conf148_5 },
	{ V4L2_DV_1080P60, hdmiphy_conf148_5 },
};
69
70const u8 *hdmiphy_preset2conf(u32 preset)
71{
72 int i;
73 for (i = 0; i < ARRAY_SIZE(hdmiphy_conf); ++i)
74 if (hdmiphy_conf[i].preset == preset)
75 return hdmiphy_conf[i].data;
76 return NULL;
77}
78
/* s_power stub: PHY power is currently handled via s_stream only */
static int hdmiphy_s_power(struct v4l2_subdev *sd, int on)
{
	/* to be implemented */
	return 0;
}
84
/*
 * Program the PHY for the given DV preset: look up the 32-byte register
 * configuration and write it to the chip in one I2C transfer.
 * Returns -EINVAL for unsupported presets, -EIO on transfer failure.
 */
static int hdmiphy_s_dv_preset(struct v4l2_subdev *sd,
	struct v4l2_dv_preset *preset)
{
	const u8 *data;
	u8 buffer[32];
	int ret;
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct device *dev = &client->dev;

	dev_info(dev, "s_dv_preset(preset = %d)\n", preset->preset);
	data = hdmiphy_preset2conf(preset->preset);
	if (!data) {
		dev_err(dev, "format not supported\n");
		return -EINVAL;
	}

	/* storing configuration to the device */
	/* NOTE(review): the copy to a stack buffer is presumably only to
	 * decouple the const table from the I2C buffer — confirm */
	memcpy(buffer, data, 32);
	ret = i2c_master_send(client, buffer, 32);
	if (ret != 32) {
		dev_err(dev, "failed to configure HDMIPHY via I2C\n");
		return -EIO;
	}

	return 0;
}
111
112static int hdmiphy_s_stream(struct v4l2_subdev *sd, int enable)
113{
114 struct i2c_client *client = v4l2_get_subdevdata(sd);
115 struct device *dev = &client->dev;
116 u8 buffer[2];
117 int ret;
118
119 dev_info(dev, "s_stream(%d)\n", enable);
120 /* going to/from configuration from/to operation mode */
121 buffer[0] = 0x1f;
122 buffer[1] = enable ? 0x80 : 0x00;
123
124 ret = i2c_master_send(client, buffer, 2);
125 if (ret != 2) {
126 dev_err(dev, "stream (%d) failed\n", enable);
127 return -EIO;
128 }
129 return 0;
130}
131
/* core subdev operations: power management stub */
static const struct v4l2_subdev_core_ops hdmiphy_core_ops = {
	.s_power = hdmiphy_s_power,
};

/* video subdev operations: preset programming and stream on/off */
static const struct v4l2_subdev_video_ops hdmiphy_video_ops = {
	.s_dv_preset = hdmiphy_s_dv_preset,
	.s_stream = hdmiphy_s_stream,
};

/* subdev operations exported to the HDMI driver */
static const struct v4l2_subdev_ops hdmiphy_ops = {
	.core = &hdmiphy_core_ops,
	.video = &hdmiphy_video_ops,
};
145
146static int __devinit hdmiphy_probe(struct i2c_client *client,
147 const struct i2c_device_id *id)
148{
149 static struct v4l2_subdev sd;
150
151 v4l2_i2c_subdev_init(&sd, client, &hdmiphy_ops);
152 dev_info(&client->dev, "probe successful\n");
153 return 0;
154}
155
156static int __devexit hdmiphy_remove(struct i2c_client *client)
157{
158 dev_info(&client->dev, "remove successful\n");
159 return 0;
160}
161
/* I2C device ids handled by this driver */
static const struct i2c_device_id hdmiphy_id[] = {
	{ "hdmiphy", 0 },
	{ },
};
MODULE_DEVICE_TABLE(i2c, hdmiphy_id);

/* I2C driver glue */
static struct i2c_driver hdmiphy_driver = {
	.driver = {
		.name = "s5p-hdmiphy",
		.owner = THIS_MODULE,
	},
	.probe = hdmiphy_probe,
	.remove = __devexit_p(hdmiphy_remove),
	.id_table = hdmiphy_id,
};
177
/* module load: register the I2C driver */
static int __init hdmiphy_init(void)
{
	return i2c_add_driver(&hdmiphy_driver);
}
module_init(hdmiphy_init);

/* module unload: unregister the I2C driver */
static void __exit hdmiphy_exit(void)
{
	i2c_del_driver(&hdmiphy_driver);
}
module_exit(hdmiphy_exit);
diff --git a/drivers/media/video/s5p-tv/mixer.h b/drivers/media/video/s5p-tv/mixer.h
new file mode 100644
index 000000000000..e2242243f63d
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer.h
@@ -0,0 +1,354 @@
1/*
2 * Samsung TV Mixer driver
3 *
4 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
5 *
6 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published
10 * by the Free Software Foundiation. either version 2 of the License,
11 * or (at your option) any later version
12 */
13
14#ifndef SAMSUNG_MIXER_H
15#define SAMSUNG_MIXER_H
16
17#ifdef CONFIG_VIDEO_SAMSUNG_S5P_MIXER_DEBUG
18 #define DEBUG
19#endif
20
21#include <linux/fb.h>
22#include <linux/kernel.h>
23#include <linux/spinlock.h>
24#include <linux/wait.h>
25#include <media/v4l2-device.h>
26#include <media/videobuf2-core.h>
27
28#include "regs-mixer.h"
29
30/** maximum number of output interfaces */
31#define MXR_MAX_OUTPUTS 2
32/** maximum number of input interfaces (layers) */
33#define MXR_MAX_LAYERS 3
34#define MXR_DRIVER_NAME "s5p-mixer"
35/** maximal number of planes for every layer */
36#define MXR_MAX_PLANES 2
37
38#define MXR_ENABLE 1
39#define MXR_DISABLE 0
40
/** description of a macroblock for packed formats */
struct mxr_block {
	/** horizontal number of pixels in macroblock */
	unsigned int width;
	/** vertical number of pixels in macroblock */
	unsigned int height;
	/** size of block in bytes */
	unsigned int size;
};

/** description of supported format */
struct mxr_format {
	/** format name/mnemonic */
	const char *name;
	/** fourcc identifier */
	u32 fourcc;
	/** colorspace identifier */
	enum v4l2_colorspace colorspace;
	/** number of planes in image data */
	int num_planes;
	/** description of block for each plane */
	struct mxr_block plane[MXR_MAX_PLANES];
	/** number of subframes in image data */
	int num_subframes;
	/** specifies to which subframe a given plane belongs */
	int plane2subframe[MXR_MAX_PLANES];
	/** internal code, driver dependent */
	unsigned long cookie;
};

/** description of crop configuration for image */
struct mxr_crop {
	/** width of layer in pixels */
	unsigned int full_width;
	/** height of layer in pixels */
	unsigned int full_height;
	/** horizontal offset of first pixel to be displayed */
	unsigned int x_offset;
	/** vertical offset of first pixel to be displayed */
	unsigned int y_offset;
	/** width of displayed data in pixels */
	unsigned int width;
	/** height of displayed data in pixels */
	unsigned int height;
	/** indicate which fields are present in buffer */
	unsigned int field;
};

/** description of transformation from source to destination image */
struct mxr_geometry {
	/** cropping for source image */
	struct mxr_crop src;
	/** cropping for destination image */
	struct mxr_crop dst;
	/** layer-dependent description of horizontal scaling */
	unsigned int x_ratio;
	/** layer-dependent description of vertical scaling */
	unsigned int y_ratio;
};
100
/** instance of a buffer */
struct mxr_buffer {
	/** common v4l buffer stuff -- must be first */
	struct vb2_buffer vb;
	/** node for layer's lists */
	struct list_head list;
};


/** internal states of layer */
enum mxr_layer_state {
	/** layer is not shown */
	MXR_LAYER_IDLE = 0,
	/** state between STREAMON and hardware start */
	MXR_LAYER_STREAMING_START,
	/** layer is shown */
	MXR_LAYER_STREAMING,
	/** state before STREAMOFF is finished */
	MXR_LAYER_STREAMING_FINISH,
};
121
/** forward declarations */
struct mxr_device;
struct mxr_layer;

/** callbacks for layer operations */
struct mxr_layer_ops {
	/* TODO: try to port it to subdev API */
	/** handler for resource release function */
	void (*release)(struct mxr_layer *);
	/** setting buffer to HW */
	void (*buffer_set)(struct mxr_layer *, struct mxr_buffer *);
	/** setting format and geometry in HW */
	void (*format_set)(struct mxr_layer *);
	/** streaming stop/start */
	void (*stream_set)(struct mxr_layer *, int);
	/** adjusting geometry */
	void (*fix_geometry)(struct mxr_layer *);
};

/** layer instance, a single window and content displayed on output */
struct mxr_layer {
	/** parent mixer device */
	struct mxr_device *mdev;
	/** layer index (unique identifier) */
	int idx;
	/** callbacks for layer methods */
	struct mxr_layer_ops ops;
	/** format array */
	const struct mxr_format **fmt_array;
	/** size of format array */
	unsigned long fmt_array_size;

	/** lock for protection of list and state fields */
	spinlock_t enq_slock;
	/** list for enqueued buffers */
	struct list_head enq_list;
	/** buffer currently owned by hardware in temporary registers */
	struct mxr_buffer *update_buf;
	/** buffer currently owned by hardware in shadow registers */
	struct mxr_buffer *shadow_buf;
	/** state of layer IDLE/STREAMING */
	enum mxr_layer_state state;

	/** mutex for protection of fields below */
	struct mutex mutex;
	/** handler for video node */
	struct video_device vfd;
	/** queue for output buffers */
	struct vb2_queue vb_queue;
	/** current image format */
	const struct mxr_format *fmt;
	/** current geometry of image */
	struct mxr_geometry geo;
};
176
/** description of mixer's output interface */
struct mxr_output {
	/** name of output */
	char name[32];
	/** output subdev */
	struct v4l2_subdev *sd;
	/** cookie used for configuration of registers */
	int cookie;
};

/** specify source of output subdevs */
struct mxr_output_conf {
	/** name of output (connector) */
	char *output_name;
	/** name of module that generates output subdev */
	char *module_name;
	/** cookie needed for mixer HW */
	int cookie;
};

struct clk;
struct regulator;

/** auxiliary resources used by the mixer */
struct mxr_resources {
	/** interrupt index */
	int irq;
	/** pointer to Mixer registers */
	void __iomem *mxr_regs;
	/** pointer to Video Processor registers */
	void __iomem *vp_regs;
	/** other resources, should be used under mxr_device.mutex */
	struct clk *mixer;
	struct clk *vp;
	struct clk *sclk_mixer;
	struct clk *sclk_hdmi;
	struct clk *sclk_dac;
};

/* event flags used */
enum mxr_devide_flags {
	MXR_EVENT_VSYNC = 0,
};
220
/** driver's instance */
struct mxr_device {
	/** master device */
	struct device *dev;
	/** state of each layer */
	struct mxr_layer *layer[MXR_MAX_LAYERS];
	/** state of each output */
	struct mxr_output *output[MXR_MAX_OUTPUTS];
	/** number of registered outputs */
	int output_cnt;

	/* video resources */

	/** V4L2 device */
	struct v4l2_device v4l2_dev;
	/** context of allocator */
	void *alloc_ctx;
	/** event wait queue */
	wait_queue_head_t event_queue;
	/** state flags */
	unsigned long event_flags;

	/** spinlock for protection of registers */
	spinlock_t reg_slock;

	/** mutex for protection of fields below */
	struct mutex mutex;
	/** number of entities dependent on output configuration */
	int n_output;
	/** number of users that do streaming */
	int n_streamer;
	/** index of current output */
	int current_output;
	/** auxiliary resources used by the mixer */
	struct mxr_resources res;
};

/** transform device structure into mixer device */
static inline struct mxr_device *to_mdev(struct device *dev)
{
	struct v4l2_device *vdev = dev_get_drvdata(dev);
	return container_of(vdev, struct mxr_device, v4l2_dev);
}

/** get current output data, should be called under mdev's mutex */
static inline struct mxr_output *to_output(struct mxr_device *mdev)
{
	return mdev->output[mdev->current_output];
}

/** get current output subdev, should be called under mdev's mutex */
static inline struct v4l2_subdev *to_outsd(struct mxr_device *mdev)
{
	struct mxr_output *out = to_output(mdev);
	return out ? out->sd : NULL;
}
277
/** forward declaration for mixer platform data */
struct mxr_platform_data;

/** acquiring common video resources */
int __devinit mxr_acquire_video(struct mxr_device *mdev,
	struct mxr_output_conf *output_cont, int output_count);

/** releasing common video resources */
void __devexit mxr_release_video(struct mxr_device *mdev);

struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx);
struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx);
struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
	int idx, char *name, struct mxr_layer_ops *ops);

void mxr_base_layer_release(struct mxr_layer *layer);
void mxr_layer_release(struct mxr_layer *layer);

int mxr_base_layer_register(struct mxr_layer *layer);
void mxr_base_layer_unregister(struct mxr_layer *layer);

unsigned long mxr_get_plane_size(const struct mxr_block *blk,
	unsigned int width, unsigned int height);

/** adds new consumer for mixer's power */
int __must_check mxr_power_get(struct mxr_device *mdev);
/** removes consumer for mixer's power */
void mxr_power_put(struct mxr_device *mdev);
/** adds a new client for output configuration */
void mxr_output_get(struct mxr_device *mdev);
/** removes a client for output configuration */
void mxr_output_put(struct mxr_device *mdev);
/** adds a new client for streaming */
void mxr_streamer_get(struct mxr_device *mdev);
/** removes a client for streaming */
void mxr_streamer_put(struct mxr_device *mdev);
/** returns format of data delivered to current output */
void mxr_get_mbus_fmt(struct mxr_device *mdev,
	struct v4l2_mbus_framefmt *mbus_fmt);

/* Debug */

#define mxr_err(mdev, fmt, ...) dev_err(mdev->dev, fmt, ##__VA_ARGS__)
#define mxr_warn(mdev, fmt, ...) dev_warn(mdev->dev, fmt, ##__VA_ARGS__)
#define mxr_info(mdev, fmt, ...) dev_info(mdev->dev, fmt, ##__VA_ARGS__)

#ifdef CONFIG_VIDEO_SAMSUNG_S5P_MIXER_DEBUG
	#define mxr_dbg(mdev, fmt, ...) dev_dbg(mdev->dev, fmt, ##__VA_ARGS__)
#else
	#define mxr_dbg(mdev, fmt, ...) do { (void) mdev; } while (0)
#endif

/* accessing Mixer's and Video Processor's registers */

void mxr_vsync_set_update(struct mxr_device *mdev, int en);
void mxr_reg_reset(struct mxr_device *mdev);
irqreturn_t mxr_irq_handler(int irq, void *dev_data);
void mxr_reg_s_output(struct mxr_device *mdev, int cookie);
void mxr_reg_streamon(struct mxr_device *mdev);
void mxr_reg_streamoff(struct mxr_device *mdev);
int mxr_reg_wait4vsync(struct mxr_device *mdev);
void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
	struct v4l2_mbus_framefmt *fmt);
void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en);
void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr);
void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
	const struct mxr_format *fmt, const struct mxr_geometry *geo);

void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en);
void mxr_reg_vp_buffer(struct mxr_device *mdev,
	dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2]);
void mxr_reg_vp_format(struct mxr_device *mdev,
	const struct mxr_format *fmt, const struct mxr_geometry *geo);
void mxr_reg_dump(struct mxr_device *mdev);
352
353#endif /* SAMSUNG_MIXER_H */
354
diff --git a/drivers/media/video/s5p-tv/mixer_drv.c b/drivers/media/video/s5p-tv/mixer_drv.c
new file mode 100644
index 000000000000..00643094b221
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer_drv.c
@@ -0,0 +1,487 @@
1/*
2 * Samsung TV Mixer driver
3 *
4 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
5 *
6 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
11 * or (at your option) any later version
12 */
13
14#include "mixer.h"
15
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/io.h>
19#include <linux/interrupt.h>
20#include <linux/irq.h>
21#include <linux/fb.h>
22#include <linux/delay.h>
23#include <linux/pm_runtime.h>
24#include <linux/clk.h>
25
26MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
27MODULE_DESCRIPTION("Samsung MIXER");
28MODULE_LICENSE("GPL");
29
30/* --------- DRIVER PARAMETERS ---------- */
31
/*
 * Supported output connectors; module_name is used to request_module()
 * the auxiliary subdev driver, cookie selects the mixer output path
 * (1 = HDMI, 0 = SDO/analog) in mxr_reg_s_output().
 */
static struct mxr_output_conf mxr_output_conf[] = {
	{
		.output_name = "S5P HDMI connector",
		.module_name = "s5p-hdmi",
		.cookie = 1,
	},
	{
		.output_name = "S5P SDO connector",
		.module_name = "s5p-sdo",
		.cookie = 0,
	},
};
44
/*
 * Query the media-bus format from the currently selected output subdev.
 * Takes mdev->mutex to keep the current-output selection stable while
 * the subdev is queried; failure is only warned about, @mbus_fmt may
 * then be left unmodified.
 */
void mxr_get_mbus_fmt(struct mxr_device *mdev,
	struct v4l2_mbus_framefmt *mbus_fmt)
{
	struct v4l2_subdev *sd;
	int ret;

	mutex_lock(&mdev->mutex);
	sd = to_outsd(mdev);
	ret = v4l2_subdev_call(sd, video, g_mbus_fmt, mbus_fmt);
	WARN(ret, "failed to get mbus_fmt for output %s\n", sd->name);
	mutex_unlock(&mdev->mutex);
}
57
/*
 * Take a streaming reference on the mixer.  The first streamer performs
 * the full start-up sequence: route the mixer clock to the selected
 * output (DAC for SDO, HDMI otherwise), program the output path, fetch
 * the output's media-bus format, start the output subdev, then start
 * the mixer and wait one vsync so the configuration is latched.
 */
void mxr_streamer_get(struct mxr_device *mdev)
{
	mutex_lock(&mdev->mutex);
	++mdev->n_streamer;
	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);
	if (mdev->n_streamer == 1) {
		struct v4l2_subdev *sd = to_outsd(mdev);
		struct v4l2_mbus_framefmt mbus_fmt;
		struct mxr_resources *res = &mdev->res;
		int ret;

		/* cookie == 0 means the analog SDO output, see mxr_output_conf */
		if (to_output(mdev)->cookie == 0)
			clk_set_parent(res->sclk_mixer, res->sclk_dac);
		else
			clk_set_parent(res->sclk_mixer, res->sclk_hdmi);
		mxr_reg_s_output(mdev, to_output(mdev)->cookie);

		ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mbus_fmt);
		WARN(ret, "failed to get mbus_fmt for output %s\n", sd->name);
		ret = v4l2_subdev_call(sd, video, s_stream, 1);
		WARN(ret, "starting stream failed for output %s\n", sd->name);

		mxr_reg_set_mbus_fmt(mdev, &mbus_fmt);
		mxr_reg_streamon(mdev);
		/* wait for a vsync so the shadow registers are applied */
		ret = mxr_reg_wait4vsync(mdev);
		WARN(ret, "failed to get vsync (%d) from output\n", ret);
	}
	mutex_unlock(&mdev->mutex);
	mxr_reg_dump(mdev);
	/* FIXME: what to do when streaming fails? */
}
89
/*
 * Drop a streaming reference.  The last streamer stops the mixer,
 * waits one vsync so the stop is latched in hardware, and then stops
 * the output subdev.  An unbalanced put (negative count) is warned
 * about but not otherwise handled.
 */
void mxr_streamer_put(struct mxr_device *mdev)
{
	mutex_lock(&mdev->mutex);
	--mdev->n_streamer;
	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);
	if (mdev->n_streamer == 0) {
		int ret;
		struct v4l2_subdev *sd = to_outsd(mdev);

		mxr_reg_streamoff(mdev);
		/* vsync applies Mixer setup */
		ret = mxr_reg_wait4vsync(mdev);
		WARN(ret, "failed to get vsync (%d) from output\n", ret);
		ret = v4l2_subdev_call(sd, video, s_stream, 0);
		WARN(ret, "stopping stream failed for output %s\n", sd->name);
	}
	WARN(mdev->n_streamer < 0, "negative number of streamers (%d)\n",
		mdev->n_streamer);
	mutex_unlock(&mdev->mutex);
	mxr_reg_dump(mdev);
}
111
/*
 * Take a reference on the current output; the first user powers up the
 * auxiliary output subdev (HDMI/SDO) via the core s_power op.
 */
void mxr_output_get(struct mxr_device *mdev)
{
	mutex_lock(&mdev->mutex);
	++mdev->n_output;
	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_output);
	/* turn on auxiliary driver */
	if (mdev->n_output == 1)
		v4l2_subdev_call(to_outsd(mdev), core, s_power, 1);
	mutex_unlock(&mdev->mutex);
}
122
/*
 * Drop a reference on the current output; the last user powers down
 * the auxiliary output subdev.  Unbalanced puts are only warned about.
 */
void mxr_output_put(struct mxr_device *mdev)
{
	mutex_lock(&mdev->mutex);
	--mdev->n_output;
	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_output);
	/* turn off auxiliary driver */
	if (mdev->n_output == 0)
		v4l2_subdev_call(to_outsd(mdev), core, s_power, 0);
	WARN(mdev->n_output < 0, "negative number of output users (%d)\n",
		mdev->n_output);
	mutex_unlock(&mdev->mutex);
}
135
/*
 * Enable the device via runtime PM.  pm_runtime_get_sync() returns 1
 * when power was already enabled; normalize any non-error result to 0
 * so callers only see 0 or a negative errno.
 */
int mxr_power_get(struct mxr_device *mdev)
{
	int ret = pm_runtime_get_sync(mdev->dev);

	/* returning 1 means that power is already enabled,
	 * so zero success be returned */
	if (IS_ERR_VALUE(ret))
		return ret;
	return 0;
}
146
/* release the runtime-PM reference taken by mxr_power_get() */
void mxr_power_put(struct mxr_device *mdev)
{
	pm_runtime_put_sync(mdev->dev);
}
151
152/* --------- RESOURCE MANAGEMENT -------------*/
153
/*
 * Map the "mxr" and "vp" register windows and install the mixer IRQ
 * handler from the platform resources of @pdev.  On failure the
 * already-acquired resources are unwound in reverse order.
 * Returns 0 on success or a negative errno.
 */
static int __devinit mxr_acquire_plat_resources(struct mxr_device *mdev,
	struct platform_device *pdev)
{
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr");
	if (res == NULL) {
		mxr_err(mdev, "get memory resource failed.\n");
		ret = -ENXIO;
		goto fail;
	}

	mdev->res.mxr_regs = ioremap(res->start, resource_size(res));
	if (mdev->res.mxr_regs == NULL) {
		mxr_err(mdev, "register mapping failed.\n");
		ret = -ENXIO;
		goto fail;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp");
	if (res == NULL) {
		mxr_err(mdev, "get memory resource failed.\n");
		ret = -ENXIO;
		goto fail_mxr_regs;
	}

	mdev->res.vp_regs = ioremap(res->start, resource_size(res));
	if (mdev->res.vp_regs == NULL) {
		mxr_err(mdev, "register mapping failed.\n");
		ret = -ENXIO;
		goto fail_mxr_regs;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
	if (res == NULL) {
		mxr_err(mdev, "get interrupt resource failed.\n");
		ret = -ENXIO;
		goto fail_vp_regs;
	}

	ret = request_irq(res->start, mxr_irq_handler, 0, "s5p-mixer", mdev);
	if (ret) {
		mxr_err(mdev, "request interrupt failed.\n");
		goto fail_vp_regs;
	}
	/* remember the IRQ number for free_irq() at release time */
	mdev->res.irq = res->start;

	return 0;

fail_vp_regs:
	iounmap(mdev->res.vp_regs);

fail_mxr_regs:
	iounmap(mdev->res.mxr_regs);

fail:
	return ret;
}
213
/* undo mxr_acquire_plat_resources(): free the IRQ and unmap registers */
static void mxr_release_plat_resources(struct mxr_device *mdev)
{
	free_irq(mdev->res.irq, mdev);
	iounmap(mdev->res.vp_regs);
	iounmap(mdev->res.mxr_regs);
}
220
/*
 * Release every clock that was successfully acquired.  Safe to call on
 * a partially-initialized mxr_resources: clocks that were never
 * obtained hold an error pointer (or NULL) and are skipped.
 */
static void mxr_release_clocks(struct mxr_device *mdev)
{
	struct mxr_resources *res = &mdev->res;

	if (!IS_ERR_OR_NULL(res->sclk_dac))
		clk_put(res->sclk_dac);
	if (!IS_ERR_OR_NULL(res->sclk_hdmi))
		clk_put(res->sclk_hdmi);
	if (!IS_ERR_OR_NULL(res->sclk_mixer))
		clk_put(res->sclk_mixer);
	if (!IS_ERR_OR_NULL(res->vp))
		clk_put(res->vp);
	if (!IS_ERR_OR_NULL(res->mixer))
		clk_put(res->mixer);
}
236
/*
 * Look up all clocks the mixer needs (gate clocks "mixer"/"vp" and the
 * source clocks "sclk_mixer"/"sclk_hdmi"/"sclk_dac").  On any failure
 * all previously acquired clocks are released and -ENODEV is returned.
 */
static int mxr_acquire_clocks(struct mxr_device *mdev)
{
	struct mxr_resources *res = &mdev->res;
	struct device *dev = mdev->dev;

	res->mixer = clk_get(dev, "mixer");
	if (IS_ERR_OR_NULL(res->mixer)) {
		mxr_err(mdev, "failed to get clock 'mixer'\n");
		goto fail;
	}
	res->vp = clk_get(dev, "vp");
	if (IS_ERR_OR_NULL(res->vp)) {
		mxr_err(mdev, "failed to get clock 'vp'\n");
		goto fail;
	}
	res->sclk_mixer = clk_get(dev, "sclk_mixer");
	if (IS_ERR_OR_NULL(res->sclk_mixer)) {
		mxr_err(mdev, "failed to get clock 'sclk_mixer'\n");
		goto fail;
	}
	res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
	if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
		mxr_err(mdev, "failed to get clock 'sclk_hdmi'\n");
		goto fail;
	}
	res->sclk_dac = clk_get(dev, "sclk_dac");
	if (IS_ERR_OR_NULL(res->sclk_dac)) {
		mxr_err(mdev, "failed to get clock 'sclk_dac'\n");
		goto fail;
	}

	return 0;
fail:
	/* mxr_release_clocks() skips clocks that were not acquired */
	mxr_release_clocks(mdev);
	return -ENODEV;
}
273
/*
 * Acquire all hardware resources (registers, IRQ, clocks) for the
 * mixer.  Unwinds platform resources if clock acquisition fails.
 * Returns 0 on success or a negative errno.
 */
static int __devinit mxr_acquire_resources(struct mxr_device *mdev,
	struct platform_device *pdev)
{
	int ret;
	ret = mxr_acquire_plat_resources(mdev, pdev);

	if (ret)
		goto fail;

	ret = mxr_acquire_clocks(mdev);
	if (ret)
		goto fail_plat;

	mxr_info(mdev, "resources acquired\n");
	return 0;

fail_plat:
	mxr_release_plat_resources(mdev);
fail:
	mxr_err(mdev, "resources acquire failed\n");
	return ret;
}
296
/* release clocks and platform resources, then clear the bookkeeping */
static void mxr_release_resources(struct mxr_device *mdev)
{
	mxr_release_clocks(mdev);
	mxr_release_plat_resources(mdev);
	/* zero the struct so stale pointers cannot be reused */
	memset(&mdev->res, 0, sizeof mdev->res);
}
303
/* release every layer that was created; NULL slots are skipped */
static void mxr_release_layers(struct mxr_device *mdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mdev->layer); ++i)
		if (mdev->layer[i])
			mxr_layer_release(mdev->layer[i]);
}
312
/*
 * Create the two graphic layers and the video-processor layer.  If any
 * creation fails, all successfully created layers are released and
 * -ENODEV is returned.  @pdata is currently unused.
 */
static int __devinit mxr_acquire_layers(struct mxr_device *mdev,
	struct mxr_platform_data *pdata)
{
	mdev->layer[0] = mxr_graph_layer_create(mdev, 0);
	mdev->layer[1] = mxr_graph_layer_create(mdev, 1);
	mdev->layer[2] = mxr_vp_layer_create(mdev, 0);

	if (!mdev->layer[0] || !mdev->layer[1] || !mdev->layer[2]) {
		mxr_err(mdev, "failed to acquire layers\n");
		goto fail;
	}

	return 0;

fail:
	/* mxr_release_layers() handles the NULL entries */
	mxr_release_layers(mdev);
	return -ENODEV;
}
331
332/* ---------- POWER MANAGEMENT ----------- */
333
/*
 * Runtime-PM resume: enable the mixer clocks and re-apply the default
 * register configuration (register contents are lost while the power
 * domain is off).
 */
static int mxr_runtime_resume(struct device *dev)
{
	struct mxr_device *mdev = to_mdev(dev);
	struct mxr_resources *res = &mdev->res;

	mxr_dbg(mdev, "resume - start\n");
	mutex_lock(&mdev->mutex);
	/* turn clocks on */
	clk_enable(res->mixer);
	clk_enable(res->vp);
	clk_enable(res->sclk_mixer);
	/* apply default configuration */
	mxr_reg_reset(mdev);
	mxr_dbg(mdev, "resume - finished\n");

	mutex_unlock(&mdev->mutex);
	return 0;
}
352
/* runtime-PM suspend: gate the clocks in reverse order of resume */
static int mxr_runtime_suspend(struct device *dev)
{
	struct mxr_device *mdev = to_mdev(dev);
	struct mxr_resources *res = &mdev->res;
	mxr_dbg(mdev, "suspend - start\n");
	mutex_lock(&mdev->mutex);
	/* turn clocks off */
	clk_disable(res->sclk_mixer);
	clk_disable(res->vp);
	clk_disable(res->mixer);
	mutex_unlock(&mdev->mutex);
	mxr_dbg(mdev, "suspend - finished\n");
	return 0;
}
367
/* runtime-PM callbacks; no system sleep ops are provided */
static const struct dev_pm_ops mxr_pm_ops = {
	.runtime_suspend = mxr_runtime_suspend,
	.runtime_resume = mxr_runtime_resume,
};
372
373/* --------- DRIVER INITIALIZATION ---------- */
374
375static int __devinit mxr_probe(struct platform_device *pdev)
376{
377 struct device *dev = &pdev->dev;
378 struct mxr_platform_data *pdata = dev->platform_data;
379 struct mxr_device *mdev;
380 int ret;
381
382 /* mdev does not exist yet so no mxr_dbg is used */
383 dev_info(dev, "probe start\n");
384
385 mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
386 if (!mdev) {
387 mxr_err(mdev, "not enough memory.\n");
388 ret = -ENOMEM;
389 goto fail;
390 }
391
392 /* setup pointer to master device */
393 mdev->dev = dev;
394
395 mutex_init(&mdev->mutex);
396 spin_lock_init(&mdev->reg_slock);
397 init_waitqueue_head(&mdev->event_queue);
398
399 /* acquire resources: regs, irqs, clocks, regulators */
400 ret = mxr_acquire_resources(mdev, pdev);
401 if (ret)
402 goto fail_mem;
403
404 /* configure resources for video output */
405 ret = mxr_acquire_video(mdev, mxr_output_conf,
406 ARRAY_SIZE(mxr_output_conf));
407 if (ret)
408 goto fail_resources;
409
410 /* configure layers */
411 ret = mxr_acquire_layers(mdev, pdata);
412 if (ret)
413 goto fail_video;
414
415 pm_runtime_enable(dev);
416
417 mxr_info(mdev, "probe successful\n");
418 return 0;
419
420fail_video:
421 mxr_release_video(mdev);
422
423fail_resources:
424 mxr_release_resources(mdev);
425
426fail_mem:
427 kfree(mdev);
428
429fail:
430 dev_info(dev, "probe failed\n");
431 return ret;
432}
433
434static int __devexit mxr_remove(struct platform_device *pdev)
435{
436 struct device *dev = &pdev->dev;
437 struct mxr_device *mdev = to_mdev(dev);
438
439 pm_runtime_disable(dev);
440
441 mxr_release_layers(mdev);
442 mxr_release_video(mdev);
443 mxr_release_resources(mdev);
444
445 kfree(mdev);
446
447 dev_info(dev, "remove sucessful\n");
448 return 0;
449}
450
/* platform driver glue; __refdata because probe/remove are __devinit/__devexit */
static struct platform_driver mxr_driver __refdata = {
	.probe = mxr_probe,
	.remove = __devexit_p(mxr_remove),
	.driver = {
		.name = MXR_DRIVER_NAME,
		.owner = THIS_MODULE,
		.pm = &mxr_pm_ops,
	}
};
460
461static int __init mxr_init(void)
462{
463 int i, ret;
464 static const char banner[] __initdata = KERN_INFO
465 "Samsung TV Mixer driver, "
466 "(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
467 printk(banner);
468
469 /* Loading auxiliary modules */
470 for (i = 0; i < ARRAY_SIZE(mxr_output_conf); ++i)
471 request_module(mxr_output_conf[i].module_name);
472
473 ret = platform_driver_register(&mxr_driver);
474 if (ret != 0) {
475 printk(KERN_ERR "registration of MIXER driver failed\n");
476 return -ENXIO;
477 }
478
479 return 0;
480}
481module_init(mxr_init);
482
/* module exit: unregister the platform driver */
static void __exit mxr_exit(void)
{
	platform_driver_unregister(&mxr_driver);
}
487module_exit(mxr_exit);
diff --git a/drivers/media/video/s5p-tv/mixer_grp_layer.c b/drivers/media/video/s5p-tv/mixer_grp_layer.c
new file mode 100644
index 000000000000..58f0ba49580f
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer_grp_layer.c
@@ -0,0 +1,185 @@
1/*
2 * Samsung TV Mixer driver
3 *
4 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
5 *
6 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
11 * or (at your option) any later version
12 */
13
14#include "mixer.h"
15
16#include <media/videobuf2-dma-contig.h>
17
18/* FORMAT DEFINITIONS */
19
/*
 * Pixel formats supported by the graphic layers.  All are single-plane
 * RGB formats; .cookie is the raw value programmed into the
 * MXR_GRP_CFG_FORMAT field of the graphic configuration register.
 */
static const struct mxr_format mxr_fb_fmt_rgb565 = {
	.name = "RGB565",
	.fourcc = V4L2_PIX_FMT_RGB565,
	.colorspace = V4L2_COLORSPACE_SRGB,
	.num_planes = 1,
	.plane = {
		{ .width = 1, .height = 1, .size = 2 },
	},
	.num_subframes = 1,
	.cookie = 4,
};

static const struct mxr_format mxr_fb_fmt_argb1555 = {
	.name = "ARGB1555",
	.num_planes = 1,
	.fourcc = V4L2_PIX_FMT_RGB555,
	.colorspace = V4L2_COLORSPACE_SRGB,
	.plane = {
		{ .width = 1, .height = 1, .size = 2 },
	},
	.num_subframes = 1,
	.cookie = 5,
};

static const struct mxr_format mxr_fb_fmt_argb4444 = {
	.name = "ARGB4444",
	.num_planes = 1,
	.fourcc = V4L2_PIX_FMT_RGB444,
	.colorspace = V4L2_COLORSPACE_SRGB,
	.plane = {
		{ .width = 1, .height = 1, .size = 2 },
	},
	.num_subframes = 1,
	.cookie = 6,
};

static const struct mxr_format mxr_fb_fmt_argb8888 = {
	.name = "ARGB8888",
	.fourcc = V4L2_PIX_FMT_BGR32,
	.colorspace = V4L2_COLORSPACE_SRGB,
	.num_planes = 1,
	.plane = {
		{ .width = 1, .height = 1, .size = 4 },
	},
	.num_subframes = 1,
	.cookie = 7,
};

/* format list exported to the layer via fmt_array */
static const struct mxr_format *mxr_graph_format[] = {
	&mxr_fb_fmt_rgb565,
	&mxr_fb_fmt_argb1555,
	&mxr_fb_fmt_argb4444,
	&mxr_fb_fmt_argb8888,
};
74
75/* AUXILIARY CALLBACKS */
76
/* layer op: unregister the video node, then free the base layer */
static void mxr_graph_layer_release(struct mxr_layer *layer)
{
	mxr_base_layer_unregister(layer);
	mxr_base_layer_release(layer);
}
82
/*
 * Layer op: program the DMA address of @buf into the hardware; a NULL
 * buffer programs address 0, which disables the layer in
 * mxr_reg_graph_buffer().
 */
static void mxr_graph_buffer_set(struct mxr_layer *layer,
	struct mxr_buffer *buf)
{
	dma_addr_t addr = 0;

	if (buf)
		addr = vb2_dma_contig_plane_paddr(&buf->vb, 0);
	mxr_reg_graph_buffer(layer->mdev, layer->idx, addr);
}
92
/* layer op: forward stream on/off to the register layer */
static void mxr_graph_stream_set(struct mxr_layer *layer, int en)
{
	mxr_reg_graph_layer_stream(layer->mdev, layer->idx, en);
}
97
/* layer op: apply the layer's current format and geometry to hardware */
static void mxr_graph_format_set(struct mxr_layer *layer)
{
	mxr_reg_graph_format(layer->mdev, layer->idx,
		layer->fmt, &layer->geo);
}
103
/*
 * Layer op: clamp the user-requested geometry to what the graphic
 * layer hardware can do.  Sources are limited to 32767x2047, cropping
 * on the Y axis is not supported, and only 1x/2x scaling exists (the
 * destination is shrunk/grown to an exact multiple of the source).
 */
static void mxr_graph_fix_geometry(struct mxr_layer *layer)
{
	struct mxr_geometry *geo = &layer->geo;

	/* limit to boundary size */
	geo->src.full_width = clamp_val(geo->src.full_width, 1, 32767);
	geo->src.full_height = clamp_val(geo->src.full_height, 1, 2047);
	geo->src.width = clamp_val(geo->src.width, 1, geo->src.full_width);
	geo->src.width = min(geo->src.width, 2047U);
	/* not possible to crop on Y axis */
	geo->src.y_offset = min(geo->src.y_offset, geo->src.full_height - 1);
	geo->src.height = geo->src.full_height - geo->src.y_offset;
	/* limiting offset */
	geo->src.x_offset = min(geo->src.x_offset,
		geo->src.full_width - geo->src.width);

	/* setting position in output */
	geo->dst.width = min(geo->dst.width, geo->dst.full_width);
	geo->dst.height = min(geo->dst.height, geo->dst.full_height);

	/* Mixer supports only 1x and 2x scaling */
	if (geo->dst.width >= 2 * geo->src.width) {
		geo->x_ratio = 1;
		geo->dst.width = 2 * geo->src.width;
	} else {
		geo->x_ratio = 0;
		geo->dst.width = geo->src.width;
	}

	if (geo->dst.height >= 2 * geo->src.height) {
		geo->y_ratio = 1;
		geo->dst.height = 2 * geo->src.height;
	} else {
		geo->y_ratio = 0;
		geo->dst.height = geo->src.height;
	}

	/* keep the destination window inside the output frame */
	geo->dst.x_offset = min(geo->dst.x_offset,
		geo->dst.full_width - geo->dst.width);
	geo->dst.y_offset = min(geo->dst.y_offset,
		geo->dst.full_height - geo->dst.height);
}
146
147/* PUBLIC API */
148
149struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
150{
151 struct mxr_layer *layer;
152 int ret;
153 struct mxr_layer_ops ops = {
154 .release = mxr_graph_layer_release,
155 .buffer_set = mxr_graph_buffer_set,
156 .stream_set = mxr_graph_stream_set,
157 .format_set = mxr_graph_format_set,
158 .fix_geometry = mxr_graph_fix_geometry,
159 };
160 char name[32];
161
162 sprintf(name, "graph%d", idx);
163
164 layer = mxr_base_layer_create(mdev, idx, name, &ops);
165 if (layer == NULL) {
166 mxr_err(mdev, "failed to initialize layer(%d) base\n", idx);
167 goto fail;
168 }
169
170 layer->fmt_array = mxr_graph_format;
171 layer->fmt_array_size = ARRAY_SIZE(mxr_graph_format);
172
173 ret = mxr_base_layer_register(layer);
174 if (ret)
175 goto fail_layer;
176
177 return layer;
178
179fail_layer:
180 mxr_base_layer_release(layer);
181
182fail:
183 return NULL;
184}
185
diff --git a/drivers/media/video/s5p-tv/mixer_reg.c b/drivers/media/video/s5p-tv/mixer_reg.c
new file mode 100644
index 000000000000..38dac672aa1c
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer_reg.c
@@ -0,0 +1,541 @@
1/*
2 * Samsung TV Mixer driver
3 *
4 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
5 *
6 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
11 * or (at your option) any later version
12 */
13
14#include "mixer.h"
15#include "regs-mixer.h"
16#include "regs-vp.h"
17
18#include <linux/delay.h>
19
20/* Register access subroutines */
21
/* read a Video Processor register */
static inline u32 vp_read(struct mxr_device *mdev, u32 reg_id)
{
	return readl(mdev->res.vp_regs + reg_id);
}
26
/* write a Video Processor register */
static inline void vp_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
	writel(val, mdev->res.vp_regs + reg_id);
}
31
/* read-modify-write: change only the bits selected by @mask in a VP register */
static inline void vp_write_mask(struct mxr_device *mdev, u32 reg_id,
	u32 val, u32 mask)
{
	u32 old = vp_read(mdev, reg_id);

	val = (val & mask) | (old & ~mask);
	writel(val, mdev->res.vp_regs + reg_id);
}
40
/* read a Mixer register */
static inline u32 mxr_read(struct mxr_device *mdev, u32 reg_id)
{
	return readl(mdev->res.mxr_regs + reg_id);
}
45
/* write a Mixer register */
static inline void mxr_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
	writel(val, mdev->res.mxr_regs + reg_id);
}
50
/* read-modify-write: change only the bits selected by @mask in a Mixer register */
static inline void mxr_write_mask(struct mxr_device *mdev, u32 reg_id,
	u32 val, u32 mask)
{
	u32 old = mxr_read(mdev, reg_id);

	val = (val & mask) | (old & ~mask);
	writel(val, mdev->res.mxr_regs + reg_id);
}
59
/*
 * Enable/disable the vsync-synchronized shadow-register update in both
 * the Mixer and the Video Processor; register writes made while this
 * is disabled are latched only after it is re-enabled.
 */
void mxr_vsync_set_update(struct mxr_device *mdev, int en)
{
	/* block update on vsync */
	mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_SYNC_ENABLE : 0,
		MXR_STATUS_SYNC_ENABLE);
	vp_write(mdev, VP_SHADOW_UPDATE, en ? VP_SHADOW_UPDATE_ENABLE : 0);
}
67
/*
 * Soft-reset the Video Processor and poll (up to 100 x 10ms) until the
 * hardware clears the PROCESSING bit; warns if the reset never
 * completes.
 */
static void __mxr_reg_vp_reset(struct mxr_device *mdev)
{
	int tries = 100;

	vp_write(mdev, VP_SRESET, VP_SRESET_PROCESSING);
	for (tries = 100; tries; --tries) {
		/* waiting until VP_SRESET_PROCESSING is 0 */
		if (~vp_read(mdev, VP_SRESET) & VP_SRESET_PROCESSING)
			break;
		mdelay(10);
	}
	WARN(tries == 0, "failed to reset Video Processor\n");
}
81
82static void mxr_reg_vp_default_filter(struct mxr_device *mdev);
83
/*
 * Program the default hardware configuration: output color mode, DMA
 * burst, layer priorities, background color, graphic-layer blending,
 * Video Processor reset + default filters, and interrupt enables.
 * Shadow updates are blocked for the duration so the whole setup is
 * applied atomically on the next vsync.
 */
void mxr_reg_reset(struct mxr_device *mdev)
{
	unsigned long flags;
	u32 val; /* value stored to register */

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* set output in YUV444 mode */
	mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_YUV444);

	/* 16 beat burst in DMA */
	mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
		MXR_STATUS_BURST_MASK);

	/* setting default layer priority: layer1 > video > layer0
	 * because typical usage scenario would be
	 * layer0 - framebuffer
	 * video - video overlay
	 * layer1 - OSD
	 */
	val = MXR_LAYER_CFG_GRP0_VAL(1);
	val |= MXR_LAYER_CFG_VP_VAL(2);
	val |= MXR_LAYER_CFG_GRP1_VAL(3);
	mxr_write(mdev, MXR_LAYER_CFG, val);

	/* use dark gray background color */
	mxr_write(mdev, MXR_BG_COLOR0, 0x808080);
	mxr_write(mdev, MXR_BG_COLOR1, 0x808080);
	mxr_write(mdev, MXR_BG_COLOR2, 0x808080);

	/* setting graphical layers */

	val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
	val |= MXR_GRP_CFG_BLEND_PRE_MUL; /* premul mode */
	val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */

	/* the same configuration for both layers */
	mxr_write(mdev, MXR_GRAPHIC_CFG(0), val);
	mxr_write(mdev, MXR_GRAPHIC_CFG(1), val);

	/* configuration of Video Processor Registers */
	__mxr_reg_vp_reset(mdev);
	mxr_reg_vp_default_filter(mdev);

	/* enable all interrupts */
	mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
135
/*
 * Program pixel format and geometry (span, size, scaling, source and
 * destination offsets) for graphic layer @idx; applied atomically on
 * the next vsync.
 */
void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
	const struct mxr_format *fmt, const struct mxr_geometry *geo)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* setup format */
	mxr_write_mask(mdev, MXR_GRAPHIC_CFG(idx),
		MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK);

	/* setup geometry */
	mxr_write(mdev, MXR_GRAPHIC_SPAN(idx), geo->src.full_width);
	val = MXR_GRP_WH_WIDTH(geo->src.width);
	val |= MXR_GRP_WH_HEIGHT(geo->src.height);
	val |= MXR_GRP_WH_H_SCALE(geo->x_ratio);
	val |= MXR_GRP_WH_V_SCALE(geo->y_ratio);
	mxr_write(mdev, MXR_GRAPHIC_WH(idx), val);

	/* setup offsets in source image */
	val = MXR_GRP_SXY_SX(geo->src.x_offset);
	val |= MXR_GRP_SXY_SY(geo->src.y_offset);
	mxr_write(mdev, MXR_GRAPHIC_SXY(idx), val);

	/* setup offsets in display image */
	val = MXR_GRP_DXY_DX(geo->dst.x_offset);
	val |= MXR_GRP_DXY_DY(geo->dst.y_offset);
	mxr_write(mdev, MXR_GRAPHIC_DXY(idx), val);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
170
/*
 * Program format and geometry of the Video Processor layer.  For
 * interlaced destinations, height and vertical offset are halved per
 * field.  Applied atomically on the next vsync.
 */
void mxr_reg_vp_format(struct mxr_device *mdev,
	const struct mxr_format *fmt, const struct mxr_geometry *geo)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	vp_write_mask(mdev, VP_MODE, fmt->cookie, VP_MODE_FMT_MASK);

	/* setting size of input image */
	vp_write(mdev, VP_IMG_SIZE_Y, VP_IMG_HSIZE(geo->src.full_width) |
		VP_IMG_VSIZE(geo->src.full_height));
	/* chroma height has to be reduced by 2 to avoid chroma distortions */
	vp_write(mdev, VP_IMG_SIZE_C, VP_IMG_HSIZE(geo->src.full_width) |
		VP_IMG_VSIZE(geo->src.full_height / 2));

	vp_write(mdev, VP_SRC_WIDTH, geo->src.width);
	vp_write(mdev, VP_SRC_HEIGHT, geo->src.height);
	vp_write(mdev, VP_SRC_H_POSITION,
		VP_SRC_H_POSITION_VAL(geo->src.x_offset));
	vp_write(mdev, VP_SRC_V_POSITION, geo->src.y_offset);

	vp_write(mdev, VP_DST_WIDTH, geo->dst.width);
	vp_write(mdev, VP_DST_H_POSITION, geo->dst.x_offset);
	if (geo->dst.field == V4L2_FIELD_INTERLACED) {
		/* per-field values for interlaced output */
		vp_write(mdev, VP_DST_HEIGHT, geo->dst.height / 2);
		vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset / 2);
	} else {
		vp_write(mdev, VP_DST_HEIGHT, geo->dst.height);
		vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset);
	}

	vp_write(mdev, VP_H_RATIO, geo->x_ratio);
	vp_write(mdev, VP_V_RATIO, geo->y_ratio);

	vp_write(mdev, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);

}
213
/*
 * Program the DMA base address of graphic layer @idx; a zero address
 * also disables the layer in MXR_CFG, a non-zero address enables it.
 */
void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr)
{
	u32 val = addr ? ~0 : 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	if (idx == 0)
		mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
	else
		mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
	mxr_write(mdev, MXR_GRAPHIC_BASE(idx), addr);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
231
/*
 * Program luma/chroma DMA addresses (top and bottom field) for the
 * Video Processor layer; a zero luma address disables both the VP and
 * its path through the mixer.
 */
void mxr_reg_vp_buffer(struct mxr_device *mdev,
	dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2])
{
	u32 val = luma_addr[0] ? ~0 : 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_VP_ENABLE);
	vp_write_mask(mdev, VP_ENABLE, val, VP_ENABLE_ON);
	/* TODO: fix tiled mode */
	vp_write(mdev, VP_TOP_Y_PTR, luma_addr[0]);
	vp_write(mdev, VP_TOP_C_PTR, chroma_addr[0]);
	vp_write(mdev, VP_BOT_Y_PTR, luma_addr[1]);
	vp_write(mdev, VP_BOT_C_PTR, chroma_addr[1]);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
252
253static void mxr_irq_layer_handle(struct mxr_layer *layer)
254{
255 struct list_head *head = &layer->enq_list;
256 struct mxr_buffer *done;
257
258 /* skip non-existing layer */
259 if (layer == NULL)
260 return;
261
262 spin_lock(&layer->enq_slock);
263 if (layer->state == MXR_LAYER_IDLE)
264 goto done;
265
266 done = layer->shadow_buf;
267 layer->shadow_buf = layer->update_buf;
268
269 if (list_empty(head)) {
270 if (layer->state != MXR_LAYER_STREAMING)
271 layer->update_buf = NULL;
272 } else {
273 struct mxr_buffer *next;
274 next = list_first_entry(head, struct mxr_buffer, list);
275 list_del(&next->list);
276 layer->update_buf = next;
277 }
278
279 layer->ops.buffer_set(layer, layer->update_buf);
280
281 if (done && done != layer->shadow_buf)
282 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
283
284done:
285 spin_unlock(&layer->enq_slock);
286}
287
/*
 * Mixer interrupt handler: wake any mxr_reg_wait4vsync() waiter,
 * acknowledge the interrupt (vsync uses a separate clear bit), and on
 * a vsync event advance the buffer pipeline of every layer.
 */
irqreturn_t mxr_irq_handler(int irq, void *dev_data)
{
	struct mxr_device *mdev = dev_data;
	u32 i, val;

	spin_lock(&mdev->reg_slock);
	val = mxr_read(mdev, MXR_INT_STATUS);

	/* wake up process waiting for VSYNC */
	if (val & MXR_INT_STATUS_VSYNC) {
		set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
		wake_up(&mdev->event_queue);
	}

	/* clear interrupts */
	if (~val & MXR_INT_EN_VSYNC) {
		/* vsync interrupt use different bit for read and clear */
		val &= ~MXR_INT_EN_VSYNC;
		val |= MXR_INT_CLEAR_VSYNC;
	}
	mxr_write(mdev, MXR_INT_STATUS, val);

	spin_unlock(&mdev->reg_slock);
	/* leave on non-vsync event */
	if (~val & MXR_INT_CLEAR_VSYNC)
		return IRQ_HANDLED;
	for (i = 0; i < MXR_MAX_LAYERS; ++i)
		mxr_irq_layer_handle(mdev->layer[i]);
	return IRQ_HANDLED;
}
318
319void mxr_reg_s_output(struct mxr_device *mdev, int cookie)
320{
321 u32 val;
322
323 val = cookie == 0 ? MXR_CFG_DST_SDO : MXR_CFG_DST_HDMI;
324 mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_DST_MASK);
325}
326
/* start the mixer engine (sets MXR_STATUS_REG_RUN) */
void mxr_reg_streamon(struct mxr_device *mdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	/* single write -> no need to block vsync update */

	/* start MIXER */
	mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);

	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
339
/* stop the mixer engine (clears MXR_STATUS_REG_RUN) */
void mxr_reg_streamoff(struct mxr_device *mdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	/* single write -> no need to block vsync update */

	/* stop MIXER */
	mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_REG_RUN);

	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
352
/*
 * Sleep until the next vsync interrupt (signalled by mxr_irq_handler()
 * via MXR_EVENT_VSYNC) or a 1-second timeout.  Returns 0 on vsync,
 * -ETIME on timeout, or a negative error from the wait.
 */
int mxr_reg_wait4vsync(struct mxr_device *mdev)
{
	int ret;

	clear_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
	/* TODO: consider adding interruptible */
	ret = wait_event_timeout(mdev->event_queue,
		test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
		msecs_to_jiffies(1000));
	if (ret > 0)
		return 0;
	if (ret < 0)
		return ret;
	mxr_warn(mdev, "no vsync detected - timeout\n");
	return -ETIME;
}
369
/*
 * Configure the output scan mode from the media-bus format: interlace
 * vs progressive, and SD/HD standard selected by the frame height
 * (480=NTSC, 576=PAL, 720/1080=HD).  Also switches VP line skipping
 * and field toggling for interlaced output.  Applied on next vsync.
 */
void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
	struct v4l2_mbus_framefmt *fmt)
{
	u32 val = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* choosing between interlace and progressive mode */
	if (fmt->field == V4L2_FIELD_INTERLACED)
		val |= MXR_CFG_SCAN_INTERLACE;
	else
		val |= MXR_CFG_SCAN_PROGRASSIVE;

	/* choosing between proper HD and SD mode */
	if (fmt->height == 480)
		val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
	else if (fmt->height == 576)
		val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
	else if (fmt->height == 720)
		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
	else if (fmt->height == 1080)
		val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
	else
		WARN(1, "unrecognized mbus height %u!\n", fmt->height);

	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK);

	val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
	vp_write_mask(mdev, VP_MODE, val,
		VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
406
/* stream on/off stub for graphic layers: enabling is handled via
 * mxr_reg_graph_buffer(), so nothing to do here */
void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en)
{
	/* no extra actions need to be done */
}

/* stream on/off stub for the VP layer: enabling is handled via
 * mxr_reg_vp_buffer(), so nothing to do here */
void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en)
{
	/* no extra actions need to be done */
}
416
/*
 * Default polyphase scaler coefficients for the Video Processor,
 * stored as signed 8-bit values packed four-per-register by
 * mxr_reg_vp_filter_set().
 */

/* 8-tap horizontal luma filter */
static const u8 filter_y_horiz_tap8[] = {
	0,	-1,	-1,	-1,	-1,	-1,	-1,	-1,
	-1,	-1,	-1,	-1,	-1,	0,	0,	0,
	0,	2,	4,	5,	6,	6,	6,	6,
	6,	5,	5,	4,	3,	2,	1,	1,
	0,	-6,	-12,	-16,	-18,	-20,	-21,	-20,
	-20,	-18,	-16,	-13,	-10,	-8,	-5,	-2,
	127,	126,	125,	121,	114,	107,	99,	89,
	79,	68,	57,	46,	35,	25,	16,	8,
};

/* 4-tap vertical luma filter */
static const u8 filter_y_vert_tap4[] = {
	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
	127,	126,	124,	118,	111,	102,	92,	81,
	70,	59,	48,	37,	27,	19,	11,	5,
	0,	5,	11,	19,	27,	37,	48,	59,
	70,	81,	92,	102,	111,	118,	124,	126,
	0,	0,	-1,	-1,	-2,	-3,	-4,	-5,
	-6,	-7,	-8,	-8,	-8,	-8,	-6,	-3,
};

/* 4-tap horizontal chroma filter */
static const u8 filter_cr_horiz_tap4[] = {
	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
	127,	126,	124,	118,	111,	102,	92,	81,
	70,	59,	48,	37,	27,	19,	11,	5,
};
445
/*
 * Load a coefficient table into consecutive VP filter registers,
 * packing four bytes (big-endian within the word) per 32-bit write.
 * @size must be a multiple of 4.
 */
static inline void mxr_reg_vp_filter_set(struct mxr_device *mdev,
	int reg_id, const u8 *data, unsigned int size)
{
	/* assure 4-byte align */
	BUG_ON(size & 3);
	for (; size; size -= 4, reg_id += 4, data += 4) {
		u32 val = (data[0] << 24) |  (data[1] << 16) |
			(data[2] << 8) | data[3];
		vp_write(mdev, reg_id, val);
	}
}
457
/* load the default luma/chroma polyphase filter coefficients */
static void mxr_reg_vp_default_filter(struct mxr_device *mdev)
{
	mxr_reg_vp_filter_set(mdev, VP_POLY8_Y0_LL,
		filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
	mxr_reg_vp_filter_set(mdev, VP_POLY4_Y0_LL,
		filter_y_vert_tap4, sizeof filter_y_vert_tap4);
	mxr_reg_vp_filter_set(mdev, VP_POLY4_C0_LL,
		filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
}
467
/* dump the main Mixer registers via mxr_dbg (debug builds only) */
static void mxr_reg_mxr_dump(struct mxr_device *mdev)
{
#define DUMPREG(reg_id) \
do { \
	mxr_dbg(mdev, #reg_id " = %08x\n", \
		(u32)readl(mdev->res.mxr_regs + reg_id)); \
} while (0)

	DUMPREG(MXR_STATUS);
	DUMPREG(MXR_CFG);
	DUMPREG(MXR_INT_EN);
	DUMPREG(MXR_INT_STATUS);

	DUMPREG(MXR_LAYER_CFG);
	DUMPREG(MXR_VIDEO_CFG);

	DUMPREG(MXR_GRAPHIC0_CFG);
	DUMPREG(MXR_GRAPHIC0_BASE);
	DUMPREG(MXR_GRAPHIC0_SPAN);
	DUMPREG(MXR_GRAPHIC0_WH);
	DUMPREG(MXR_GRAPHIC0_SXY);
	DUMPREG(MXR_GRAPHIC0_DXY);

	DUMPREG(MXR_GRAPHIC1_CFG);
	DUMPREG(MXR_GRAPHIC1_BASE);
	DUMPREG(MXR_GRAPHIC1_SPAN);
	DUMPREG(MXR_GRAPHIC1_WH);
	DUMPREG(MXR_GRAPHIC1_SXY);
	DUMPREG(MXR_GRAPHIC1_DXY);
#undef DUMPREG
}
499
/* dump the main Video Processor registers via mxr_dbg (debug builds only) */
static void mxr_reg_vp_dump(struct mxr_device *mdev)
{
#define DUMPREG(reg_id) \
do { \
	mxr_dbg(mdev, #reg_id " = %08x\n", \
		(u32) readl(mdev->res.vp_regs + reg_id)); \
} while (0)


	DUMPREG(VP_ENABLE);
	DUMPREG(VP_SRESET);
	DUMPREG(VP_SHADOW_UPDATE);
	DUMPREG(VP_FIELD_ID);
	DUMPREG(VP_MODE);
	DUMPREG(VP_IMG_SIZE_Y);
	DUMPREG(VP_IMG_SIZE_C);
	DUMPREG(VP_PER_RATE_CTRL);
	DUMPREG(VP_TOP_Y_PTR);
	DUMPREG(VP_BOT_Y_PTR);
	DUMPREG(VP_TOP_C_PTR);
	DUMPREG(VP_BOT_C_PTR);
	DUMPREG(VP_ENDIAN_MODE);
	DUMPREG(VP_SRC_H_POSITION);
	DUMPREG(VP_SRC_V_POSITION);
	DUMPREG(VP_SRC_WIDTH);
	DUMPREG(VP_SRC_HEIGHT);
	DUMPREG(VP_DST_H_POSITION);
	DUMPREG(VP_DST_V_POSITION);
	DUMPREG(VP_DST_WIDTH);
	DUMPREG(VP_DST_HEIGHT);
	DUMPREG(VP_H_RATIO);
	DUMPREG(VP_V_RATIO);

#undef DUMPREG
}
535
/* dump all Mixer and Video Processor registers */
void mxr_reg_dump(struct mxr_device *mdev)
{
	mxr_reg_mxr_dump(mdev);
	mxr_reg_vp_dump(mdev);
}
541
diff --git a/drivers/media/video/s5p-tv/mixer_video.c b/drivers/media/video/s5p-tv/mixer_video.c
new file mode 100644
index 000000000000..43ac22f35bc7
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer_video.c
@@ -0,0 +1,1006 @@
1/*
2 * Samsung TV Mixer driver
3 *
4 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
5 *
6 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
11 * or (at your option) any later version
12 */
13
14#include "mixer.h"
15
16#include <media/v4l2-ioctl.h>
17#include <linux/videodev2.h>
18#include <linux/mm.h>
19#include <linux/version.h>
20#include <linux/timer.h>
21#include <media/videobuf2-dma-contig.h>
22
/*
 * driver_for_each_device() callback: capture the v4l2_subdev stored as
 * the device's drvdata into *p.
 */
static int find_reg_callback(struct device *dev, void *p)
{
	struct v4l2_subdev **sd = p;

	*sd = dev_get_drvdata(dev);
	/* non-zero value stops iteration */
	return 1;
}
31
/*
 * Locate the platform driver called @module_name, take the v4l2_subdev
 * published by its first bound device and register it with the mixer's
 * v4l2_device.  Returns the registered subdev, or NULL on any failure.
 */
static struct v4l2_subdev *find_and_register_subdev(
	struct mxr_device *mdev, char *module_name)
{
	struct device_driver *drv;
	struct v4l2_subdev *sd = NULL;
	int ret;

	/* TODO: add waiting until probe is finished */
	drv = driver_find(module_name, &platform_bus_type);
	if (!drv) {
		mxr_warn(mdev, "module %s is missing\n", module_name);
		return NULL;
	}
	/* driver refcnt is increased, it is safe to iterate over devices */
	ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
	/* ret == 0 means that find_reg_callback was never executed */
	if (sd == NULL) {
		mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
		goto done;
	}
	/* v4l2_device_register_subdev detects if sd is NULL */
	ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
	if (ret) {
		mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
		sd = NULL;
	}

done:
	/* drop the reference taken by driver_find() */
	put_driver(drv);
	return sd;
}
63
64int __devinit mxr_acquire_video(struct mxr_device *mdev,
65 struct mxr_output_conf *output_conf, int output_count)
66{
67 struct device *dev = mdev->dev;
68 struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
69 int i;
70 int ret = 0;
71 struct v4l2_subdev *sd;
72
73 strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
74 /* prepare context for V4L2 device */
75 ret = v4l2_device_register(dev, v4l2_dev);
76 if (ret) {
77 mxr_err(mdev, "could not register v4l2 device.\n");
78 goto fail;
79 }
80
81 mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
82 if (IS_ERR_OR_NULL(mdev->alloc_ctx)) {
83 mxr_err(mdev, "could not acquire vb2 allocator\n");
84 goto fail_v4l2_dev;
85 }
86
87 /* registering outputs */
88 mdev->output_cnt = 0;
89 for (i = 0; i < output_count; ++i) {
90 struct mxr_output_conf *conf = &output_conf[i];
91 struct mxr_output *out;
92
93 sd = find_and_register_subdev(mdev, conf->module_name);
94 /* trying to register next output */
95 if (sd == NULL)
96 continue;
97 out = kzalloc(sizeof *out, GFP_KERNEL);
98 if (out == NULL) {
99 mxr_err(mdev, "no memory for '%s'\n",
100 conf->output_name);
101 ret = -ENOMEM;
102 /* registered subdevs are removed in fail_v4l2_dev */
103 goto fail_output;
104 }
105 strlcpy(out->name, conf->output_name, sizeof(out->name));
106 out->sd = sd;
107 out->cookie = conf->cookie;
108 mdev->output[mdev->output_cnt++] = out;
109 mxr_info(mdev, "added output '%s' from module '%s'\n",
110 conf->output_name, conf->module_name);
111 /* checking if maximal number of outputs is reached */
112 if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
113 break;
114 }
115
116 if (mdev->output_cnt == 0) {
117 mxr_err(mdev, "failed to register any output\n");
118 ret = -ENODEV;
119 /* skipping fail_output because there is nothing to free */
120 goto fail_vb2_allocator;
121 }
122
123 return 0;
124
125fail_output:
126 /* kfree is NULL-safe */
127 for (i = 0; i < mdev->output_cnt; ++i)
128 kfree(mdev->output[i]);
129 memset(mdev->output, 0, sizeof mdev->output);
130
131fail_vb2_allocator:
132 /* freeing allocator context */
133 vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
134
135fail_v4l2_dev:
136 /* NOTE: automatically unregister all subdevs */
137 v4l2_device_unregister(v4l2_dev);
138
139fail:
140 return ret;
141}
142
/* tear down everything acquired by mxr_acquire_video() */
void __devexit mxr_release_video(struct mxr_device *mdev)
{
	int i;

	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);

	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
	/* also unregisters all subdevs attached to the v4l2_device */
	v4l2_device_unregister(&mdev->v4l2_dev);
}
154
155static int mxr_querycap(struct file *file, void *priv,
156 struct v4l2_capability *cap)
157{
158 struct mxr_layer *layer = video_drvdata(file);
159
160 mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
161
162 strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof cap->driver);
163 strlcpy(cap->card, layer->vfd.name, sizeof cap->card);
164 sprintf(cap->bus_info, "%d", layer->idx);
165 cap->version = KERNEL_VERSION(0, 1, 0);
166 cap->capabilities = V4L2_CAP_STREAMING |
167 V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
168
169 return 0;
170}
171
172/* Geometry handling */
173static void mxr_layer_geo_fix(struct mxr_layer *layer)
174{
175 struct mxr_device *mdev = layer->mdev;
176 struct v4l2_mbus_framefmt mbus_fmt;
177
178 /* TODO: add some dirty flag to avoid unnecessary adjustments */
179 mxr_get_mbus_fmt(mdev, &mbus_fmt);
180 layer->geo.dst.full_width = mbus_fmt.width;
181 layer->geo.dst.full_height = mbus_fmt.height;
182 layer->geo.dst.field = mbus_fmt.field;
183 layer->ops.fix_geometry(layer);
184}
185
/*
 * Reset the layer geometry to a full-screen default: source and
 * destination both cover the whole media-bus frame, no offsets.
 */
static void mxr_layer_default_geo(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	memset(&layer->geo, 0, sizeof(layer->geo));

	mxr_get_mbus_fmt(mdev, &mbus_fmt);

	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.width = layer->geo.dst.full_width;
	layer->geo.dst.height = layer->geo.dst.full_height;
	layer->geo.dst.field = mbus_fmt.field;

	layer->geo.src.full_width = mbus_fmt.width;
	layer->geo.src.full_height = mbus_fmt.height;
	layer->geo.src.width = layer->geo.src.full_width;
	layer->geo.src.height = layer->geo.src.full_height;

	/* let the layer driver clamp the defaults to hardware limits */
	layer->ops.fix_geometry(layer);
}
208
/* log every field of a geometry description for debugging */
static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
{
	mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
		geo->src.full_width, geo->src.full_height);
	mxr_dbg(mdev, "src.size = (%u, %u)\n",
		geo->src.width, geo->src.height);
	mxr_dbg(mdev, "src.offset = (%u, %u)\n",
		geo->src.x_offset, geo->src.y_offset);
	mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
		geo->dst.full_width, geo->dst.full_height);
	mxr_dbg(mdev, "dst.size = (%u, %u)\n",
		geo->dst.width, geo->dst.height);
	mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
		geo->dst.x_offset, geo->dst.y_offset);
	mxr_dbg(mdev, "ratio = (%u, %u)\n",
		geo->x_ratio, geo->y_ratio);
}
226
227
228static const struct mxr_format *find_format_by_fourcc(
229 struct mxr_layer *layer, unsigned long fourcc);
230static const struct mxr_format *find_format_by_index(
231 struct mxr_layer *layer, unsigned long index);
232
/* VIDIOC_ENUM_FMT handler: describe the format at f->index, if any */
static int mxr_enum_fmt(struct file *file, void *priv,
	struct v4l2_fmtdesc *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	const struct mxr_format *fmt;

	mxr_dbg(mdev, "%s\n", __func__);
	fmt = find_format_by_index(layer, f->index);
	if (fmt == NULL)
		return -EINVAL;

	strlcpy(f->description, fmt->name, sizeof(f->description));
	f->pixelformat = fmt->fourcc;

	return 0;
}
250
/*
 * VIDIOC_S_FMT handler: select the pixel format and set the source
 * geometry from the requested width/height; destination geometry is
 * derived afterwards by mxr_layer_geo_fix().
 */
static int mxr_s_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	const struct mxr_format *fmt;
	struct v4l2_pix_format_mplane *pix;
	struct mxr_device *mdev = layer->mdev;
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);

	pix = &f->fmt.pix_mp;
	fmt = find_format_by_fourcc(layer, pix->pixelformat);
	if (fmt == NULL) {
		mxr_warn(mdev, "not recognized fourcc: %08x\n",
			pix->pixelformat);
		return -EINVAL;
	}
	layer->fmt = fmt;
	geo->src.full_width = pix->width;
	geo->src.width = pix->width;
	geo->src.full_height = pix->height;
	geo->src.height = pix->height;
	/* assure consistency of geometry */
	mxr_layer_geo_fix(layer);
	mxr_dbg(mdev, "width=%u height=%u span=%u\n",
		geo->src.width, geo->src.height, geo->src.full_width);

	return 0;
}
281
/*
 * divup - integer division rounded towards plus infinity (ceiling)
 * @dividend: value to divide
 * @divisor: must be non-zero
 *
 * Uses the quotient/remainder form instead of (a + b - 1) / b, which
 * silently wraps when @dividend is close to UINT_MAX.
 */
static unsigned int divup(unsigned int dividend, unsigned int divisor)
{
	return dividend / divisor + (dividend % divisor ? 1 : 0);
}
286
/*
 * Compute the byte size of one plane: the image is covered by blocks
 * of blk->width x blk->height pixels, each occupying blk->size bytes.
 */
unsigned long mxr_get_plane_size(const struct mxr_block *blk,
	unsigned int width, unsigned int height)
{
	unsigned int bl_width = divup(width, blk->width);
	unsigned int bl_height = divup(height, blk->height);

	return bl_width * bl_height * blk->size;
}
295
/*
 * Fill per-subframe plane descriptions (sizeimage, bytesperline) for
 * @fmt at the given resolution.  Planes mapped onto the same subframe
 * accumulate their sizes; bytesperline keeps the maximum.
 * NOTE(review): bytesperline = bl_width * blk->size / blk->height
 * assumes blk->size divides evenly by blk->height - confirm for the
 * tiled block geometries.
 */
static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
	const struct mxr_format *fmt, u32 width, u32 height)
{
	int i;

	memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
	for (i = 0; i < fmt->num_planes; ++i) {
		struct v4l2_plane_pix_format *plane = planes
			+ fmt->plane2subframe[i];
		const struct mxr_block *blk = &fmt->plane[i];
		u32 bl_width = divup(width, blk->width);
		u32 bl_height = divup(height, blk->height);
		u32 sizeimage = bl_width * bl_height * blk->size;
		u16 bytesperline = bl_width * blk->size / blk->height;

		plane->sizeimage += sizeimage;
		plane->bytesperline = max(plane->bytesperline, bytesperline);
	}
}
315
/* VIDIOC_G_FMT handler: report the current format and source size */
static int mxr_g_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	pix->width = layer->geo.src.full_width;
	pix->height = layer->geo.src.full_height;
	pix->field = V4L2_FIELD_NONE;
	pix->pixelformat = layer->fmt->fourcc;
	pix->colorspace = layer->fmt->colorspace;
	mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);

	return 0;
}
333
334static inline struct mxr_crop *choose_crop_by_type(struct mxr_geometry *geo,
335 enum v4l2_buf_type type)
336{
337 switch (type) {
338 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
339 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
340 return &geo->dst;
341 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
342 return &geo->src;
343 default:
344 return NULL;
345 }
346}
347
/* VIDIOC_G_CROP handler: return the crop rectangle for a->type */
static int mxr_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_crop *crop;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	crop = choose_crop_by_type(&layer->geo, a->type);
	if (crop == NULL)
		return -EINVAL;
	/* make sure the reported rectangle satisfies hardware limits */
	mxr_layer_geo_fix(layer);
	a->c.left = crop->x_offset;
	a->c.top = crop->y_offset;
	a->c.width = crop->width;
	a->c.height = crop->height;
	return 0;
}
364
/* VIDIOC_S_CROP handler: set the crop rectangle for a->type */
static int mxr_s_crop(struct file *file, void *fh, struct v4l2_crop *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_crop *crop;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	crop = choose_crop_by_type(&layer->geo, a->type);
	if (crop == NULL)
		return -EINVAL;
	crop->x_offset = a->c.left;
	crop->y_offset = a->c.top;
	crop->width = a->c.width;
	crop->height = a->c.height;
	/* clamp the requested rectangle to hardware limits */
	mxr_layer_geo_fix(layer);
	return 0;
}
381
382static int mxr_cropcap(struct file *file, void *fh, struct v4l2_cropcap *a)
383{
384 struct mxr_layer *layer = video_drvdata(file);
385 struct mxr_crop *crop;
386
387 mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
388 crop = choose_crop_by_type(&layer->geo, a->type);
389 if (crop == NULL)
390 return -EINVAL;
391 mxr_layer_geo_fix(layer);
392 a->bounds.left = 0;
393 a->bounds.top = 0;
394 a->bounds.width = crop->full_width;
395 a->bounds.top = crop->full_height;
396 a->defrect = a->bounds;
397 /* setting pixel aspect to 1/1 */
398 a->pixelaspect.numerator = 1;
399 a->pixelaspect.denominator = 1;
400 return 0;
401}
402
/* VIDIOC_ENUM_DV_PRESETS: forward to the current output subdev */
static int mxr_enum_dv_presets(struct file *file, void *fh,
	struct v4l2_dv_enum_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_presets, preset);
	mutex_unlock(&mdev->mutex);

	/* any failure maps to EINVAL per the V4L2 spec */
	return ret ? -EINVAL : 0;
}
417
/*
 * VIDIOC_S_DV_PRESET: forward to the current output subdev; refused
 * with -EBUSY while any entity depends on the output configuration.
 */
static int mxr_s_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* preset change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_preset, preset);

	mutex_unlock(&mdev->mutex);

	/* any failure should return EINVAL according to V4L2 doc */
	return ret ? -EINVAL : 0;
}
443
/* VIDIOC_G_DV_PRESET: forward to the current output subdev */
static int mxr_g_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_preset, preset);
	mutex_unlock(&mdev->mutex);

	/* any failure maps to EINVAL per the V4L2 spec */
	return ret ? -EINVAL : 0;
}
458
/*
 * VIDIOC_S_STD: set the analog TV standard on the current output;
 * refused with -EBUSY while any entity depends on the output config.
 */
static int mxr_s_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* standard change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, *norm);

	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}
482
/* VIDIOC_G_STD: read the analog TV standard from the current output */
static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}
496
/*
 * VIDIOC_ENUM_OUTPUT: describe the output at a->index, including the
 * TV norms and the preset/standard capabilities of its subdev.
 */
static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	struct mxr_output *out;
	struct v4l2_subdev *sd;

	if (a->index >= mdev->output_cnt)
		return -EINVAL;
	out = mdev->output[a->index];
	/* every slot below output_cnt was filled during acquire */
	BUG_ON(out == NULL);
	sd = out->sd;
	strlcpy(a->name, out->name, sizeof(a->name));

	/* try to obtain supported tv norms */
	v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
	a->capabilities = 0;
	if (sd->ops->video && sd->ops->video->s_dv_preset)
		a->capabilities |= V4L2_OUT_CAP_PRESETS;
	if (sd->ops->video && sd->ops->video->s_std_output)
		a->capabilities |= V4L2_OUT_CAP_STD;
	a->type = V4L2_OUTPUT_TYPE_ANALOG;

	return 0;
}
522
/*
 * VIDIOC_S_OUTPUT: switch the current output; refused with -EBUSY
 * while any entity depends on the output configuration.  Also refresh
 * the video device's tvnorms from the newly selected subdev.
 */
static int mxr_s_output(struct file *file, void *fh, unsigned int i)
{
	struct video_device *vfd = video_devdata(file);
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	if (i >= mdev->output_cnt || mdev->output[i] == NULL)
		return -EINVAL;

	mutex_lock(&mdev->mutex);
	if (mdev->n_output > 0) {
		ret = -EBUSY;
		goto done;
	}
	mdev->current_output = i;
	vfd->tvnorms = 0;
	v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
		&vfd->tvnorms);
	mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);

done:
	mutex_unlock(&mdev->mutex);
	return ret;
}
548
/* VIDIOC_G_OUTPUT: report the index of the current output */
static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	/* serialize against mxr_s_output() updating current_output */
	mutex_lock(&mdev->mutex);
	*p = mdev->current_output;
	mutex_unlock(&mdev->mutex);

	return 0;
}
560
/* VIDIOC_REQBUFS: thin wrapper around the layer's vb2 queue */
static int mxr_reqbufs(struct file *file, void *priv,
	struct v4l2_requestbuffers *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_reqbufs(&layer->vb_queue, p);
}
569
/* VIDIOC_QUERYBUF: thin wrapper around the layer's vb2 queue */
static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_querybuf(&layer->vb_queue, p);
}
577
/* VIDIOC_QBUF: thin wrapper around the layer's vb2 queue */
static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
	return vb2_qbuf(&layer->vb_queue, p);
}
585
/* VIDIOC_DQBUF: thin wrapper; honours O_NONBLOCK from the file flags */
static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
}
593
/* VIDIOC_STREAMON: thin wrapper around the layer's vb2 queue */
static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamon(&layer->vb_queue, i);
}
601
/* VIDIOC_STREAMOFF: thin wrapper around the layer's vb2 queue */
static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamoff(&layer->vb_queue, i);
}
609
/* V4L2 ioctl dispatch table shared by all mixer layer video nodes */
static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
	.vidioc_querycap = mxr_querycap,
	/* format handling */
	.vidioc_enum_fmt_vid_out = mxr_enum_fmt,
	.vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
	.vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
	/* buffer control */
	.vidioc_reqbufs = mxr_reqbufs,
	.vidioc_querybuf = mxr_querybuf,
	.vidioc_qbuf = mxr_qbuf,
	.vidioc_dqbuf = mxr_dqbuf,
	/* Streaming control */
	.vidioc_streamon = mxr_streamon,
	.vidioc_streamoff = mxr_streamoff,
	/* Preset functions */
	.vidioc_enum_dv_presets = mxr_enum_dv_presets,
	.vidioc_s_dv_preset = mxr_s_dv_preset,
	.vidioc_g_dv_preset = mxr_g_dv_preset,
	/* analog TV standard functions */
	.vidioc_s_std = mxr_s_std,
	.vidioc_g_std = mxr_g_std,
	/* Output handling */
	.vidioc_enum_output = mxr_enum_output,
	.vidioc_s_output = mxr_s_output,
	.vidioc_g_output = mxr_g_output,
	/* Crop ioctls */
	.vidioc_g_crop = mxr_g_crop,
	.vidioc_s_crop = mxr_s_crop,
	.vidioc_cropcap = mxr_cropcap,
};
640
/*
 * open() handler: create a v4l2 file handle; the first opener of the
 * layer additionally powers the device, initializes the vb2 queue and
 * resets format and geometry to defaults.
 */
static int mxr_video_open(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
	/* assure device probe is finished */
	wait_for_device_probe();
	/* creating context for file descriptor */
	ret = v4l2_fh_open(file);
	if (ret) {
		mxr_err(mdev, "v4l2_fh_open failed\n");
		return ret;
	}

	/* leaving if layer is already initialized */
	if (!v4l2_fh_is_singular_file(file))
		return 0;

	/* FIXME: should power be enabled on open? */
	ret = mxr_power_get(mdev);
	if (ret) {
		mxr_err(mdev, "power on failed\n");
		goto fail_fh_open;
	}

	ret = vb2_queue_init(&layer->vb_queue);
	if (ret != 0) {
		mxr_err(mdev, "failed to initialize vb2 queue\n");
		goto fail_power;
	}
	/* set default format, first on the list */
	layer->fmt = layer->fmt_array[0];
	/* setup default geometry */
	mxr_layer_default_geo(layer);

	return 0;

fail_power:
	mxr_power_put(mdev);

fail_fh_open:
	v4l2_fh_release(file);

	return ret;
}
688
/* poll() handler: delegate readiness to the layer's vb2 queue */
static unsigned int
mxr_video_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	return vb2_poll(&layer->vb_queue, file, wait);
}
698
/* mmap() handler: map vb2 buffers into user space */
static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	return vb2_mmap(&layer->vb_queue, vma);
}
707
/*
 * release() handler: the last closer tears down the vb2 queue and
 * drops the power reference taken in mxr_video_open().
 */
static int mxr_video_release(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	if (v4l2_fh_is_singular_file(file)) {
		vb2_queue_release(&layer->vb_queue);
		mxr_power_put(layer->mdev);
	}
	v4l2_fh_release(file);
	return 0;
}
720
/* file operations shared by all mixer layer video nodes */
static const struct v4l2_file_operations mxr_fops = {
	.owner = THIS_MODULE,
	.open = mxr_video_open,
	.poll = mxr_video_poll,
	.mmap = mxr_video_mmap,
	.release = mxr_video_release,
	.unlocked_ioctl = video_ioctl2,
};
729
/*
 * vb2 queue_setup callback: derive plane count, per-plane sizes and
 * allocator contexts from the currently selected format and source
 * geometry.  Fails if no format has been configured yet.
 */
static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
	unsigned int *nplanes, unsigned long sizes[],
	void *alloc_ctxs[])
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	const struct mxr_format *fmt = layer->fmt;
	int i;
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_plane_pix_format planes[3];

	mxr_dbg(mdev, "%s\n", __func__);
	/* checking if format was configured */
	if (fmt == NULL)
		return -EINVAL;
	mxr_dbg(mdev, "fmt = %s\n", fmt->name);
	mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
		layer->geo.src.full_height);

	*nplanes = fmt->num_subframes;
	for (i = 0; i < fmt->num_subframes; ++i) {
		alloc_ctxs[i] = layer->mdev->alloc_ctx;
		/* round each plane up to a whole page */
		sizes[i] = PAGE_ALIGN(planes[i].sizeimage);
		mxr_dbg(mdev, "size[%d] = %08lx\n", i, sizes[i]);
	}

	/* guarantee at least one buffer */
	if (*nbuffers == 0)
		*nbuffers = 1;

	return 0;
}
760
/*
 * vb2 buf_queue callback: append the buffer to the layer's enqueue
 * list; the first buffer queued after STREAMING_START also enables the
 * hardware stream (done outside the spinlock).
 */
static void buf_queue(struct vb2_buffer *vb)
{
	struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
	struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	int must_start = 0;

	spin_lock_irqsave(&layer->enq_slock, flags);
	if (layer->state == MXR_LAYER_STREAMING_START) {
		layer->state = MXR_LAYER_STREAMING;
		must_start = 1;
	}
	list_add_tail(&buffer->list, &layer->enq_list);
	spin_unlock_irqrestore(&layer->enq_slock, flags);
	if (must_start) {
		layer->ops.stream_set(layer, MXR_ENABLE);
		mxr_streamer_get(mdev);
	}

	mxr_dbg(mdev, "queuing buffer\n");
}
783
/* vb2 wait_finish callback: re-take the layer mutex after sleeping */
static void wait_lock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_lock(&layer->mutex);
}
791
/* vb2 wait_prepare callback: drop the layer mutex before sleeping */
static void wait_unlock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_unlock(&layer->mutex);
}
799
/*
 * vb2 start_streaming callback: pin the output configuration, push the
 * fixed geometry and format to hardware, and arm the layer so the next
 * queued buffer (see buf_queue) actually enables the stream.
 */
static int start_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_dbg(mdev, "%s\n", __func__);
	/* block any changes in output configuration */
	mxr_output_get(mdev);

	/* update layers geometry */
	mxr_layer_geo_fix(layer);
	mxr_geometry_dump(mdev, &layer->geo);

	layer->ops.format_set(layer);
	/* enabling layer in hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_STREAMING_START;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	return 0;
}
822
/*
 * Watchdog fired while waiting for the hardware to return the last
 * buffers during stream shutdown: forcibly complete the update/shadow
 * buffers with an error status so vb2_wait_for_all_buffers() can
 * finish.
 */
static void mxr_watchdog(unsigned long arg)
{
	struct mxr_layer *layer = (struct mxr_layer *) arg;
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);

	spin_lock_irqsave(&layer->enq_slock, flags);

	/* avoid completing the same buffer twice when both point at it */
	if (layer->update_buf == layer->shadow_buf)
		layer->update_buf = NULL;
	if (layer->update_buf) {
		vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
		layer->update_buf = NULL;
	}
	if (layer->shadow_buf) {
		vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
		layer->shadow_buf = NULL;
	}
	spin_unlock_irqrestore(&layer->enq_slock, flags);
}
845
/*
 * vb2 stop_streaming callback: fail all still-enqueued buffers, wait
 * (with a 1 s watchdog) for buffers owned by hardware to complete,
 * then disable the stream and release the output configuration.
 */
static int stop_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	struct timer_list watchdog;
	struct mxr_buffer *buf, *buf_tmp;

	mxr_dbg(mdev, "%s\n", __func__);

	spin_lock_irqsave(&layer->enq_slock, flags);

	/* reset list */
	layer->state = MXR_LAYER_STREAMING_FINISH;

	/* set all buffer to be done */
	list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* give 1 second to complete the last buffers */
	setup_timer_on_stack(&watchdog, mxr_watchdog,
		(unsigned long)layer);
	mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));

	/* wait until all buffers are gone to the done state */
	vb2_wait_for_all_buffers(vq);

	/* stop timer if all synchronization is done */
	del_timer_sync(&watchdog);
	destroy_timer_on_stack(&watchdog);

	/* stopping hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_IDLE;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* disabling layer in hardware */
	layer->ops.stream_set(layer, MXR_DISABLE);
	/* remove one streamer */
	mxr_streamer_put(mdev);
	/* allow changes in output configuration */
	mxr_output_put(mdev);
	return 0;
}
894
/* vb2 queue callbacks shared by all mixer layers */
static struct vb2_ops mxr_video_qops = {
	.queue_setup = queue_setup,
	.buf_queue = buf_queue,
	.wait_prepare = wait_unlock,
	.wait_finish = wait_lock,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};
903
/* FIXME: try to move this function into mxr_base_layer_create */
/* register the layer's video device node (/dev/videoN) */
int mxr_base_layer_register(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	int ret;

	ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
	if (ret)
		mxr_err(mdev, "failed to register video device\n");
	else
		mxr_info(mdev, "registered layer %s as /dev/video%d\n",
			layer->vfd.name, layer->vfd.num);
	return ret;
}
918
/* unregister the layer's video device node */
void mxr_base_layer_unregister(struct mxr_layer *layer)
{
	video_unregister_device(&layer->vfd);
}
923
/* invoke the layer driver's release hook, if it provides one */
void mxr_layer_release(struct mxr_layer *layer)
{
	if (layer->ops.release)
		layer->ops.release(layer);
}
929
/* free the memory allocated by mxr_base_layer_create() */
void mxr_base_layer_release(struct mxr_layer *layer)
{
	kfree(layer);
}
934
/*
 * video_device release callback; the layer owns the embedded vfd, so
 * only log - freeing happens in mxr_base_layer_release().
 */
static void mxr_vfd_release(struct video_device *vdev)
{
	printk(KERN_INFO "video device release\n");
}
939
/*
 * Allocate and initialize a generic mixer layer: locks, buffer list,
 * the embedded video_device and the vb2 queue template.  The caller
 * still has to register the video node (mxr_base_layer_register).
 * Returns the new layer or NULL on allocation failure.
 */
struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
	int idx, char *name, struct mxr_layer_ops *ops)
{
	struct mxr_layer *layer;

	layer = kzalloc(sizeof *layer, GFP_KERNEL);
	if (layer == NULL) {
		mxr_err(mdev, "not enough memory for layer.\n");
		goto fail;
	}

	layer->mdev = mdev;
	layer->idx = idx;
	layer->ops = *ops;

	spin_lock_init(&layer->enq_slock);
	INIT_LIST_HEAD(&layer->enq_list);
	mutex_init(&layer->mutex);

	layer->vfd = (struct video_device) {
		.minor = -1,
		.release = mxr_vfd_release,
		.fops = &mxr_fops,
		.ioctl_ops = &mxr_ioctl_ops,
	};
	strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
	/* let framework control PRIORITY */
	set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);

	video_set_drvdata(&layer->vfd, layer);
	/* serialize ioctls through the layer mutex */
	layer->vfd.lock = &layer->mutex;
	layer->vfd.v4l2_dev = &mdev->v4l2_dev;

	layer->vb_queue = (struct vb2_queue) {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.io_modes = VB2_MMAP | VB2_USERPTR,
		.drv_priv = layer,
		.buf_struct_size = sizeof(struct mxr_buffer),
		.ops = &mxr_video_qops,
		.mem_ops = &vb2_dma_contig_memops,
	};

	return layer;

fail:
	return NULL;
}
987
/* linear search of the layer's format table by fourcc; NULL if absent */
static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc)
{
	int i;

	for (i = 0; i < layer->fmt_array_size; ++i)
		if (layer->fmt_array[i]->fourcc == fourcc)
			return layer->fmt_array[i];
	return NULL;
}
998
999static const struct mxr_format *find_format_by_index(
1000 struct mxr_layer *layer, unsigned long index)
1001{
1002 if (index >= layer->fmt_array_size)
1003 return NULL;
1004 return layer->fmt_array[index];
1005}
1006
diff --git a/drivers/media/video/s5p-tv/mixer_vp_layer.c b/drivers/media/video/s5p-tv/mixer_vp_layer.c
new file mode 100644
index 000000000000..6950ed8ac1a0
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer_vp_layer.c
@@ -0,0 +1,211 @@
1/*
2 * Samsung TV Mixer driver
3 *
4 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
5 *
6 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version.
12 */
13
14#include "mixer.h"
15
16#include "regs-vp.h"
17
18#include <media/videobuf2-dma-contig.h>
19
20/* FORMAT DEFINITIONS */
/*
 * Format descriptors for the video processor (VP) layer.
 * .plane entries give per-plane horizontal/vertical subsampling and
 * bytes-per-sample group; .cookie is the raw value programmed into
 * the VP_MODE register.
 */
static const struct mxr_format mxr_fmt_nv12 = {
	.name = "NV12",
	.fourcc = V4L2_PIX_FMT_NV12,
	.colorspace = V4L2_COLORSPACE_JPEG,
	.num_planes = 2,
	.plane = {
		{ .width = 1, .height = 1, .size = 1 },
		{ .width = 2, .height = 2, .size = 2 },
	},
	/* both planes live in one contiguous buffer */
	.num_subframes = 1,
	.cookie = VP_MODE_NV12 | VP_MODE_MEM_LINEAR,
};

static const struct mxr_format mxr_fmt_nv21 = {
	.name = "NV21",
	.fourcc = V4L2_PIX_FMT_NV21,
	.colorspace = V4L2_COLORSPACE_JPEG,
	.num_planes = 2,
	.plane = {
		{ .width = 1, .height = 1, .size = 1 },
		{ .width = 2, .height = 2, .size = 2 },
	},
	.num_subframes = 1,
	.cookie = VP_MODE_NV21 | VP_MODE_MEM_LINEAR,
};

static const struct mxr_format mxr_fmt_nv12m = {
	.name = "NV12 (mplane)",
	.fourcc = V4L2_PIX_FMT_NV12M,
	.colorspace = V4L2_COLORSPACE_JPEG,
	.num_planes = 2,
	.plane = {
		{ .width = 1, .height = 1, .size = 1 },
		{ .width = 2, .height = 2, .size = 2 },
	},
	/* multi-planar: each plane maps to its own subframe/buffer */
	.num_subframes = 2,
	.plane2subframe = {0, 1},
	.cookie = VP_MODE_NV12 | VP_MODE_MEM_LINEAR,
};

static const struct mxr_format mxr_fmt_nv12mt = {
	.name = "NV12 tiled (mplane)",
	.fourcc = V4L2_PIX_FMT_NV12MT,
	.colorspace = V4L2_COLORSPACE_JPEG,
	.num_planes = 2,
	.plane = {
		/* 64x32-pixel macroblock tiles; sizes are per-tile bytes */
		{ .width = 128, .height = 32, .size = 4096 },
		{ .width = 128, .height = 32, .size = 2048 },
	},
	.num_subframes = 2,
	.plane2subframe = {0, 1},
	.cookie = VP_MODE_NV12 | VP_MODE_MEM_TILED,
};

/* all formats selectable on the VP layer */
static const struct mxr_format *mxr_video_format[] = {
	&mxr_fmt_nv12,
	&mxr_fmt_nv21,
	&mxr_fmt_nv12m,
	&mxr_fmt_nv12mt,
};
81
82/* AUXILIARY CALLBACKS */
83
/* unregister the layer's video device, then free the layer object */
static void mxr_vp_layer_release(struct mxr_layer *layer)
{
	mxr_base_layer_unregister(layer);
	mxr_base_layer_release(layer);
}
89
/*
 * Program the VP DMA address registers for the given buffer.
 *
 * A NULL @buf clears all addresses (stops fetching).  Index [0] of each
 * array addresses the top field, index [1] the bottom field.
 */
static void mxr_vp_buffer_set(struct mxr_layer *layer,
	struct mxr_buffer *buf)
{
	dma_addr_t luma_addr[2] = {0, 0};
	dma_addr_t chroma_addr[2] = {0, 0};

	if (buf == NULL) {
		mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
		return;
	}
	luma_addr[0] = vb2_dma_contig_plane_paddr(&buf->vb, 0);
	if (layer->fmt->num_subframes == 2) {
		/* multi-planar format: chroma has its own buffer plane */
		chroma_addr[0] = vb2_dma_contig_plane_paddr(&buf->vb, 1);
	} else {
		/* single buffer: chroma plane follows the luma plane */
		/* FIXME: mxr_get_plane_size compute integer division,
		 * which is slow and should not be performed in interrupt */
		chroma_addr[0] = luma_addr[0] + mxr_get_plane_size(
			&layer->fmt->plane[0], layer->geo.src.full_width,
			layer->geo.src.full_height);
	}
	if (layer->fmt->cookie & VP_MODE_MEM_TILED) {
		/* NOTE(review): 0x40 looks like the offset of the bottom
		 * field inside a tile -- confirm against the VP datasheet */
		luma_addr[1] = luma_addr[0] + 0x40;
		chroma_addr[1] = chroma_addr[0] + 0x40;
	} else {
		/* linear layout: bottom field starts one line below the top
		 * field (assumes full_width equals the line stride in bytes
		 * -- TODO confirm) */
		luma_addr[1] = luma_addr[0] + layer->geo.src.full_width;
		chroma_addr[1] = chroma_addr[0];
	}
	mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
}
119
/* enable (@en != 0) or disable streaming of the VP layer in hardware */
static void mxr_vp_stream_set(struct mxr_layer *layer, int en)
{
	mxr_reg_vp_layer_stream(layer->mdev, en);
}
124
/* push the layer's current format and geometry into the VP registers */
static void mxr_vp_format_set(struct mxr_layer *layer)
{
	mxr_reg_vp_format(layer->mdev, layer->fmt, &layer->geo);
}
129
/*
 * Clamp the user-requested geometry to what the VP hardware supports.
 * The clamps are order-dependent: source sizes are fixed first, then the
 * destination window, then the scaling ratios are derived from the result.
 * Limits (8192 full size, 2047 crop, 1/4x..16x scale) presumably reflect
 * VP register field widths -- confirm against the datasheet.
 */
static void mxr_vp_fix_geometry(struct mxr_layer *layer)
{
	struct mxr_geometry *geo = &layer->geo;

	/* align horizontal size to 8 pixels */
	geo->src.full_width = ALIGN(geo->src.full_width, 8);
	/* limit to boundary size */
	geo->src.full_width = clamp_val(geo->src.full_width, 8, 8192);
	geo->src.full_height = clamp_val(geo->src.full_height, 1, 8192);
	geo->src.width = clamp_val(geo->src.width, 32, geo->src.full_width);
	geo->src.width = min(geo->src.width, 2047U);
	geo->src.height = clamp_val(geo->src.height, 4, geo->src.full_height);
	geo->src.height = min(geo->src.height, 2047U);

	/* setting size of output window */
	geo->dst.width = clamp_val(geo->dst.width, 8, geo->dst.full_width);
	geo->dst.height = clamp_val(geo->dst.height, 1, geo->dst.full_height);

	/* ensure that scaling is in range 1/4x to 16x */
	if (geo->src.width >= 4 * geo->dst.width)
		geo->src.width = 4 * geo->dst.width;
	if (geo->dst.width >= 16 * geo->src.width)
		geo->dst.width = 16 * geo->src.width;
	if (geo->src.height >= 4 * geo->dst.height)
		geo->src.height = 4 * geo->dst.height;
	if (geo->dst.height >= 16 * geo->src.height)
		geo->dst.height = 16 * geo->src.height;

	/* setting scaling ratio as a 16.16 fixed-point src/dst quotient */
	geo->x_ratio = (geo->src.width << 16) / geo->dst.width;
	geo->y_ratio = (geo->src.height << 16) / geo->dst.height;

	/* adjust offsets so the crop/output windows stay inside the frame */
	geo->src.x_offset = min(geo->src.x_offset,
		geo->src.full_width - geo->src.width);
	geo->src.y_offset = min(geo->src.y_offset,
		geo->src.full_height - geo->src.height);
	geo->dst.x_offset = min(geo->dst.x_offset,
		geo->dst.full_width - geo->dst.width);
	geo->dst.y_offset = min(geo->dst.y_offset,
		geo->dst.full_height - geo->dst.height);
}
172
173/* PUBLIC API */
174
175struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
176{
177 struct mxr_layer *layer;
178 int ret;
179 struct mxr_layer_ops ops = {
180 .release = mxr_vp_layer_release,
181 .buffer_set = mxr_vp_buffer_set,
182 .stream_set = mxr_vp_stream_set,
183 .format_set = mxr_vp_format_set,
184 .fix_geometry = mxr_vp_fix_geometry,
185 };
186 char name[32];
187
188 sprintf(name, "video%d", idx);
189
190 layer = mxr_base_layer_create(mdev, idx, name, &ops);
191 if (layer == NULL) {
192 mxr_err(mdev, "failed to initialize layer(%d) base\n", idx);
193 goto fail;
194 }
195
196 layer->fmt_array = mxr_video_format;
197 layer->fmt_array_size = ARRAY_SIZE(mxr_video_format);
198
199 ret = mxr_base_layer_register(layer);
200 if (ret)
201 goto fail_layer;
202
203 return layer;
204
205fail_layer:
206 mxr_base_layer_release(layer);
207
208fail:
209 return NULL;
210}
211
diff --git a/drivers/media/video/s5p-tv/regs-hdmi.h b/drivers/media/video/s5p-tv/regs-hdmi.h
new file mode 100644
index 000000000000..ac93ad6f2bc3
--- /dev/null
+++ b/drivers/media/video/s5p-tv/regs-hdmi.h
@@ -0,0 +1,141 @@
1/* linux/arch/arm/mach-exynos4/include/mach/regs-hdmi.h
2 *
3 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * HDMI register header file for Samsung TVOUT driver
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef SAMSUNG_REGS_HDMI_H
14#define SAMSUNG_REGS_HDMI_H
15
16/*
17 * Register part
18*/
19
20#define HDMI_CTRL_BASE(x) ((x) + 0x00000000)
21#define HDMI_CORE_BASE(x) ((x) + 0x00010000)
22#define HDMI_TG_BASE(x) ((x) + 0x00050000)
23
24/* Control registers */
25#define HDMI_INTC_CON HDMI_CTRL_BASE(0x0000)
26#define HDMI_INTC_FLAG HDMI_CTRL_BASE(0x0004)
27#define HDMI_HPD_STATUS HDMI_CTRL_BASE(0x000C)
28#define HDMI_PHY_RSTOUT HDMI_CTRL_BASE(0x0014)
29#define HDMI_PHY_VPLL HDMI_CTRL_BASE(0x0018)
30#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x001C)
31#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0020)
32
33/* Core registers */
34#define HDMI_CON_0 HDMI_CORE_BASE(0x0000)
35#define HDMI_CON_1 HDMI_CORE_BASE(0x0004)
36#define HDMI_CON_2 HDMI_CORE_BASE(0x0008)
37#define HDMI_SYS_STATUS HDMI_CORE_BASE(0x0010)
38#define HDMI_PHY_STATUS HDMI_CORE_BASE(0x0014)
39#define HDMI_STATUS_EN HDMI_CORE_BASE(0x0020)
40#define HDMI_HPD HDMI_CORE_BASE(0x0030)
41#define HDMI_MODE_SEL HDMI_CORE_BASE(0x0040)
42#define HDMI_BLUE_SCREEN_0 HDMI_CORE_BASE(0x0050)
43#define HDMI_BLUE_SCREEN_1 HDMI_CORE_BASE(0x0054)
44#define HDMI_BLUE_SCREEN_2 HDMI_CORE_BASE(0x0058)
45#define HDMI_H_BLANK_0 HDMI_CORE_BASE(0x00A0)
46#define HDMI_H_BLANK_1 HDMI_CORE_BASE(0x00A4)
47#define HDMI_V_BLANK_0 HDMI_CORE_BASE(0x00B0)
48#define HDMI_V_BLANK_1 HDMI_CORE_BASE(0x00B4)
49#define HDMI_V_BLANK_2 HDMI_CORE_BASE(0x00B8)
50#define HDMI_H_V_LINE_0 HDMI_CORE_BASE(0x00C0)
51#define HDMI_H_V_LINE_1 HDMI_CORE_BASE(0x00C4)
52#define HDMI_H_V_LINE_2 HDMI_CORE_BASE(0x00C8)
53#define HDMI_VSYNC_POL HDMI_CORE_BASE(0x00E4)
54#define HDMI_INT_PRO_MODE HDMI_CORE_BASE(0x00E8)
55#define HDMI_V_BLANK_F_0 HDMI_CORE_BASE(0x0110)
56#define HDMI_V_BLANK_F_1 HDMI_CORE_BASE(0x0114)
57#define HDMI_V_BLANK_F_2 HDMI_CORE_BASE(0x0118)
58#define HDMI_H_SYNC_GEN_0 HDMI_CORE_BASE(0x0120)
59#define HDMI_H_SYNC_GEN_1 HDMI_CORE_BASE(0x0124)
60#define HDMI_H_SYNC_GEN_2 HDMI_CORE_BASE(0x0128)
61#define HDMI_V_SYNC_GEN_1_0 HDMI_CORE_BASE(0x0130)
62#define HDMI_V_SYNC_GEN_1_1 HDMI_CORE_BASE(0x0134)
63#define HDMI_V_SYNC_GEN_1_2 HDMI_CORE_BASE(0x0138)
64#define HDMI_V_SYNC_GEN_2_0 HDMI_CORE_BASE(0x0140)
65#define HDMI_V_SYNC_GEN_2_1 HDMI_CORE_BASE(0x0144)
66#define HDMI_V_SYNC_GEN_2_2 HDMI_CORE_BASE(0x0148)
67#define HDMI_V_SYNC_GEN_3_0 HDMI_CORE_BASE(0x0150)
68#define HDMI_V_SYNC_GEN_3_1 HDMI_CORE_BASE(0x0154)
69#define HDMI_V_SYNC_GEN_3_2 HDMI_CORE_BASE(0x0158)
70#define HDMI_AVI_CON HDMI_CORE_BASE(0x0300)
71#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n))
72#define HDMI_DC_CONTROL HDMI_CORE_BASE(0x05C0)
73#define HDMI_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x05C4)
74#define HDMI_HPD_GEN HDMI_CORE_BASE(0x05C8)
75
76/* Timing generator registers */
77#define HDMI_TG_CMD HDMI_TG_BASE(0x0000)
78#define HDMI_TG_H_FSZ_L HDMI_TG_BASE(0x0018)
79#define HDMI_TG_H_FSZ_H HDMI_TG_BASE(0x001C)
80#define HDMI_TG_HACT_ST_L HDMI_TG_BASE(0x0020)
81#define HDMI_TG_HACT_ST_H HDMI_TG_BASE(0x0024)
82#define HDMI_TG_HACT_SZ_L HDMI_TG_BASE(0x0028)
83#define HDMI_TG_HACT_SZ_H HDMI_TG_BASE(0x002C)
84#define HDMI_TG_V_FSZ_L HDMI_TG_BASE(0x0030)
85#define HDMI_TG_V_FSZ_H HDMI_TG_BASE(0x0034)
86#define HDMI_TG_VSYNC_L HDMI_TG_BASE(0x0038)
87#define HDMI_TG_VSYNC_H HDMI_TG_BASE(0x003C)
88#define HDMI_TG_VSYNC2_L HDMI_TG_BASE(0x0040)
89#define HDMI_TG_VSYNC2_H HDMI_TG_BASE(0x0044)
90#define HDMI_TG_VACT_ST_L HDMI_TG_BASE(0x0048)
91#define HDMI_TG_VACT_ST_H HDMI_TG_BASE(0x004C)
92#define HDMI_TG_VACT_SZ_L HDMI_TG_BASE(0x0050)
93#define HDMI_TG_VACT_SZ_H HDMI_TG_BASE(0x0054)
94#define HDMI_TG_FIELD_CHG_L HDMI_TG_BASE(0x0058)
95#define HDMI_TG_FIELD_CHG_H HDMI_TG_BASE(0x005C)
96#define HDMI_TG_VACT_ST2_L HDMI_TG_BASE(0x0060)
97#define HDMI_TG_VACT_ST2_H HDMI_TG_BASE(0x0064)
98#define HDMI_TG_VSYNC_TOP_HDMI_L HDMI_TG_BASE(0x0078)
99#define HDMI_TG_VSYNC_TOP_HDMI_H HDMI_TG_BASE(0x007C)
100#define HDMI_TG_VSYNC_BOT_HDMI_L HDMI_TG_BASE(0x0080)
101#define HDMI_TG_VSYNC_BOT_HDMI_H HDMI_TG_BASE(0x0084)
102#define HDMI_TG_FIELD_TOP_HDMI_L HDMI_TG_BASE(0x0088)
103#define HDMI_TG_FIELD_TOP_HDMI_H HDMI_TG_BASE(0x008C)
104#define HDMI_TG_FIELD_BOT_HDMI_L HDMI_TG_BASE(0x0090)
105#define HDMI_TG_FIELD_BOT_HDMI_H HDMI_TG_BASE(0x0094)
106
107/*
108 * Bit definition part
109 */
110
111/* HDMI_INTC_CON */
112#define HDMI_INTC_EN_GLOBAL (1 << 6)
113#define HDMI_INTC_EN_HPD_PLUG (1 << 3)
114#define HDMI_INTC_EN_HPD_UNPLUG (1 << 2)
115
116/* HDMI_INTC_FLAG */
117#define HDMI_INTC_FLAG_HPD_PLUG (1 << 3)
118#define HDMI_INTC_FLAG_HPD_UNPLUG (1 << 2)
119
120/* HDMI_PHY_RSTOUT */
121#define HDMI_PHY_SW_RSTOUT (1 << 0)
122
123/* HDMI_CORE_RSTOUT */
124#define HDMI_CORE_SW_RSTOUT (1 << 0)
125
126/* HDMI_CON_0 */
127#define HDMI_BLUE_SCR_EN (1 << 5)
128#define HDMI_EN (1 << 0)
129
130/* HDMI_PHY_STATUS */
131#define HDMI_PHY_STATUS_READY (1 << 0)
132
133/* HDMI_MODE_SEL */
134#define HDMI_MODE_HDMI_EN (1 << 1)
135#define HDMI_MODE_DVI_EN (1 << 0)
136#define HDMI_MODE_MASK (3 << 0)
137
138/* HDMI_TG_CMD */
139#define HDMI_TG_EN (1 << 0)
140
141#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/media/video/s5p-tv/regs-mixer.h b/drivers/media/video/s5p-tv/regs-mixer.h
new file mode 100644
index 000000000000..3c8442609c1a
--- /dev/null
+++ b/drivers/media/video/s5p-tv/regs-mixer.h
@@ -0,0 +1,121 @@
1/*
2 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com/
4 *
5 * Mixer register header file for Samsung Mixer driver
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10*/
11#ifndef SAMSUNG_REGS_MIXER_H
12#define SAMSUNG_REGS_MIXER_H
13
14/*
15 * Register part
16 */
17#define MXR_STATUS 0x0000
18#define MXR_CFG 0x0004
19#define MXR_INT_EN 0x0008
20#define MXR_INT_STATUS 0x000C
21#define MXR_LAYER_CFG 0x0010
22#define MXR_VIDEO_CFG 0x0014
23#define MXR_GRAPHIC0_CFG 0x0020
24#define MXR_GRAPHIC0_BASE 0x0024
25#define MXR_GRAPHIC0_SPAN 0x0028
26#define MXR_GRAPHIC0_SXY 0x002C
27#define MXR_GRAPHIC0_WH 0x0030
28#define MXR_GRAPHIC0_DXY 0x0034
29#define MXR_GRAPHIC0_BLANK 0x0038
30#define MXR_GRAPHIC1_CFG 0x0040
31#define MXR_GRAPHIC1_BASE 0x0044
32#define MXR_GRAPHIC1_SPAN 0x0048
33#define MXR_GRAPHIC1_SXY 0x004C
34#define MXR_GRAPHIC1_WH 0x0050
35#define MXR_GRAPHIC1_DXY 0x0054
36#define MXR_GRAPHIC1_BLANK 0x0058
37#define MXR_BG_CFG 0x0060
38#define MXR_BG_COLOR0 0x0064
39#define MXR_BG_COLOR1 0x0068
40#define MXR_BG_COLOR2 0x006C
41
42/* for parametrized access to layer registers */
43#define MXR_GRAPHIC_CFG(i) (0x0020 + (i) * 0x20)
44#define MXR_GRAPHIC_BASE(i) (0x0024 + (i) * 0x20)
45#define MXR_GRAPHIC_SPAN(i) (0x0028 + (i) * 0x20)
46#define MXR_GRAPHIC_SXY(i) (0x002C + (i) * 0x20)
47#define MXR_GRAPHIC_WH(i) (0x0030 + (i) * 0x20)
48#define MXR_GRAPHIC_DXY(i) (0x0034 + (i) * 0x20)
49
50/*
51 * Bit definition part
52 */
53
54/* generates mask for range of bits */
55#define MXR_MASK(high_bit, low_bit) \
56 (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
57
58#define MXR_MASK_VAL(val, high_bit, low_bit) \
59 (((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
60
61/* bits for MXR_STATUS */
62#define MXR_STATUS_16_BURST (1 << 7)
63#define MXR_STATUS_BURST_MASK (1 << 7)
64#define MXR_STATUS_SYNC_ENABLE (1 << 2)
65#define MXR_STATUS_REG_RUN (1 << 0)
66
67/* bits for MXR_CFG */
68#define MXR_CFG_OUT_YUV444 (0 << 8)
69#define MXR_CFG_OUT_RGB888 (1 << 8)
70#define MXR_CFG_DST_SDO (0 << 7)
71#define MXR_CFG_DST_HDMI (1 << 7)
72#define MXR_CFG_DST_MASK (1 << 7)
73#define MXR_CFG_SCAN_HD_720 (0 << 6)
74#define MXR_CFG_SCAN_HD_1080 (1 << 6)
75#define MXR_CFG_GRP1_ENABLE (1 << 5)
76#define MXR_CFG_GRP0_ENABLE (1 << 4)
77#define MXR_CFG_VP_ENABLE (1 << 3)
78#define MXR_CFG_SCAN_INTERLACE (0 << 2)
79#define MXR_CFG_SCAN_PROGRASSIVE (1 << 2)
80#define MXR_CFG_SCAN_NTSC (0 << 1)
81#define MXR_CFG_SCAN_PAL (1 << 1)
82#define MXR_CFG_SCAN_SD (0 << 0)
83#define MXR_CFG_SCAN_HD (1 << 0)
84#define MXR_CFG_SCAN_MASK 0x47
85
86/* bits for MXR_GRAPHICn_CFG */
87#define MXR_GRP_CFG_COLOR_KEY_DISABLE (1 << 21)
88#define MXR_GRP_CFG_BLEND_PRE_MUL (1 << 20)
89#define MXR_GRP_CFG_FORMAT_VAL(x) MXR_MASK_VAL(x, 11, 8)
90#define MXR_GRP_CFG_FORMAT_MASK MXR_GRP_CFG_FORMAT_VAL(~0)
91#define MXR_GRP_CFG_ALPHA_VAL(x) MXR_MASK_VAL(x, 7, 0)
92
93/* bits for MXR_GRAPHICn_WH */
94#define MXR_GRP_WH_H_SCALE(x) MXR_MASK_VAL(x, 28, 28)
95#define MXR_GRP_WH_V_SCALE(x) MXR_MASK_VAL(x, 12, 12)
96#define MXR_GRP_WH_WIDTH(x) MXR_MASK_VAL(x, 26, 16)
97#define MXR_GRP_WH_HEIGHT(x) MXR_MASK_VAL(x, 10, 0)
98
99/* bits for MXR_GRAPHICn_SXY */
100#define MXR_GRP_SXY_SX(x) MXR_MASK_VAL(x, 26, 16)
101#define MXR_GRP_SXY_SY(x) MXR_MASK_VAL(x, 10, 0)
102
103/* bits for MXR_GRAPHICn_DXY */
104#define MXR_GRP_DXY_DX(x) MXR_MASK_VAL(x, 26, 16)
105#define MXR_GRP_DXY_DY(x) MXR_MASK_VAL(x, 10, 0)
106
107/* bits for MXR_INT_EN */
108#define MXR_INT_EN_VSYNC (1 << 11)
109#define MXR_INT_EN_ALL (0x0f << 8)
110
111/* bit for MXR_INT_STATUS */
112#define MXR_INT_CLEAR_VSYNC (1 << 11)
113#define MXR_INT_STATUS_VSYNC (1 << 0)
114
115/* bit for MXR_LAYER_CFG */
116#define MXR_LAYER_CFG_GRP1_VAL(x) MXR_MASK_VAL(x, 11, 8)
117#define MXR_LAYER_CFG_GRP0_VAL(x) MXR_MASK_VAL(x, 7, 4)
118#define MXR_LAYER_CFG_VP_VAL(x) MXR_MASK_VAL(x, 3, 0)
119
120#endif /* SAMSUNG_REGS_MIXER_H */
121
diff --git a/drivers/media/video/s5p-tv/regs-sdo.h b/drivers/media/video/s5p-tv/regs-sdo.h
new file mode 100644
index 000000000000..7f7c2b8ac140
--- /dev/null
+++ b/drivers/media/video/s5p-tv/regs-sdo.h
@@ -0,0 +1,63 @@
1/* drivers/media/video/s5p-tv/regs-sdo.h
2 *
3 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * SDO register description file
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef SAMSUNG_REGS_SDO_H
14#define SAMSUNG_REGS_SDO_H
15
16/*
17 * Register part
18 */
19
20#define SDO_CLKCON 0x0000
21#define SDO_CONFIG 0x0008
22#define SDO_VBI 0x0014
23#define SDO_DAC 0x003C
24#define SDO_CCCON 0x0180
25#define SDO_IRQ 0x0280
26#define SDO_IRQMASK 0x0284
27#define SDO_VERSION 0x03D8
28
29/*
30 * Bit definition part
31 */
32
33/* SDO Clock Control Register (SDO_CLKCON) */
34#define SDO_TVOUT_SW_RESET (1 << 4)
35#define SDO_TVOUT_CLOCK_READY (1 << 1)
36#define SDO_TVOUT_CLOCK_ON (1 << 0)
37
38/* SDO Video Standard Configuration Register (SDO_CONFIG) */
39#define SDO_PROGRESSIVE (1 << 4)
40#define SDO_NTSC_M 0
41#define SDO_PAL_M 1
42#define SDO_PAL_BGHID 2
43#define SDO_PAL_N 3
44#define SDO_PAL_NC 4
45#define SDO_NTSC_443 8
46#define SDO_PAL_60 9
47#define SDO_STANDARD_MASK 0xf
48
49/* SDO VBI Configuration Register (SDO_VBI) */
50#define SDO_CVBS_WSS_INS (1 << 14)
51#define SDO_CVBS_CLOSED_CAPTION_MASK (3 << 12)
52
53/* SDO DAC Configuration Register (SDO_DAC) */
54#define SDO_POWER_ON_DAC (1 << 0)
55
56/* SDO Color Compensation On/Off Control (SDO_CCCON) */
57#define SDO_COMPENSATION_BHS_ADJ_OFF (1 << 4)
58#define SDO_COMPENSATION_CVBS_COMP_OFF (1 << 0)
59
60/* SDO Interrupt Request Register (SDO_IRQ) */
61#define SDO_VSYNC_IRQ_PEND (1 << 0)
62
63#endif /* SAMSUNG_REGS_SDO_H */
diff --git a/drivers/media/video/s5p-tv/regs-vp.h b/drivers/media/video/s5p-tv/regs-vp.h
new file mode 100644
index 000000000000..6c63984e11e8
--- /dev/null
+++ b/drivers/media/video/s5p-tv/regs-vp.h
@@ -0,0 +1,88 @@
1/*
2 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com/
4 *
5 * Video processor register header file for Samsung Mixer driver
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef SAMSUNG_REGS_VP_H
13#define SAMSUNG_REGS_VP_H
14
15/*
16 * Register part
17 */
18
19#define VP_ENABLE 0x0000
20#define VP_SRESET 0x0004
21#define VP_SHADOW_UPDATE 0x0008
22#define VP_FIELD_ID 0x000C
23#define VP_MODE 0x0010
24#define VP_IMG_SIZE_Y 0x0014
25#define VP_IMG_SIZE_C 0x0018
26#define VP_PER_RATE_CTRL 0x001C
27#define VP_TOP_Y_PTR 0x0028
28#define VP_BOT_Y_PTR 0x002C
29#define VP_TOP_C_PTR 0x0030
30#define VP_BOT_C_PTR 0x0034
31#define VP_ENDIAN_MODE 0x03CC
32#define VP_SRC_H_POSITION 0x0044
33#define VP_SRC_V_POSITION 0x0048
34#define VP_SRC_WIDTH 0x004C
35#define VP_SRC_HEIGHT 0x0050
36#define VP_DST_H_POSITION 0x0054
37#define VP_DST_V_POSITION 0x0058
38#define VP_DST_WIDTH 0x005C
39#define VP_DST_HEIGHT 0x0060
40#define VP_H_RATIO 0x0064
41#define VP_V_RATIO 0x0068
42#define VP_POLY8_Y0_LL 0x006C
43#define VP_POLY4_Y0_LL 0x00EC
44#define VP_POLY4_C0_LL 0x012C
45
46/*
47 * Bit definition part
48 */
49
50/* generates mask for range of bits */
51
52#define VP_MASK(high_bit, low_bit) \
53 (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
54
55#define VP_MASK_VAL(val, high_bit, low_bit) \
56 (((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
57
58 /* VP_ENABLE */
59#define VP_ENABLE_ON (1 << 0)
60
61/* VP_SRESET */
62#define VP_SRESET_PROCESSING (1 << 0)
63
64/* VP_SHADOW_UPDATE */
65#define VP_SHADOW_UPDATE_ENABLE (1 << 0)
66
67/* VP_MODE */
68#define VP_MODE_NV12 (0 << 6)
69#define VP_MODE_NV21 (1 << 6)
70#define VP_MODE_LINE_SKIP (1 << 5)
71#define VP_MODE_MEM_LINEAR (0 << 4)
72#define VP_MODE_MEM_TILED (1 << 4)
73#define VP_MODE_FMT_MASK (5 << 4)
74#define VP_MODE_FIELD_ID_AUTO_TOGGLING (1 << 2)
75#define VP_MODE_2D_IPC (1 << 1)
76
77/* VP_IMG_SIZE_Y */
78/* VP_IMG_SIZE_C */
79#define VP_IMG_HSIZE(x) VP_MASK_VAL(x, 29, 16)
80#define VP_IMG_VSIZE(x) VP_MASK_VAL(x, 13, 0)
81
82/* VP_SRC_H_POSITION */
83#define VP_SRC_H_POSITION_VAL(x) VP_MASK_VAL(x, 14, 4)
84
85/* VP_ENDIAN_MODE */
86#define VP_ENDIAN_MODE_LITTLE (1 << 0)
87
88#endif /* SAMSUNG_REGS_VP_H */
diff --git a/drivers/media/video/s5p-tv/sdo_drv.c b/drivers/media/video/s5p-tv/sdo_drv.c
new file mode 100644
index 000000000000..4dddd6bd635b
--- /dev/null
+++ b/drivers/media/video/s5p-tv/sdo_drv.c
@@ -0,0 +1,479 @@
1/*
2 * Samsung Standard Definition Output (SDO) driver
3 *
4 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
5 *
6 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/interrupt.h>
19#include <linux/io.h>
20#include <linux/irq.h>
21#include <linux/platform_device.h>
22#include <linux/pm_runtime.h>
23#include <linux/regulator/consumer.h>
24#include <linux/slab.h>
25
26#include <media/v4l2-subdev.h>
27
28#include "regs-sdo.h"
29
30MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
31MODULE_DESCRIPTION("Samsung Standard Definition Output (SDO)");
32MODULE_LICENSE("GPL");
33
34#define SDO_DEFAULT_STD V4L2_STD_PAL
35
36struct sdo_format {
37 v4l2_std_id id;
38 /* all modes are 720 pixels wide */
39 unsigned int height;
40 unsigned int cookie;
41};
42
43struct sdo_device {
44 /** pointer to device parent */
45 struct device *dev;
46 /** base address of SDO registers */
47 void __iomem *regs;
48 /** SDO interrupt */
49 unsigned int irq;
50 /** DAC source clock */
51 struct clk *sclk_dac;
52 /** DAC clock */
53 struct clk *dac;
54 /** DAC physical interface */
55 struct clk *dacphy;
56 /** clock for control of VPLL */
57 struct clk *fout_vpll;
58 /** regulator for SDO IP power */
59 struct regulator *vdac;
60 /** regulator for SDO plug detection */
61 struct regulator *vdet;
62 /** subdev used as device interface */
63 struct v4l2_subdev sd;
64 /** current format */
65 const struct sdo_format *fmt;
66};
67
/* map the embedded v4l2_subdev back to its owning sdo_device */
static inline struct sdo_device *sd_to_sdev(struct v4l2_subdev *sd)
{
	return container_of(sd, struct sdo_device, sd);
}
72
/* read-modify-write: update only the bits selected by @mask in @reg_id */
static inline
void sdo_write_mask(struct sdo_device *sdev, u32 reg_id, u32 value, u32 mask)
{
	u32 old = readl(sdev->regs + reg_id);
	value = (value & mask) | (old & ~mask);
	writel(value, sdev->regs + reg_id);
}
80
/* write @value to the SDO register at offset @reg_id */
static inline
void sdo_write(struct sdo_device *sdev, u32 reg_id, u32 value)
{
	writel(value, sdev->regs + reg_id);
}
86
/* read the SDO register at offset @reg_id */
static inline
u32 sdo_read(struct sdo_device *sdev, u32 reg_id)
{
	return readl(sdev->regs + reg_id);
}
92
/* VSYNC interrupt handler: only acknowledges the pending bit */
static irqreturn_t sdo_irq_handler(int irq, void *dev_data)
{
	struct sdo_device *sdev = dev_data;

	/* clear interrupt */
	sdo_write_mask(sdev, SDO_IRQ, ~0, SDO_VSYNC_IRQ_PEND);
	return IRQ_HANDLED;
}
101
/* dump the main SDO registers to the kernel log (debug aid) */
static void sdo_reg_debug(struct sdo_device *sdev)
{
#define DBGREG(reg_id) \
	dev_info(sdev->dev, #reg_id " = %08x\n", \
		sdo_read(sdev, reg_id))

	DBGREG(SDO_CLKCON);
	DBGREG(SDO_CONFIG);
	DBGREG(SDO_VBI);
	DBGREG(SDO_DAC);
	DBGREG(SDO_IRQ);
	DBGREG(SDO_IRQMASK);
	DBGREG(SDO_VERSION);
}
116
/*
 * Supported analog TV standards.  Order matters: sdo_find_format()
 * returns the first entry whose id mask overlaps the request, so the
 * more specific standards precede the compound V4L2_STD_PAL.
 */
static const struct sdo_format sdo_format[] = {
	{ V4L2_STD_PAL_N,	.height = 576, .cookie = SDO_PAL_N },
	{ V4L2_STD_PAL_Nc,	.height = 576, .cookie = SDO_PAL_NC },
	{ V4L2_STD_PAL_M,	.height = 480, .cookie = SDO_PAL_M },
	{ V4L2_STD_PAL_60,	.height = 480, .cookie = SDO_PAL_60 },
	{ V4L2_STD_NTSC_443,	.height = 480, .cookie = SDO_NTSC_443 },
	{ V4L2_STD_PAL,		.height = 576, .cookie = SDO_PAL_BGHID },
	{ V4L2_STD_NTSC_M,	.height = 480, .cookie = SDO_NTSC_M },
};
126
127static const struct sdo_format *sdo_find_format(v4l2_std_id id)
128{
129 int i;
130 for (i = 0; i < ARRAY_SIZE(sdo_format); ++i)
131 if (sdo_format[i].id & id)
132 return &sdo_format[i];
133 return NULL;
134}
135
/* report the set of TV standards this output can generate */
static int sdo_g_tvnorms_output(struct v4l2_subdev *sd, v4l2_std_id *std)
{
	*std = V4L2_STD_NTSC_M | V4L2_STD_PAL_M | V4L2_STD_PAL |
		V4L2_STD_PAL_N | V4L2_STD_PAL_Nc |
		V4L2_STD_NTSC_443 | V4L2_STD_PAL_60;
	return 0;
}
143
144static int sdo_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
145{
146 struct sdo_device *sdev = sd_to_sdev(sd);
147 const struct sdo_format *fmt;
148 fmt = sdo_find_format(std);
149 if (fmt == NULL)
150 return -EINVAL;
151 sdev->fmt = fmt;
152 return 0;
153}
154
/* report the currently selected TV standard */
static int sdo_g_std_output(struct v4l2_subdev *sd, v4l2_std_id *std)
{
	*std = sd_to_sdev(sd)->fmt->id;
	return 0;
}
160
/*
 * Report the media bus frame format of the current standard.
 * Fails with -ENXIO before a standard has been selected.
 */
static int sdo_g_mbus_fmt(struct v4l2_subdev *sd,
	struct v4l2_mbus_framefmt *fmt)
{
	struct sdo_device *sdev = sd_to_sdev(sd);

	if (!sdev->fmt)
		return -ENXIO;
	/* all modes are 720 pixels wide */
	fmt->width = 720;
	fmt->height = sdev->fmt->height;
	fmt->code = V4L2_MBUS_FMT_FIXED;
	fmt->field = V4L2_FIELD_INTERLACED;
	return 0;
}
175
176static int sdo_s_power(struct v4l2_subdev *sd, int on)
177{
178 struct sdo_device *sdev = sd_to_sdev(sd);
179 struct device *dev = sdev->dev;
180 int ret;
181
182 dev_info(dev, "sdo_s_power(%d)\n", on);
183
184 if (on)
185 ret = pm_runtime_get_sync(dev);
186 else
187 ret = pm_runtime_put_sync(dev);
188
189 /* only values < 0 indicate errors */
190 return IS_ERR_VALUE(ret) ? ret : 0;
191}
192
/*
 * Start SDO output: clock the timing generator, gate the SDO clock on,
 * power up the DAC.  Always returns 0.
 */
static int sdo_streamon(struct sdo_device *sdev)
{
	/* set proper clock for Timing Generator */
	clk_set_rate(sdev->fout_vpll, 54000000);
	/* NOTE(review): clk_set_rate() return value is ignored; the rate
	 * actually achieved is only logged below */
	dev_info(sdev->dev, "fout_vpll.rate = %lu\n",
	clk_get_rate(sdev->fout_vpll));
	/* enable clock in SDO */
	sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_CLOCK_ON);
	clk_enable(sdev->dacphy);
	/* enable DAC */
	sdo_write_mask(sdev, SDO_DAC, ~0, SDO_POWER_ON_DAC);
	sdo_reg_debug(sdev);
	return 0;
}
207
/*
 * Stop SDO output: power down the DAC, gate clocks off, then poll for
 * the clock state to settle.  Returns 0 on success, -EIO on timeout.
 */
static int sdo_streamoff(struct sdo_device *sdev)
{
	int tries;

	sdo_write_mask(sdev, SDO_DAC, 0, SDO_POWER_ON_DAC);
	clk_disable(sdev->dacphy);
	sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_CLOCK_ON);
	/* poll up to ~100 ms for the clock-ready flag */
	for (tries = 100; tries; --tries) {
		/* NOTE(review): the loop exits when CLOCK_READY is *set*
		 * after gating the clock off -- presumably the bit reads
		 * as "idle" once gating completes; confirm with the
		 * SDO datasheet */
		if (sdo_read(sdev, SDO_CLKCON) & SDO_TVOUT_CLOCK_READY)
			break;
		mdelay(1);
	}
	if (tries == 0)
		dev_err(sdev->dev, "failed to stop streaming\n");
	return tries ? 0 : -EIO;
}
224
/* v4l2 s_stream callback: dispatch to streamon/streamoff */
static int sdo_s_stream(struct v4l2_subdev *sd, int on)
{
	struct sdo_device *sdev = sd_to_sdev(sd);
	return on ? sdo_streamon(sdev) : sdo_streamoff(sdev);
}
230
/* core ops: only runtime power control is exposed */
static const struct v4l2_subdev_core_ops sdo_sd_core_ops = {
	.s_power = sdo_s_power,
};

/* video ops: analog standard selection, format query, stream control */
static const struct v4l2_subdev_video_ops sdo_sd_video_ops = {
	.s_std_output = sdo_s_std_output,
	.g_std_output = sdo_g_std_output,
	.g_tvnorms_output = sdo_g_tvnorms_output,
	.g_mbus_fmt = sdo_g_mbus_fmt,
	.s_stream = sdo_s_stream,
};

static const struct v4l2_subdev_ops sdo_sd_ops = {
	.core = &sdo_sd_core_ops,
	.video = &sdo_sd_video_ops,
};
247
/* runtime PM suspend: release regulators and the DAC source clock */
static int sdo_runtime_suspend(struct device *dev)
{
	struct v4l2_subdev *sd = dev_get_drvdata(dev);
	struct sdo_device *sdev = sd_to_sdev(sd);

	dev_info(dev, "suspend\n");
	/* reverse order of sdo_runtime_resume() */
	regulator_disable(sdev->vdet);
	regulator_disable(sdev->vdac);
	clk_disable(sdev->sclk_dac);
	return 0;
}
259
260static int sdo_runtime_resume(struct device *dev)
261{
262 struct v4l2_subdev *sd = dev_get_drvdata(dev);
263 struct sdo_device *sdev = sd_to_sdev(sd);
264
265 dev_info(dev, "resume\n");
266 clk_enable(sdev->sclk_dac);
267 regulator_enable(sdev->vdac);
268 regulator_enable(sdev->vdet);
269
270 /* software reset */
271 sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_SW_RESET);
272 mdelay(10);
273 sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_SW_RESET);
274
275 /* setting TV mode */
276 sdo_write_mask(sdev, SDO_CONFIG, sdev->fmt->cookie, SDO_STANDARD_MASK);
277 /* XXX: forcing interlaced mode using undocumented bit */
278 sdo_write_mask(sdev, SDO_CONFIG, 0, SDO_PROGRESSIVE);
279 /* turn all VBI off */
280 sdo_write_mask(sdev, SDO_VBI, 0, SDO_CVBS_WSS_INS |
281 SDO_CVBS_CLOSED_CAPTION_MASK);
282 /* turn all post processing off */
283 sdo_write_mask(sdev, SDO_CCCON, ~0, SDO_COMPENSATION_BHS_ADJ_OFF |
284 SDO_COMPENSATION_CVBS_COMP_OFF);
285 sdo_reg_debug(sdev);
286 return 0;
287}
288
289static const struct dev_pm_ops sdo_pm_ops = {
290 .runtime_suspend = sdo_runtime_suspend,
291 .runtime_resume = sdo_runtime_resume,
292};
293
294static int __devinit sdo_probe(struct platform_device *pdev)
295{
296 struct device *dev = &pdev->dev;
297 struct sdo_device *sdev;
298 struct resource *res;
299 int ret = 0;
300 struct clk *sclk_vpll;
301
302 dev_info(dev, "probe start\n");
303 sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
304 if (!sdev) {
305 dev_err(dev, "not enough memory.\n");
306 ret = -ENOMEM;
307 goto fail;
308 }
309 sdev->dev = dev;
310
311 /* mapping registers */
312 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
313 if (res == NULL) {
314 dev_err(dev, "get memory resource failed.\n");
315 ret = -ENXIO;
316 goto fail_sdev;
317 }
318
319 sdev->regs = ioremap(res->start, resource_size(res));
320 if (sdev->regs == NULL) {
321 dev_err(dev, "register mapping failed.\n");
322 ret = -ENXIO;
323 goto fail_sdev;
324 }
325
326 /* acquiring interrupt */
327 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
328 if (res == NULL) {
329 dev_err(dev, "get interrupt resource failed.\n");
330 ret = -ENXIO;
331 goto fail_regs;
332 }
333 ret = request_irq(res->start, sdo_irq_handler, 0, "s5p-sdo", sdev);
334 if (ret) {
335 dev_err(dev, "request interrupt failed.\n");
336 goto fail_regs;
337 }
338 sdev->irq = res->start;
339
340 /* acquire clocks */
341 sdev->sclk_dac = clk_get(dev, "sclk_dac");
342 if (IS_ERR_OR_NULL(sdev->sclk_dac)) {
343 dev_err(dev, "failed to get clock 'sclk_dac'\n");
344 ret = -ENXIO;
345 goto fail_irq;
346 }
347 sdev->dac = clk_get(dev, "dac");
348 if (IS_ERR_OR_NULL(sdev->dac)) {
349 dev_err(dev, "failed to get clock 'dac'\n");
350 ret = -ENXIO;
351 goto fail_sclk_dac;
352 }
353 sdev->dacphy = clk_get(dev, "dacphy");
354 if (IS_ERR_OR_NULL(sdev->dacphy)) {
355 dev_err(dev, "failed to get clock 'dacphy'\n");
356 ret = -ENXIO;
357 goto fail_dac;
358 }
359 sclk_vpll = clk_get(dev, "sclk_vpll");
360 if (IS_ERR_OR_NULL(sclk_vpll)) {
361 dev_err(dev, "failed to get clock 'sclk_vpll'\n");
362 ret = -ENXIO;
363 goto fail_dacphy;
364 }
365 clk_set_parent(sdev->sclk_dac, sclk_vpll);
366 clk_put(sclk_vpll);
367 sdev->fout_vpll = clk_get(dev, "fout_vpll");
368 if (IS_ERR_OR_NULL(sdev->fout_vpll)) {
369 dev_err(dev, "failed to get clock 'fout_vpll'\n");
370 goto fail_dacphy;
371 }
372 dev_info(dev, "fout_vpll.rate = %lu\n", clk_get_rate(sclk_vpll));
373
374 /* acquire regulator */
375 sdev->vdac = regulator_get(dev, "vdd33a_dac");
376 if (IS_ERR_OR_NULL(sdev->vdac)) {
377 dev_err(dev, "failed to get regulator 'vdac'\n");
378 goto fail_fout_vpll;
379 }
380 sdev->vdet = regulator_get(dev, "vdet");
381 if (IS_ERR_OR_NULL(sdev->vdet)) {
382 dev_err(dev, "failed to get regulator 'vdet'\n");
383 goto fail_vdac;
384 }
385
386 /* enable gate for dac clock, because mixer uses it */
387 clk_enable(sdev->dac);
388
389 /* configure power management */
390 pm_runtime_enable(dev);
391
392 /* configuration of interface subdevice */
393 v4l2_subdev_init(&sdev->sd, &sdo_sd_ops);
394 sdev->sd.owner = THIS_MODULE;
395 strlcpy(sdev->sd.name, "s5p-sdo", sizeof sdev->sd.name);
396
397 /* set default format */
398 sdev->fmt = sdo_find_format(SDO_DEFAULT_STD);
399 BUG_ON(sdev->fmt == NULL);
400
401 /* keeping subdev in device's private for use by other drivers */
402 dev_set_drvdata(dev, &sdev->sd);
403
404 dev_info(dev, "probe succeeded\n");
405 return 0;
406
407fail_vdac:
408 regulator_put(sdev->vdac);
409fail_fout_vpll:
410 clk_put(sdev->fout_vpll);
411fail_dacphy:
412 clk_put(sdev->dacphy);
413fail_dac:
414 clk_put(sdev->dac);
415fail_sclk_dac:
416 clk_put(sdev->sclk_dac);
417fail_irq:
418 free_irq(sdev->irq, sdev);
419fail_regs:
420 iounmap(sdev->regs);
421fail_sdev:
422 kfree(sdev);
423fail:
424 dev_info(dev, "probe failed\n");
425 return ret;
426}
427
/* tear down everything sdo_probe() set up, in reverse order */
static int __devexit sdo_remove(struct platform_device *pdev)
{
	struct v4l2_subdev *sd = dev_get_drvdata(&pdev->dev);
	struct sdo_device *sdev = sd_to_sdev(sd);

	pm_runtime_disable(&pdev->dev);
	/* balances the clk_enable(sdev->dac) done in probe */
	clk_disable(sdev->dac);
	regulator_put(sdev->vdet);
	regulator_put(sdev->vdac);
	clk_put(sdev->fout_vpll);
	clk_put(sdev->dacphy);
	clk_put(sdev->dac);
	clk_put(sdev->sclk_dac);
	free_irq(sdev->irq, sdev);
	iounmap(sdev->regs);
	kfree(sdev);

	dev_info(&pdev->dev, "remove successful\n");
	return 0;
}
448
449static struct platform_driver sdo_driver __refdata = {
450 .probe = sdo_probe,
451 .remove = __devexit_p(sdo_remove),
452 .driver = {
453 .name = "s5p-sdo",
454 .owner = THIS_MODULE,
455 .pm = &sdo_pm_ops,
456 }
457};
458
/* module entry point: print a banner and register the platform driver */
static int __init sdo_init(void)
{
	int ret;
	static const char banner[] __initdata = KERN_INFO \
		"Samsung Standard Definition Output (SDO) driver, "
		"(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
	/* banner is a constant including its own KERN_INFO prefix */
	printk(banner);

	ret = platform_driver_register(&sdo_driver);
	if (ret)
		printk(KERN_ERR "SDO platform driver register failed\n");

	return ret;
}
474
/* module exit point: unregister the platform driver */
static void __exit sdo_exit(void)
{
	platform_driver_unregister(&sdo_driver);
}
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index 0db90922ee93..f2ae405c74ac 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -757,8 +757,8 @@ static int saa711x_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
757 switch (ctrl->id) { 757 switch (ctrl->id) {
758 case V4L2_CID_CHROMA_AGC: 758 case V4L2_CID_CHROMA_AGC:
759 /* chroma gain cluster */ 759 /* chroma gain cluster */
760 if (state->agc->cur.val) 760 if (state->agc->val)
761 state->gain->cur.val = 761 state->gain->val =
762 saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL) & 0x7f; 762 saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL) & 0x7f;
763 break; 763 break;
764 } 764 }
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index e2062b240e32..0f9fb99adeb4 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -4951,8 +4951,9 @@ struct saa7134_board saa7134_boards[] = {
4951 .audio_clock = 0x00187de7, 4951 .audio_clock = 0x00187de7,
4952 .tuner_type = TUNER_XC2028, 4952 .tuner_type = TUNER_XC2028,
4953 .radio_type = UNSET, 4953 .radio_type = UNSET,
4954 .tuner_addr = ADDR_UNSET, 4954 .tuner_addr = 0x61,
4955 .radio_addr = ADDR_UNSET, 4955 .radio_addr = ADDR_UNSET,
4956 .mpeg = SAA7134_MPEG_DVB,
4956 .inputs = {{ 4957 .inputs = {{
4957 .name = name_tv, 4958 .name = name_tv,
4958 .vmux = 3, 4959 .vmux = 3,
@@ -6992,6 +6993,11 @@ static int saa7134_xc2028_callback(struct saa7134_dev *dev,
6992 msleep(10); 6993 msleep(10);
6993 saa7134_set_gpio(dev, 18, 1); 6994 saa7134_set_gpio(dev, 18, 1);
6994 break; 6995 break;
6996 case SAA7134_BOARD_VIDEOMATE_T750:
6997 saa7134_set_gpio(dev, 20, 0);
6998 msleep(10);
6999 saa7134_set_gpio(dev, 20, 1);
7000 break;
6995 } 7001 }
6996 return 0; 7002 return 0;
6997 } 7003 }
@@ -7451,6 +7457,11 @@ int saa7134_board_init1(struct saa7134_dev *dev)
7451 saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x0e050000, 0x0c050000); 7457 saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x0e050000, 0x0c050000);
7452 saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x0e050000, 0x0c050000); 7458 saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x0e050000, 0x0c050000);
7453 break; 7459 break;
7460 case SAA7134_BOARD_VIDEOMATE_T750:
7461 /* enable the analog tuner */
7462 saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x00008000, 0x00008000);
7463 saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x00008000, 0x00008000);
7464 break;
7454 } 7465 }
7455 return 0; 7466 return 0;
7456} 7467}
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index f9be737ba6f4..ca65cda3e101 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -39,6 +39,8 @@
39MODULE_DESCRIPTION("v4l2 driver module for saa7130/34 based TV cards"); 39MODULE_DESCRIPTION("v4l2 driver module for saa7130/34 based TV cards");
40MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); 40MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
41MODULE_LICENSE("GPL"); 41MODULE_LICENSE("GPL");
42MODULE_VERSION(SAA7134_VERSION);
43
42 44
43/* ------------------------------------------------------------------ */ 45/* ------------------------------------------------------------------ */
44 46
@@ -1332,14 +1334,8 @@ static struct pci_driver saa7134_pci_driver = {
1332static int __init saa7134_init(void) 1334static int __init saa7134_init(void)
1333{ 1335{
1334 INIT_LIST_HEAD(&saa7134_devlist); 1336 INIT_LIST_HEAD(&saa7134_devlist);
1335 printk(KERN_INFO "saa7130/34: v4l2 driver version %d.%d.%d loaded\n", 1337 printk(KERN_INFO "saa7130/34: v4l2 driver version %s loaded\n",
1336 (SAA7134_VERSION_CODE >> 16) & 0xff, 1338 SAA7134_VERSION);
1337 (SAA7134_VERSION_CODE >> 8) & 0xff,
1338 SAA7134_VERSION_CODE & 0xff);
1339#ifdef SNAPSHOT
1340 printk(KERN_INFO "saa7130/34: snapshot date %04d-%02d-%02d\n",
1341 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
1342#endif
1343 return pci_register_driver(&saa7134_pci_driver); 1339 return pci_register_driver(&saa7134_pci_driver);
1344} 1340}
1345 1341
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 996a206c6d79..1e4ef1669887 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -56,6 +56,7 @@
56#include "lgs8gxx.h" 56#include "lgs8gxx.h"
57 57
58#include "zl10353.h" 58#include "zl10353.h"
59#include "qt1010.h"
59 60
60#include "zl10036.h" 61#include "zl10036.h"
61#include "zl10039.h" 62#include "zl10039.h"
@@ -939,6 +940,18 @@ static struct zl10353_config behold_x7_config = {
939 .disable_i2c_gate_ctrl = 1, 940 .disable_i2c_gate_ctrl = 1,
940}; 941};
941 942
943static struct zl10353_config videomate_t750_zl10353_config = {
944 .demod_address = 0x0f,
945 .no_tuner = 1,
946 .parallel_ts = 1,
947 .disable_i2c_gate_ctrl = 1,
948};
949
950static struct qt1010_config videomate_t750_qt1010_config = {
951 .i2c_address = 0x62
952};
953
954
942/* ================================================================== 955/* ==================================================================
943 * tda10086 based DVB-S cards, helper functions 956 * tda10086 based DVB-S cards, helper functions
944 */ 957 */
@@ -1650,6 +1663,18 @@ static int dvb_init(struct saa7134_dev *dev)
1650 __func__); 1663 __func__);
1651 1664
1652 break; 1665 break;
1666 case SAA7134_BOARD_VIDEOMATE_T750:
1667 fe0->dvb.frontend = dvb_attach(zl10353_attach,
1668 &videomate_t750_zl10353_config,
1669 &dev->i2c_adap);
1670 if (fe0->dvb.frontend != NULL) {
1671 if (dvb_attach(qt1010_attach,
1672 fe0->dvb.frontend,
1673 &dev->i2c_adap,
1674 &videomate_t750_qt1010_config) == NULL)
1675 wprintk("error attaching QT1010\n");
1676 }
1677 break;
1653 case SAA7134_BOARD_ZOLID_HYBRID_PCI: 1678 case SAA7134_BOARD_ZOLID_HYBRID_PCI:
1654 fe0->dvb.frontend = dvb_attach(tda10048_attach, 1679 fe0->dvb.frontend = dvb_attach(tda10048_attach,
1655 &zolid_tda10048_config, 1680 &zolid_tda10048_config,
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 18294db38a01..dde361a9194e 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -172,7 +172,6 @@ static int empress_querycap(struct file *file, void *priv,
172 strlcpy(cap->card, saa7134_boards[dev->board].name, 172 strlcpy(cap->card, saa7134_boards[dev->board].name,
173 sizeof(cap->card)); 173 sizeof(cap->card));
174 sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci)); 174 sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
175 cap->version = SAA7134_VERSION_CODE;
176 cap->capabilities = 175 cap->capabilities =
177 V4L2_CAP_VIDEO_CAPTURE | 176 V4L2_CAP_VIDEO_CAPTURE |
178 V4L2_CAP_READWRITE | 177 V4L2_CAP_READWRITE |
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 776ba2dd7f9f..9cf7914f6f90 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1810,7 +1810,6 @@ static int saa7134_querycap(struct file *file, void *priv,
1810 strlcpy(cap->card, saa7134_boards[dev->board].name, 1810 strlcpy(cap->card, saa7134_boards[dev->board].name,
1811 sizeof(cap->card)); 1811 sizeof(cap->card));
1812 sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci)); 1812 sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
1813 cap->version = SAA7134_VERSION_CODE;
1814 cap->capabilities = 1813 cap->capabilities =
1815 V4L2_CAP_VIDEO_CAPTURE | 1814 V4L2_CAP_VIDEO_CAPTURE |
1816 V4L2_CAP_VBI_CAPTURE | 1815 V4L2_CAP_VBI_CAPTURE |
@@ -2307,7 +2306,6 @@ static int radio_querycap(struct file *file, void *priv,
2307 strcpy(cap->driver, "saa7134"); 2306 strcpy(cap->driver, "saa7134");
2308 strlcpy(cap->card, saa7134_boards[dev->board].name, sizeof(cap->card)); 2307 strlcpy(cap->card, saa7134_boards[dev->board].name, sizeof(cap->card));
2309 sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci)); 2308 sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
2310 cap->version = SAA7134_VERSION_CODE;
2311 cap->capabilities = V4L2_CAP_TUNER; 2309 cap->capabilities = V4L2_CAP_TUNER;
2312 return 0; 2310 return 0;
2313} 2311}
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 28eb10398323..bc8d6bba8ee5 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -19,8 +19,7 @@
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 20 */
21 21
22#include <linux/version.h> 22#define SAA7134_VERSION "0, 2, 17"
23#define SAA7134_VERSION_CODE KERNEL_VERSION(0, 2, 16)
24 23
25#include <linux/pci.h> 24#include <linux/pci.h>
26#include <linux/i2c.h> 25#include <linux/i2c.h>
diff --git a/drivers/media/video/saa7164/saa7164-encoder.c b/drivers/media/video/saa7164/saa7164-encoder.c
index 400364569c8d..2fd38a01887f 100644
--- a/drivers/media/video/saa7164/saa7164-encoder.c
+++ b/drivers/media/video/saa7164/saa7164-encoder.c
@@ -1246,7 +1246,6 @@ static unsigned int fops_poll(struct file *file, poll_table *wait)
1246 struct saa7164_encoder_fh *fh = 1246 struct saa7164_encoder_fh *fh =
1247 (struct saa7164_encoder_fh *)file->private_data; 1247 (struct saa7164_encoder_fh *)file->private_data;
1248 struct saa7164_port *port = fh->port; 1248 struct saa7164_port *port = fh->port;
1249 struct saa7164_user_buffer *ubuf;
1250 unsigned int mask = 0; 1249 unsigned int mask = 0;
1251 1250
1252 port->last_poll_msecs_diff = port->last_poll_msecs; 1251 port->last_poll_msecs_diff = port->last_poll_msecs;
@@ -1278,10 +1277,7 @@ static unsigned int fops_poll(struct file *file, poll_table *wait)
1278 } 1277 }
1279 1278
1280 /* Pull the first buffer from the used list */ 1279 /* Pull the first buffer from the used list */
1281 ubuf = list_first_entry(&port->list_buf_used.list, 1280 if (!list_empty(&port->list_buf_used.list))
1282 struct saa7164_user_buffer, list);
1283
1284 if (ubuf)
1285 mask |= POLLIN | POLLRDNORM; 1281 mask |= POLLIN | POLLRDNORM;
1286 1282
1287 return mask; 1283 return mask;
diff --git a/drivers/media/video/saa7164/saa7164-vbi.c b/drivers/media/video/saa7164/saa7164-vbi.c
index bc1fcedba874..e2e034158718 100644
--- a/drivers/media/video/saa7164/saa7164-vbi.c
+++ b/drivers/media/video/saa7164/saa7164-vbi.c
@@ -1192,7 +1192,6 @@ static unsigned int fops_poll(struct file *file, poll_table *wait)
1192{ 1192{
1193 struct saa7164_vbi_fh *fh = (struct saa7164_vbi_fh *)file->private_data; 1193 struct saa7164_vbi_fh *fh = (struct saa7164_vbi_fh *)file->private_data;
1194 struct saa7164_port *port = fh->port; 1194 struct saa7164_port *port = fh->port;
1195 struct saa7164_user_buffer *ubuf;
1196 unsigned int mask = 0; 1195 unsigned int mask = 0;
1197 1196
1198 port->last_poll_msecs_diff = port->last_poll_msecs; 1197 port->last_poll_msecs_diff = port->last_poll_msecs;
@@ -1224,10 +1223,7 @@ static unsigned int fops_poll(struct file *file, poll_table *wait)
1224 } 1223 }
1225 1224
1226 /* Pull the first buffer from the used list */ 1225 /* Pull the first buffer from the used list */
1227 ubuf = list_first_entry(&port->list_buf_used.list, 1226 if (!list_empty(&port->list_buf_used.list))
1228 struct saa7164_user_buffer, list);
1229
1230 if (ubuf)
1231 mask |= POLLIN | POLLRDNORM; 1227 mask |= POLLIN | POLLRDNORM;
1232 1228
1233 return mask; 1229 return mask;
diff --git a/drivers/media/video/saa7164/saa7164.h b/drivers/media/video/saa7164/saa7164.h
index 16745d2fb349..6678bf1e7816 100644
--- a/drivers/media/video/saa7164/saa7164.h
+++ b/drivers/media/video/saa7164/saa7164.h
@@ -48,7 +48,6 @@
48#include <linux/i2c.h> 48#include <linux/i2c.h>
49#include <linux/i2c-algo-bit.h> 49#include <linux/i2c-algo-bit.h>
50#include <linux/kdev_t.h> 50#include <linux/kdev_t.h>
51#include <linux/version.h>
52#include <linux/mutex.h> 51#include <linux/mutex.h>
53#include <linux/crc32.h> 52#include <linux/crc32.h>
54#include <linux/kthread.h> 53#include <linux/kthread.h>
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 3ae5c9c58cba..e54089802b6b 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -27,7 +27,6 @@
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/moduleparam.h> 28#include <linux/moduleparam.h>
29#include <linux/time.h> 29#include <linux/time.h>
30#include <linux/version.h>
31#include <linux/slab.h> 30#include <linux/slab.h>
32#include <linux/device.h> 31#include <linux/device.h>
33#include <linux/platform_device.h> 32#include <linux/platform_device.h>
@@ -39,6 +38,7 @@
39#include <media/v4l2-dev.h> 38#include <media/v4l2-dev.h>
40#include <media/soc_camera.h> 39#include <media/soc_camera.h>
41#include <media/sh_mobile_ceu.h> 40#include <media/sh_mobile_ceu.h>
41#include <media/sh_mobile_csi2.h>
42#include <media/videobuf2-dma-contig.h> 42#include <media/videobuf2-dma-contig.h>
43#include <media/v4l2-mediabus.h> 43#include <media/v4l2-mediabus.h>
44#include <media/soc_mediabus.h> 44#include <media/soc_mediabus.h>
@@ -96,6 +96,7 @@ struct sh_mobile_ceu_buffer {
96struct sh_mobile_ceu_dev { 96struct sh_mobile_ceu_dev {
97 struct soc_camera_host ici; 97 struct soc_camera_host ici;
98 struct soc_camera_device *icd; 98 struct soc_camera_device *icd;
99 struct platform_device *csi2_pdev;
99 100
100 unsigned int irq; 101 unsigned int irq;
101 void __iomem *base; 102 void __iomem *base;
@@ -205,7 +206,7 @@ static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev)
205 206
206 207
207 if (2 != success) { 208 if (2 != success) {
208 dev_warn(&icd->dev, "soft reset time out\n"); 209 dev_warn(icd->pdev, "soft reset time out\n");
209 return -EIO; 210 return -EIO;
210 } 211 }
211 212
@@ -220,7 +221,7 @@ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
220 unsigned long sizes[], void *alloc_ctxs[]) 221 unsigned long sizes[], void *alloc_ctxs[])
221{ 222{
222 struct soc_camera_device *icd = container_of(vq, struct soc_camera_device, vb2_vidq); 223 struct soc_camera_device *icd = container_of(vq, struct soc_camera_device, vb2_vidq);
223 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 224 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
224 struct sh_mobile_ceu_dev *pcdev = ici->priv; 225 struct sh_mobile_ceu_dev *pcdev = ici->priv;
225 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, 226 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
226 icd->current_fmt->host_fmt); 227 icd->current_fmt->host_fmt);
@@ -242,7 +243,7 @@ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
242 *count = pcdev->video_limit / PAGE_ALIGN(sizes[0]); 243 *count = pcdev->video_limit / PAGE_ALIGN(sizes[0]);
243 } 244 }
244 245
245 dev_dbg(icd->dev.parent, "count=%d, size=%lu\n", *count, sizes[0]); 246 dev_dbg(icd->parent, "count=%d, size=%lu\n", *count, sizes[0]);
246 247
247 return 0; 248 return 0;
248} 249}
@@ -351,7 +352,7 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
351 352
352 buf = to_ceu_vb(vb); 353 buf = to_ceu_vb(vb);
353 354
354 dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%p %lu\n", __func__, 355 dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
355 vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0)); 356 vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
356 357
357 /* Added list head initialization on alloc */ 358 /* Added list head initialization on alloc */
@@ -371,7 +372,7 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
371 size = icd->user_height * bytes_per_line; 372 size = icd->user_height * bytes_per_line;
372 373
373 if (vb2_plane_size(vb, 0) < size) { 374 if (vb2_plane_size(vb, 0) < size) {
374 dev_err(icd->dev.parent, "Buffer too small (%lu < %lu)\n", 375 dev_err(icd->parent, "Buffer too small (%lu < %lu)\n",
375 vb2_plane_size(vb, 0), size); 376 vb2_plane_size(vb, 0), size);
376 return -ENOBUFS; 377 return -ENOBUFS;
377 } 378 }
@@ -384,11 +385,11 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
384static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb) 385static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
385{ 386{
386 struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq); 387 struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
387 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 388 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
388 struct sh_mobile_ceu_dev *pcdev = ici->priv; 389 struct sh_mobile_ceu_dev *pcdev = ici->priv;
389 struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb); 390 struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
390 391
391 dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%p %lu\n", __func__, 392 dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
392 vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0)); 393 vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
393 394
394 spin_lock_irq(&pcdev->lock); 395 spin_lock_irq(&pcdev->lock);
@@ -409,7 +410,7 @@ static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
409static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb) 410static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
410{ 411{
411 struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq); 412 struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
412 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 413 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
413 struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb); 414 struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
414 struct sh_mobile_ceu_dev *pcdev = ici->priv; 415 struct sh_mobile_ceu_dev *pcdev = ici->priv;
415 416
@@ -421,8 +422,12 @@ static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
421 pcdev->active = NULL; 422 pcdev->active = NULL;
422 } 423 }
423 424
424 /* Doesn't hurt also if the list is empty */ 425 /*
425 list_del_init(&buf->queue); 426 * Doesn't hurt also if the list is empty, but it hurts, if queuing the
427 * buffer failed, and .buf_init() hasn't been called
428 */
429 if (buf->queue.next)
430 list_del_init(&buf->queue);
426 431
427 spin_unlock_irq(&pcdev->lock); 432 spin_unlock_irq(&pcdev->lock);
428} 433}
@@ -437,7 +442,7 @@ static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
437static int sh_mobile_ceu_stop_streaming(struct vb2_queue *q) 442static int sh_mobile_ceu_stop_streaming(struct vb2_queue *q)
438{ 443{
439 struct soc_camera_device *icd = container_of(q, struct soc_camera_device, vb2_vidq); 444 struct soc_camera_device *icd = container_of(q, struct soc_camera_device, vb2_vidq);
440 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 445 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
441 struct sh_mobile_ceu_dev *pcdev = ici->priv; 446 struct sh_mobile_ceu_dev *pcdev = ici->priv;
442 struct list_head *buf_head, *tmp; 447 struct list_head *buf_head, *tmp;
443 448
@@ -499,25 +504,48 @@ out:
499 return IRQ_HANDLED; 504 return IRQ_HANDLED;
500} 505}
501 506
507static struct v4l2_subdev *find_csi2(struct sh_mobile_ceu_dev *pcdev)
508{
509 struct v4l2_subdev *sd;
510
511 if (!pcdev->csi2_pdev)
512 return NULL;
513
514 v4l2_device_for_each_subdev(sd, &pcdev->ici.v4l2_dev)
515 if (&pcdev->csi2_pdev->dev == v4l2_get_subdevdata(sd))
516 return sd;
517
518 return NULL;
519}
520
502/* Called with .video_lock held */ 521/* Called with .video_lock held */
503static int sh_mobile_ceu_add_device(struct soc_camera_device *icd) 522static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
504{ 523{
505 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 524 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
506 struct sh_mobile_ceu_dev *pcdev = ici->priv; 525 struct sh_mobile_ceu_dev *pcdev = ici->priv;
526 struct v4l2_subdev *csi2_sd;
507 int ret; 527 int ret;
508 528
509 if (pcdev->icd) 529 if (pcdev->icd)
510 return -EBUSY; 530 return -EBUSY;
511 531
512 dev_info(icd->dev.parent, 532 dev_info(icd->parent,
513 "SuperH Mobile CEU driver attached to camera %d\n", 533 "SuperH Mobile CEU driver attached to camera %d\n",
514 icd->devnum); 534 icd->devnum);
515 535
516 pm_runtime_get_sync(ici->v4l2_dev.dev); 536 pm_runtime_get_sync(ici->v4l2_dev.dev);
517 537
518 ret = sh_mobile_ceu_soft_reset(pcdev); 538 ret = sh_mobile_ceu_soft_reset(pcdev);
519 if (!ret) 539
540 csi2_sd = find_csi2(pcdev);
541
542 ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
543 if (ret != -ENODEV && ret != -ENOIOCTLCMD && ret < 0) {
544 pm_runtime_put_sync(ici->v4l2_dev.dev);
545 } else {
520 pcdev->icd = icd; 546 pcdev->icd = icd;
547 ret = 0;
548 }
521 549
522 return ret; 550 return ret;
523} 551}
@@ -525,11 +553,13 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
525/* Called with .video_lock held */ 553/* Called with .video_lock held */
526static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd) 554static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
527{ 555{
528 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 556 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
529 struct sh_mobile_ceu_dev *pcdev = ici->priv; 557 struct sh_mobile_ceu_dev *pcdev = ici->priv;
558 struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
530 559
531 BUG_ON(icd != pcdev->icd); 560 BUG_ON(icd != pcdev->icd);
532 561
562 v4l2_subdev_call(csi2_sd, core, s_power, 0);
533 /* disable capture, disable interrupts */ 563 /* disable capture, disable interrupts */
534 ceu_write(pcdev, CEIER, 0); 564 ceu_write(pcdev, CEIER, 0);
535 sh_mobile_ceu_soft_reset(pcdev); 565 sh_mobile_ceu_soft_reset(pcdev);
@@ -545,7 +575,7 @@ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
545 575
546 pm_runtime_put_sync(ici->v4l2_dev.dev); 576 pm_runtime_put_sync(ici->v4l2_dev.dev);
547 577
548 dev_info(icd->dev.parent, 578 dev_info(icd->parent,
549 "SuperH Mobile CEU driver detached from camera %d\n", 579 "SuperH Mobile CEU driver detached from camera %d\n",
550 icd->devnum); 580 icd->devnum);
551 581
@@ -585,14 +615,14 @@ static u16 calc_scale(unsigned int src, unsigned int *dst)
585/* rect is guaranteed to not exceed the scaled camera rectangle */ 615/* rect is guaranteed to not exceed the scaled camera rectangle */
586static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd) 616static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
587{ 617{
588 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 618 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
589 struct sh_mobile_ceu_cam *cam = icd->host_priv; 619 struct sh_mobile_ceu_cam *cam = icd->host_priv;
590 struct sh_mobile_ceu_dev *pcdev = ici->priv; 620 struct sh_mobile_ceu_dev *pcdev = ici->priv;
591 unsigned int height, width, cdwdr_width, in_width, in_height; 621 unsigned int height, width, cdwdr_width, in_width, in_height;
592 unsigned int left_offset, top_offset; 622 unsigned int left_offset, top_offset;
593 u32 camor; 623 u32 camor;
594 624
595 dev_geo(icd->dev.parent, "Crop %ux%u@%u:%u\n", 625 dev_geo(icd->parent, "Crop %ux%u@%u:%u\n",
596 icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top); 626 icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top);
597 627
598 left_offset = cam->ceu_left; 628 left_offset = cam->ceu_left;
@@ -641,7 +671,7 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
641 } 671 }
642 672
643 /* CSI2 special configuration */ 673 /* CSI2 special configuration */
644 if (pcdev->pdata->csi2_dev) { 674 if (pcdev->pdata->csi2) {
645 in_width = ((in_width - 2) * 2); 675 in_width = ((in_width - 2) * 2);
646 left_offset *= 2; 676 left_offset *= 2;
647 } 677 }
@@ -649,7 +679,7 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
649 /* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */ 679 /* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */
650 camor = left_offset | (top_offset << 16); 680 camor = left_offset | (top_offset << 16);
651 681
652 dev_geo(icd->dev.parent, 682 dev_geo(icd->parent,
653 "CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor, 683 "CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor,
654 (in_height << 16) | in_width, (height << 16) | width, 684 (in_height << 16) | in_width, (height << 16) | width,
655 cdwdr_width); 685 cdwdr_width);
@@ -697,7 +727,7 @@ static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr)
697static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd, 727static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
698 __u32 pixfmt) 728 __u32 pixfmt)
699{ 729{
700 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 730 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
701 struct sh_mobile_ceu_dev *pcdev = ici->priv; 731 struct sh_mobile_ceu_dev *pcdev = ici->priv;
702 int ret; 732 int ret;
703 unsigned long camera_flags, common_flags, value; 733 unsigned long camera_flags, common_flags, value;
@@ -783,7 +813,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
783 value |= pcdev->is_16bit ? 1 << 12 : 0; 813 value |= pcdev->is_16bit ? 1 << 12 : 0;
784 814
785 /* CSI2 mode */ 815 /* CSI2 mode */
786 if (pcdev->pdata->csi2_dev) 816 if (pcdev->pdata->csi2)
787 value |= 3 << 12; 817 value |= 3 << 12;
788 818
789 ceu_write(pcdev, CAMCR, value); 819 ceu_write(pcdev, CAMCR, value);
@@ -806,7 +836,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
806 sh_mobile_ceu_set_rect(icd); 836 sh_mobile_ceu_set_rect(icd);
807 mdelay(1); 837 mdelay(1);
808 838
809 dev_geo(icd->dev.parent, "CFLCR 0x%x\n", pcdev->cflcr); 839 dev_geo(icd->parent, "CFLCR 0x%x\n", pcdev->cflcr);
810 ceu_write(pcdev, CFLCR, pcdev->cflcr); 840 ceu_write(pcdev, CFLCR, pcdev->cflcr);
811 841
812 /* 842 /*
@@ -829,7 +859,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
829 ceu_write(pcdev, CDOCR, value); 859 ceu_write(pcdev, CDOCR, value);
830 ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */ 860 ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */
831 861
832 dev_dbg(icd->dev.parent, "S_FMT successful for %c%c%c%c %ux%u\n", 862 dev_dbg(icd->parent, "S_FMT successful for %c%c%c%c %ux%u\n",
833 pixfmt & 0xff, (pixfmt >> 8) & 0xff, 863 pixfmt & 0xff, (pixfmt >> 8) & 0xff,
834 (pixfmt >> 16) & 0xff, (pixfmt >> 24) & 0xff, 864 (pixfmt >> 16) & 0xff, (pixfmt >> 24) & 0xff,
835 icd->user_width, icd->user_height); 865 icd->user_width, icd->user_height);
@@ -843,7 +873,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
843static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd, 873static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
844 unsigned char buswidth) 874 unsigned char buswidth)
845{ 875{
846 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 876 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
847 struct sh_mobile_ceu_dev *pcdev = ici->priv; 877 struct sh_mobile_ceu_dev *pcdev = ici->priv;
848 unsigned long camera_flags, common_flags; 878 unsigned long camera_flags, common_flags;
849 879
@@ -901,7 +931,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
901 struct soc_camera_format_xlate *xlate) 931 struct soc_camera_format_xlate *xlate)
902{ 932{
903 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 933 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
904 struct device *dev = icd->dev.parent; 934 struct device *dev = icd->parent;
905 struct soc_camera_host *ici = to_soc_camera_host(dev); 935 struct soc_camera_host *ici = to_soc_camera_host(dev);
906 struct sh_mobile_ceu_dev *pcdev = ici->priv; 936 struct sh_mobile_ceu_dev *pcdev = ici->priv;
907 int ret, k, n; 937 int ret, k, n;
@@ -921,7 +951,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
921 return 0; 951 return 0;
922 } 952 }
923 953
924 if (!pcdev->pdata->csi2_dev) { 954 if (!pcdev->pdata->csi2) {
925 ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample); 955 ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
926 if (ret < 0) 956 if (ret < 0)
927 return 0; 957 return 0;
@@ -1244,7 +1274,7 @@ static int client_s_fmt(struct soc_camera_device *icd,
1244{ 1274{
1245 struct sh_mobile_ceu_cam *cam = icd->host_priv; 1275 struct sh_mobile_ceu_cam *cam = icd->host_priv;
1246 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1276 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1247 struct device *dev = icd->dev.parent; 1277 struct device *dev = icd->parent;
1248 unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h; 1278 unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
1249 unsigned int max_width, max_height; 1279 unsigned int max_width, max_height;
1250 struct v4l2_cropcap cap; 1280 struct v4l2_cropcap cap;
@@ -1313,7 +1343,7 @@ static int client_scale(struct soc_camera_device *icd,
1313 bool ceu_can_scale) 1343 bool ceu_can_scale)
1314{ 1344{
1315 struct sh_mobile_ceu_cam *cam = icd->host_priv; 1345 struct sh_mobile_ceu_cam *cam = icd->host_priv;
1316 struct device *dev = icd->dev.parent; 1346 struct device *dev = icd->parent;
1317 struct v4l2_mbus_framefmt mf_tmp = *mf; 1347 struct v4l2_mbus_framefmt mf_tmp = *mf;
1318 unsigned int scale_h, scale_v; 1348 unsigned int scale_h, scale_v;
1319 int ret; 1349 int ret;
@@ -1363,13 +1393,13 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
1363 struct v4l2_crop *a) 1393 struct v4l2_crop *a)
1364{ 1394{
1365 struct v4l2_rect *rect = &a->c; 1395 struct v4l2_rect *rect = &a->c;
1366 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1396 struct device *dev = icd->parent;
1397 struct soc_camera_host *ici = to_soc_camera_host(dev);
1367 struct sh_mobile_ceu_dev *pcdev = ici->priv; 1398 struct sh_mobile_ceu_dev *pcdev = ici->priv;
1368 struct v4l2_crop cam_crop; 1399 struct v4l2_crop cam_crop;
1369 struct sh_mobile_ceu_cam *cam = icd->host_priv; 1400 struct sh_mobile_ceu_cam *cam = icd->host_priv;
1370 struct v4l2_rect *cam_rect = &cam_crop.c; 1401 struct v4l2_rect *cam_rect = &cam_crop.c;
1371 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1402 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1372 struct device *dev = icd->dev.parent;
1373 struct v4l2_mbus_framefmt mf; 1403 struct v4l2_mbus_framefmt mf;
1374 unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v, 1404 unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v,
1375 out_width, out_height; 1405 out_width, out_height;
@@ -1511,7 +1541,7 @@ static void calculate_client_output(struct soc_camera_device *icd,
1511 struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf) 1541 struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf)
1512{ 1542{
1513 struct sh_mobile_ceu_cam *cam = icd->host_priv; 1543 struct sh_mobile_ceu_cam *cam = icd->host_priv;
1514 struct device *dev = icd->dev.parent; 1544 struct device *dev = icd->parent;
1515 struct v4l2_rect *cam_subrect = &cam->subrect; 1545 struct v4l2_rect *cam_subrect = &cam->subrect;
1516 unsigned int scale_v, scale_h; 1546 unsigned int scale_v, scale_h;
1517 1547
@@ -1555,12 +1585,12 @@ static void calculate_client_output(struct soc_camera_device *icd,
1555static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd, 1585static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
1556 struct v4l2_format *f) 1586 struct v4l2_format *f)
1557{ 1587{
1558 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1588 struct device *dev = icd->parent;
1589 struct soc_camera_host *ici = to_soc_camera_host(dev);
1559 struct sh_mobile_ceu_dev *pcdev = ici->priv; 1590 struct sh_mobile_ceu_dev *pcdev = ici->priv;
1560 struct sh_mobile_ceu_cam *cam = icd->host_priv; 1591 struct sh_mobile_ceu_cam *cam = icd->host_priv;
1561 struct v4l2_pix_format *pix = &f->fmt.pix; 1592 struct v4l2_pix_format *pix = &f->fmt.pix;
1562 struct v4l2_mbus_framefmt mf; 1593 struct v4l2_mbus_framefmt mf;
1563 struct device *dev = icd->dev.parent;
1564 __u32 pixfmt = pix->pixelformat; 1594 __u32 pixfmt = pix->pixelformat;
1565 const struct soc_camera_format_xlate *xlate; 1595 const struct soc_camera_format_xlate *xlate;
1566 /* Keep Compiler Happy */ 1596 /* Keep Compiler Happy */
@@ -1684,12 +1714,12 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
1684 int width, height; 1714 int width, height;
1685 int ret; 1715 int ret;
1686 1716
1687 dev_geo(icd->dev.parent, "TRY_FMT(pix=0x%x, %ux%u)\n", 1717 dev_geo(icd->parent, "TRY_FMT(pix=0x%x, %ux%u)\n",
1688 pixfmt, pix->width, pix->height); 1718 pixfmt, pix->width, pix->height);
1689 1719
1690 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); 1720 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
1691 if (!xlate) { 1721 if (!xlate) {
1692 dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt); 1722 dev_warn(icd->parent, "Format %x not found\n", pixfmt);
1693 return -EINVAL; 1723 return -EINVAL;
1694 } 1724 }
1695 1725
@@ -1701,11 +1731,6 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
1701 width = pix->width; 1731 width = pix->width;
1702 height = pix->height; 1732 height = pix->height;
1703 1733
1704 pix->bytesperline = soc_mbus_bytes_per_line(width, xlate->host_fmt);
1705 if ((int)pix->bytesperline < 0)
1706 return pix->bytesperline;
1707 pix->sizeimage = height * pix->bytesperline;
1708
1709 /* limit to sensor capabilities */ 1734 /* limit to sensor capabilities */
1710 mf.width = pix->width; 1735 mf.width = pix->width;
1711 mf.height = pix->height; 1736 mf.height = pix->height;
@@ -1741,7 +1766,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
1741 try_mbus_fmt, &mf); 1766 try_mbus_fmt, &mf);
1742 if (ret < 0) { 1767 if (ret < 0) {
1743 /* Shouldn't actually happen... */ 1768 /* Shouldn't actually happen... */
1744 dev_err(icd->dev.parent, 1769 dev_err(icd->parent,
1745 "FIXME: client try_fmt() = %d\n", ret); 1770 "FIXME: client try_fmt() = %d\n", ret);
1746 return ret; 1771 return ret;
1747 } 1772 }
@@ -1753,7 +1778,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
1753 pix->height = height; 1778 pix->height = height;
1754 } 1779 }
1755 1780
1756 dev_geo(icd->dev.parent, "%s(): return %d, fmt 0x%x, %ux%u\n", 1781 dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n",
1757 __func__, ret, pix->pixelformat, pix->width, pix->height); 1782 __func__, ret, pix->pixelformat, pix->width, pix->height);
1758 1783
1759 return ret; 1784 return ret;
@@ -1763,7 +1788,7 @@ static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
1763 struct v4l2_crop *a) 1788 struct v4l2_crop *a)
1764{ 1789{
1765 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1790 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1766 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1791 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
1767 struct sh_mobile_ceu_dev *pcdev = ici->priv; 1792 struct sh_mobile_ceu_dev *pcdev = ici->priv;
1768 u32 out_width = icd->user_width, out_height = icd->user_height; 1793 u32 out_width = icd->user_width, out_height = icd->user_height;
1769 int ret; 1794 int ret;
@@ -1775,13 +1800,13 @@ static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
1775 /* Stop the client */ 1800 /* Stop the client */
1776 ret = v4l2_subdev_call(sd, video, s_stream, 0); 1801 ret = v4l2_subdev_call(sd, video, s_stream, 0);
1777 if (ret < 0) 1802 if (ret < 0)
1778 dev_warn(icd->dev.parent, 1803 dev_warn(icd->parent,
1779 "Client failed to stop the stream: %d\n", ret); 1804 "Client failed to stop the stream: %d\n", ret);
1780 else 1805 else
1781 /* Do the crop, if it fails, there's nothing more we can do */ 1806 /* Do the crop, if it fails, there's nothing more we can do */
1782 sh_mobile_ceu_set_crop(icd, a); 1807 sh_mobile_ceu_set_crop(icd, a);
1783 1808
1784 dev_geo(icd->dev.parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height); 1809 dev_geo(icd->parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height);
1785 1810
1786 if (icd->user_width != out_width || icd->user_height != out_height) { 1811 if (icd->user_width != out_width || icd->user_height != out_height) {
1787 struct v4l2_format f = { 1812 struct v4l2_format f = {
@@ -1827,7 +1852,6 @@ static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
1827 struct v4l2_capability *cap) 1852 struct v4l2_capability *cap)
1828{ 1853{
1829 strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card)); 1854 strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
1830 cap->version = KERNEL_VERSION(0, 0, 5);
1831 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; 1855 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
1832 return 0; 1856 return 0;
1833} 1857}
@@ -1848,7 +1872,7 @@ static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
1848static int sh_mobile_ceu_get_ctrl(struct soc_camera_device *icd, 1872static int sh_mobile_ceu_get_ctrl(struct soc_camera_device *icd,
1849 struct v4l2_control *ctrl) 1873 struct v4l2_control *ctrl)
1850{ 1874{
1851 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1875 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
1852 struct sh_mobile_ceu_dev *pcdev = ici->priv; 1876 struct sh_mobile_ceu_dev *pcdev = ici->priv;
1853 u32 val; 1877 u32 val;
1854 1878
@@ -1864,7 +1888,7 @@ static int sh_mobile_ceu_get_ctrl(struct soc_camera_device *icd,
1864static int sh_mobile_ceu_set_ctrl(struct soc_camera_device *icd, 1888static int sh_mobile_ceu_set_ctrl(struct soc_camera_device *icd,
1865 struct v4l2_control *ctrl) 1889 struct v4l2_control *ctrl)
1866{ 1890{
1867 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1891 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
1868 struct sh_mobile_ceu_dev *pcdev = ici->priv; 1892 struct sh_mobile_ceu_dev *pcdev = ici->priv;
1869 1893
1870 switch (ctrl->id) { 1894 switch (ctrl->id) {
@@ -1950,7 +1974,7 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
1950 .completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion), 1974 .completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion),
1951 .notifier.notifier_call = bus_notify, 1975 .notifier.notifier_call = bus_notify,
1952 }; 1976 };
1953 struct device *csi2; 1977 struct sh_mobile_ceu_companion *csi2;
1954 1978
1955 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1979 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1956 irq = platform_get_irq(pdev, 0); 1980 irq = platform_get_irq(pdev, 0);
@@ -2023,26 +2047,61 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
2023 pcdev->ici.drv_name = dev_name(&pdev->dev); 2047 pcdev->ici.drv_name = dev_name(&pdev->dev);
2024 pcdev->ici.ops = &sh_mobile_ceu_host_ops; 2048 pcdev->ici.ops = &sh_mobile_ceu_host_ops;
2025 2049
2050 pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
2051 if (IS_ERR(pcdev->alloc_ctx)) {
2052 err = PTR_ERR(pcdev->alloc_ctx);
2053 goto exit_free_clk;
2054 }
2055
2056 err = soc_camera_host_register(&pcdev->ici);
2057 if (err)
2058 goto exit_free_ctx;
2059
2026 /* CSI2 interfacing */ 2060 /* CSI2 interfacing */
2027 csi2 = pcdev->pdata->csi2_dev; 2061 csi2 = pcdev->pdata->csi2;
2028 if (csi2) { 2062 if (csi2) {
2029 wait.dev = csi2; 2063 struct platform_device *csi2_pdev =
2064 platform_device_alloc("sh-mobile-csi2", csi2->id);
2065 struct sh_csi2_pdata *csi2_pdata = csi2->platform_data;
2066
2067 if (!csi2_pdev) {
2068 err = -ENOMEM;
2069 goto exit_host_unregister;
2070 }
2071
2072 pcdev->csi2_pdev = csi2_pdev;
2073
2074 err = platform_device_add_data(csi2_pdev, csi2_pdata, sizeof(*csi2_pdata));
2075 if (err < 0)
2076 goto exit_pdev_put;
2077
2078 csi2_pdata = csi2_pdev->dev.platform_data;
2079 csi2_pdata->v4l2_dev = &pcdev->ici.v4l2_dev;
2080
2081 csi2_pdev->resource = csi2->resource;
2082 csi2_pdev->num_resources = csi2->num_resources;
2083
2084 err = platform_device_add(csi2_pdev);
2085 if (err < 0)
2086 goto exit_pdev_put;
2087
2088 wait.dev = &csi2_pdev->dev;
2030 2089
2031 err = bus_register_notifier(&platform_bus_type, &wait.notifier); 2090 err = bus_register_notifier(&platform_bus_type, &wait.notifier);
2032 if (err < 0) 2091 if (err < 0)
2033 goto exit_free_clk; 2092 goto exit_pdev_unregister;
2034 2093
2035 /* 2094 /*
2036 * From this point the driver module will not unload, until 2095 * From this point the driver module will not unload, until
2037 * we complete the completion. 2096 * we complete the completion.
2038 */ 2097 */
2039 2098
2040 if (!csi2->driver) { 2099 if (!csi2_pdev->dev.driver) {
2041 complete(&wait.completion); 2100 complete(&wait.completion);
2042 /* Either too late, or probing failed */ 2101 /* Either too late, or probing failed */
2043 bus_unregister_notifier(&platform_bus_type, &wait.notifier); 2102 bus_unregister_notifier(&platform_bus_type, &wait.notifier);
2044 err = -ENXIO; 2103 err = -ENXIO;
2045 goto exit_free_clk; 2104 goto exit_pdev_unregister;
2046 } 2105 }
2047 2106
2048 /* 2107 /*
@@ -2051,34 +2110,28 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
2051 * the "owner" is safe! 2110 * the "owner" is safe!
2052 */ 2111 */
2053 2112
2054 err = try_module_get(csi2->driver->owner); 2113 err = try_module_get(csi2_pdev->dev.driver->owner);
2055 2114
2056 /* Let notifier complete, if it has been locked */ 2115 /* Let notifier complete, if it has been locked */
2057 complete(&wait.completion); 2116 complete(&wait.completion);
2058 bus_unregister_notifier(&platform_bus_type, &wait.notifier); 2117 bus_unregister_notifier(&platform_bus_type, &wait.notifier);
2059 if (!err) { 2118 if (!err) {
2060 err = -ENODEV; 2119 err = -ENODEV;
2061 goto exit_free_clk; 2120 goto exit_pdev_unregister;
2062 } 2121 }
2063 } 2122 }
2064 2123
2065 pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
2066 if (IS_ERR(pcdev->alloc_ctx)) {
2067 err = PTR_ERR(pcdev->alloc_ctx);
2068 goto exit_module_put;
2069 }
2070
2071 err = soc_camera_host_register(&pcdev->ici);
2072 if (err)
2073 goto exit_free_ctx;
2074
2075 return 0; 2124 return 0;
2076 2125
2126exit_pdev_unregister:
2127 platform_device_del(pcdev->csi2_pdev);
2128exit_pdev_put:
2129 pcdev->csi2_pdev->resource = NULL;
2130 platform_device_put(pcdev->csi2_pdev);
2131exit_host_unregister:
2132 soc_camera_host_unregister(&pcdev->ici);
2077exit_free_ctx: 2133exit_free_ctx:
2078 vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx); 2134 vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
2079exit_module_put:
2080 if (csi2 && csi2->driver)
2081 module_put(csi2->driver->owner);
2082exit_free_clk: 2135exit_free_clk:
2083 pm_runtime_disable(&pdev->dev); 2136 pm_runtime_disable(&pdev->dev);
2084 free_irq(pcdev->irq, pcdev); 2137 free_irq(pcdev->irq, pcdev);
@@ -2098,7 +2151,7 @@ static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev)
2098 struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev); 2151 struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
2099 struct sh_mobile_ceu_dev *pcdev = container_of(soc_host, 2152 struct sh_mobile_ceu_dev *pcdev = container_of(soc_host,
2100 struct sh_mobile_ceu_dev, ici); 2153 struct sh_mobile_ceu_dev, ici);
2101 struct device *csi2 = pcdev->pdata->csi2_dev; 2154 struct platform_device *csi2_pdev = pcdev->csi2_pdev;
2102 2155
2103 soc_camera_host_unregister(soc_host); 2156 soc_camera_host_unregister(soc_host);
2104 pm_runtime_disable(&pdev->dev); 2157 pm_runtime_disable(&pdev->dev);
@@ -2107,8 +2160,13 @@ static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev)
2107 dma_release_declared_memory(&pdev->dev); 2160 dma_release_declared_memory(&pdev->dev);
2108 iounmap(pcdev->base); 2161 iounmap(pcdev->base);
2109 vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx); 2162 vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
2110 if (csi2 && csi2->driver) 2163 if (csi2_pdev && csi2_pdev->dev.driver) {
2111 module_put(csi2->driver->owner); 2164 struct module *csi2_drv = csi2_pdev->dev.driver->owner;
2165 platform_device_del(csi2_pdev);
2166 csi2_pdev->resource = NULL;
2167 platform_device_put(csi2_pdev);
2168 module_put(csi2_drv);
2169 }
2112 kfree(pcdev); 2170 kfree(pcdev);
2113 2171
2114 return 0; 2172 return 0;
@@ -2158,4 +2216,5 @@ module_exit(sh_mobile_ceu_exit);
2158MODULE_DESCRIPTION("SuperH Mobile CEU driver"); 2216MODULE_DESCRIPTION("SuperH Mobile CEU driver");
2159MODULE_AUTHOR("Magnus Damm"); 2217MODULE_AUTHOR("Magnus Damm");
2160MODULE_LICENSE("GPL"); 2218MODULE_LICENSE("GPL");
2219MODULE_VERSION("0.0.6");
2161MODULE_ALIAS("platform:sh_mobile_ceu"); 2220MODULE_ALIAS("platform:sh_mobile_ceu");
diff --git a/drivers/media/video/sh_mobile_csi2.c b/drivers/media/video/sh_mobile_csi2.c
index 98b87481fa94..2893a0134c7e 100644
--- a/drivers/media/video/sh_mobile_csi2.c
+++ b/drivers/media/video/sh_mobile_csi2.c
@@ -16,6 +16,7 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/videodev2.h> 17#include <linux/videodev2.h>
18 18
19#include <media/sh_mobile_ceu.h>
19#include <media/sh_mobile_csi2.h> 20#include <media/sh_mobile_csi2.h>
20#include <media/soc_camera.h> 21#include <media/soc_camera.h>
21#include <media/v4l2-common.h> 22#include <media/v4l2-common.h>
@@ -33,7 +34,6 @@
33struct sh_csi2 { 34struct sh_csi2 {
34 struct v4l2_subdev subdev; 35 struct v4l2_subdev subdev;
35 struct list_head list; 36 struct list_head list;
36 struct notifier_block notifier;
37 unsigned int irq; 37 unsigned int irq;
38 void __iomem *base; 38 void __iomem *base;
39 struct platform_device *pdev; 39 struct platform_device *pdev;
@@ -132,13 +132,6 @@ static struct v4l2_subdev_video_ops sh_csi2_subdev_video_ops = {
132 .try_mbus_fmt = sh_csi2_try_fmt, 132 .try_mbus_fmt = sh_csi2_try_fmt,
133}; 133};
134 134
135static struct v4l2_subdev_core_ops sh_csi2_subdev_core_ops;
136
137static struct v4l2_subdev_ops sh_csi2_subdev_ops = {
138 .core = &sh_csi2_subdev_core_ops,
139 .video = &sh_csi2_subdev_video_ops,
140};
141
142static void sh_csi2_hwinit(struct sh_csi2 *priv) 135static void sh_csi2_hwinit(struct sh_csi2 *priv)
143{ 136{
144 struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data; 137 struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
@@ -186,65 +179,84 @@ static unsigned long sh_csi2_query_bus_param(struct soc_camera_device *icd)
186 return soc_camera_apply_sensor_flags(icl, flags); 179 return soc_camera_apply_sensor_flags(icl, flags);
187} 180}
188 181
189static int sh_csi2_notify(struct notifier_block *nb, 182static int sh_csi2_client_connect(struct sh_csi2 *priv)
190 unsigned long action, void *data)
191{ 183{
192 struct device *dev = data;
193 struct soc_camera_device *icd = to_soc_camera_dev(dev);
194 struct v4l2_device *v4l2_dev = dev_get_drvdata(dev->parent);
195 struct sh_csi2 *priv =
196 container_of(nb, struct sh_csi2, notifier);
197 struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data; 184 struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
198 int ret, i; 185 struct v4l2_subdev *sd, *csi2_sd = &priv->subdev;
186 struct soc_camera_device *icd = NULL;
187 struct device *dev = v4l2_get_subdevdata(&priv->subdev);
188 int i;
189
190 v4l2_device_for_each_subdev(sd, csi2_sd->v4l2_dev)
191 if (sd->grp_id) {
192 icd = (struct soc_camera_device *)sd->grp_id;
193 break;
194 }
195
196 if (!icd)
197 return -EINVAL;
199 198
200 for (i = 0; i < pdata->num_clients; i++) 199 for (i = 0; i < pdata->num_clients; i++)
201 if (&pdata->clients[i].pdev->dev == icd->pdev) 200 if (&pdata->clients[i].pdev->dev == icd->pdev)
202 break; 201 break;
203 202
204 dev_dbg(dev, "%s(%p): action = %lu, found #%d\n", __func__, dev, action, i); 203 dev_dbg(dev, "%s(%p): found #%d\n", __func__, dev, i);
205 204
206 if (i == pdata->num_clients) 205 if (i == pdata->num_clients)
207 return NOTIFY_DONE; 206 return -ENODEV;
208 207
209 switch (action) { 208 priv->client = pdata->clients + i;
210 case BUS_NOTIFY_BOUND_DRIVER:
211 snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s%s",
212 dev_name(v4l2_dev->dev), ".mipi-csi");
213 priv->subdev.grp_id = (long)icd;
214 ret = v4l2_device_register_subdev(v4l2_dev, &priv->subdev);
215 dev_dbg(dev, "%s(%p): ret(register_subdev) = %d\n", __func__, priv, ret);
216 if (ret < 0)
217 return NOTIFY_DONE;
218 209
219 priv->client = pdata->clients + i; 210 priv->set_bus_param = icd->ops->set_bus_param;
211 priv->query_bus_param = icd->ops->query_bus_param;
212 icd->ops->set_bus_param = sh_csi2_set_bus_param;
213 icd->ops->query_bus_param = sh_csi2_query_bus_param;
220 214
221 priv->set_bus_param = icd->ops->set_bus_param; 215 csi2_sd->grp_id = (long)icd;
222 priv->query_bus_param = icd->ops->query_bus_param;
223 icd->ops->set_bus_param = sh_csi2_set_bus_param;
224 icd->ops->query_bus_param = sh_csi2_query_bus_param;
225 216
226 pm_runtime_get_sync(v4l2_get_subdevdata(&priv->subdev)); 217 pm_runtime_get_sync(dev);
227 218
228 sh_csi2_hwinit(priv); 219 sh_csi2_hwinit(priv);
229 break;
230 case BUS_NOTIFY_UNBIND_DRIVER:
231 priv->client = NULL;
232 220
233 /* Driver is about to be unbound */ 221 return 0;
234 icd->ops->set_bus_param = priv->set_bus_param; 222}
235 icd->ops->query_bus_param = priv->query_bus_param;
236 priv->set_bus_param = NULL;
237 priv->query_bus_param = NULL;
238 223
239 v4l2_device_unregister_subdev(&priv->subdev); 224static void sh_csi2_client_disconnect(struct sh_csi2 *priv)
225{
226 struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
240 227
241 pm_runtime_put(v4l2_get_subdevdata(&priv->subdev)); 228 priv->client = NULL;
242 break; 229 priv->subdev.grp_id = 0;
243 }
244 230
245 return NOTIFY_OK; 231 /* Driver is about to be unbound */
232 icd->ops->set_bus_param = priv->set_bus_param;
233 icd->ops->query_bus_param = priv->query_bus_param;
234 priv->set_bus_param = NULL;
235 priv->query_bus_param = NULL;
236
237 pm_runtime_put(v4l2_get_subdevdata(&priv->subdev));
246} 238}
247 239
240static int sh_csi2_s_power(struct v4l2_subdev *sd, int on)
241{
242 struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
243
244 if (on)
245 return sh_csi2_client_connect(priv);
246
247 sh_csi2_client_disconnect(priv);
248 return 0;
249}
250
251static struct v4l2_subdev_core_ops sh_csi2_subdev_core_ops = {
252 .s_power = sh_csi2_s_power,
253};
254
255static struct v4l2_subdev_ops sh_csi2_subdev_ops = {
256 .core = &sh_csi2_subdev_core_ops,
257 .video = &sh_csi2_subdev_video_ops,
258};
259
248static __devinit int sh_csi2_probe(struct platform_device *pdev) 260static __devinit int sh_csi2_probe(struct platform_device *pdev)
249{ 261{
250 struct resource *res; 262 struct resource *res;
@@ -274,14 +286,6 @@ static __devinit int sh_csi2_probe(struct platform_device *pdev)
274 return -ENOMEM; 286 return -ENOMEM;
275 287
276 priv->irq = irq; 288 priv->irq = irq;
277 priv->notifier.notifier_call = sh_csi2_notify;
278
279 /* We MUST attach after the MIPI sensor */
280 ret = bus_register_notifier(&soc_camera_bus_type, &priv->notifier);
281 if (ret < 0) {
282 dev_err(&pdev->dev, "CSI2 cannot register notifier\n");
283 goto ernotify;
284 }
285 289
286 if (!request_mem_region(res->start, resource_size(res), pdev->name)) { 290 if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
287 dev_err(&pdev->dev, "CSI2 register region already claimed\n"); 291 dev_err(&pdev->dev, "CSI2 register region already claimed\n");
@@ -297,11 +301,17 @@ static __devinit int sh_csi2_probe(struct platform_device *pdev)
297 } 301 }
298 302
299 priv->pdev = pdev; 303 priv->pdev = pdev;
304 platform_set_drvdata(pdev, priv);
300 305
301 v4l2_subdev_init(&priv->subdev, &sh_csi2_subdev_ops); 306 v4l2_subdev_init(&priv->subdev, &sh_csi2_subdev_ops);
302 v4l2_set_subdevdata(&priv->subdev, &pdev->dev); 307 v4l2_set_subdevdata(&priv->subdev, &pdev->dev);
303 308
304 platform_set_drvdata(pdev, priv); 309 snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.mipi-csi",
310 dev_name(pdata->v4l2_dev->dev));
311 ret = v4l2_device_register_subdev(pdata->v4l2_dev, &priv->subdev);
312 dev_dbg(&pdev->dev, "%s(%p): ret(register_subdev) = %d\n", __func__, priv, ret);
313 if (ret < 0)
314 goto esdreg;
305 315
306 pm_runtime_enable(&pdev->dev); 316 pm_runtime_enable(&pdev->dev);
307 317
@@ -309,11 +319,11 @@ static __devinit int sh_csi2_probe(struct platform_device *pdev)
309 319
310 return 0; 320 return 0;
311 321
322esdreg:
323 iounmap(priv->base);
312eremap: 324eremap:
313 release_mem_region(res->start, resource_size(res)); 325 release_mem_region(res->start, resource_size(res));
314ereqreg: 326ereqreg:
315 bus_unregister_notifier(&soc_camera_bus_type, &priv->notifier);
316ernotify:
317 kfree(priv); 327 kfree(priv);
318 328
319 return ret; 329 return ret;
@@ -324,7 +334,7 @@ static __devexit int sh_csi2_remove(struct platform_device *pdev)
324 struct sh_csi2 *priv = platform_get_drvdata(pdev); 334 struct sh_csi2 *priv = platform_get_drvdata(pdev);
325 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 335 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
326 336
327 bus_unregister_notifier(&soc_camera_bus_type, &priv->notifier); 337 v4l2_device_unregister_subdev(&priv->subdev);
328 pm_runtime_disable(&pdev->dev); 338 pm_runtime_disable(&pdev->dev);
329 iounmap(priv->base); 339 iounmap(priv->base);
330 release_mem_region(res->start, resource_size(res)); 340 release_mem_region(res->start, resource_size(res));
@@ -335,8 +345,9 @@ static __devexit int sh_csi2_remove(struct platform_device *pdev)
335} 345}
336 346
337static struct platform_driver __refdata sh_csi2_pdrv = { 347static struct platform_driver __refdata sh_csi2_pdrv = {
338 .remove = __devexit_p(sh_csi2_remove), 348 .remove = __devexit_p(sh_csi2_remove),
339 .driver = { 349 .probe = sh_csi2_probe,
350 .driver = {
340 .name = "sh-mobile-csi2", 351 .name = "sh-mobile-csi2",
341 .owner = THIS_MODULE, 352 .owner = THIS_MODULE,
342 }, 353 },
@@ -344,7 +355,7 @@ static struct platform_driver __refdata sh_csi2_pdrv = {
344 355
345static int __init sh_csi2_init(void) 356static int __init sh_csi2_init(void)
346{ 357{
347 return platform_driver_probe(&sh_csi2_pdrv, sh_csi2_probe); 358 return platform_driver_register(&sh_csi2_pdrv);
348} 359}
349 360
350static void __exit sh_csi2_exit(void) 361static void __exit sh_csi2_exit(void)
diff --git a/drivers/media/video/sh_vou.c b/drivers/media/video/sh_vou.c
index 07cf0c6c7c1f..6a729879d89e 100644
--- a/drivers/media/video/sh_vou.c
+++ b/drivers/media/video/sh_vou.c
@@ -19,7 +19,6 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/pm_runtime.h> 20#include <linux/pm_runtime.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/version.h>
23#include <linux/videodev2.h> 22#include <linux/videodev2.h>
24 23
25#include <media/sh_vou.h> 24#include <media/sh_vou.h>
@@ -393,7 +392,6 @@ static int sh_vou_querycap(struct file *file, void *priv,
393 dev_dbg(vou_file->vbq.dev, "%s()\n", __func__); 392 dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
394 393
395 strlcpy(cap->card, "SuperH VOU", sizeof(cap->card)); 394 strlcpy(cap->card, "SuperH VOU", sizeof(cap->card));
396 cap->version = KERNEL_VERSION(0, 1, 0);
397 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; 395 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
398 return 0; 396 return 0;
399} 397}
@@ -1490,4 +1488,5 @@ module_exit(sh_vou_exit);
1490MODULE_DESCRIPTION("SuperH VOU driver"); 1488MODULE_DESCRIPTION("SuperH VOU driver");
1491MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); 1489MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
1492MODULE_LICENSE("GPL v2"); 1490MODULE_LICENSE("GPL v2");
1491MODULE_VERSION("0.1.0");
1493MODULE_ALIAS("platform:sh-vou"); 1492MODULE_ALIAS("platform:sh-vou");
diff --git a/drivers/media/video/sn9c102/sn9c102.h b/drivers/media/video/sn9c102/sn9c102.h
index cbfc44433b99..22ea211ab54f 100644
--- a/drivers/media/video/sn9c102/sn9c102.h
+++ b/drivers/media/video/sn9c102/sn9c102.h
@@ -21,7 +21,6 @@
21#ifndef _SN9C102_H_ 21#ifndef _SN9C102_H_
22#define _SN9C102_H_ 22#define _SN9C102_H_
23 23
24#include <linux/version.h>
25#include <linux/usb.h> 24#include <linux/usb.h>
26#include <linux/videodev2.h> 25#include <linux/videodev2.h>
27#include <media/v4l2-common.h> 26#include <media/v4l2-common.h>
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 0e07c493e6f0..16cb07c5c27b 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -33,6 +33,7 @@
33#include <linux/stat.h> 33#include <linux/stat.h>
34#include <linux/mm.h> 34#include <linux/mm.h>
35#include <linux/vmalloc.h> 35#include <linux/vmalloc.h>
36#include <linux/version.h>
36#include <linux/page-flags.h> 37#include <linux/page-flags.h>
37#include <asm/byteorder.h> 38#include <asm/byteorder.h>
38#include <asm/page.h> 39#include <asm/page.h>
@@ -47,8 +48,7 @@
47#define SN9C102_MODULE_AUTHOR "(C) 2004-2007 Luca Risolia" 48#define SN9C102_MODULE_AUTHOR "(C) 2004-2007 Luca Risolia"
48#define SN9C102_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>" 49#define SN9C102_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>"
49#define SN9C102_MODULE_LICENSE "GPL" 50#define SN9C102_MODULE_LICENSE "GPL"
50#define SN9C102_MODULE_VERSION "1:1.47pre49" 51#define SN9C102_MODULE_VERSION "1:1.48"
51#define SN9C102_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 47)
52 52
53/*****************************************************************************/ 53/*****************************************************************************/
54 54
@@ -2158,7 +2158,7 @@ sn9c102_vidioc_querycap(struct sn9c102_device* cam, void __user * arg)
2158{ 2158{
2159 struct v4l2_capability cap = { 2159 struct v4l2_capability cap = {
2160 .driver = "sn9c102", 2160 .driver = "sn9c102",
2161 .version = SN9C102_MODULE_VERSION_CODE, 2161 .version = LINUX_VERSION_CODE,
2162 .capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | 2162 .capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
2163 V4L2_CAP_STREAMING, 2163 V4L2_CAP_STREAMING,
2164 }; 2164 };
@@ -3187,16 +3187,8 @@ static long sn9c102_ioctl_v4l2(struct file *filp,
3187 case VIDIOC_S_AUDIO: 3187 case VIDIOC_S_AUDIO:
3188 return sn9c102_vidioc_s_audio(cam, arg); 3188 return sn9c102_vidioc_s_audio(cam, arg);
3189 3189
3190 case VIDIOC_G_STD:
3191 case VIDIOC_S_STD:
3192 case VIDIOC_QUERYSTD:
3193 case VIDIOC_ENUMSTD:
3194 case VIDIOC_QUERYMENU:
3195 case VIDIOC_ENUM_FRAMEINTERVALS:
3196 return -EINVAL;
3197
3198 default: 3190 default:
3199 return -EINVAL; 3191 return -ENOTTY;
3200 3192
3201 } 3193 }
3202} 3194}
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 4e4d4122d9a6..5bdfe7e16bc1 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -60,14 +60,14 @@ static int soc_camera_power_set(struct soc_camera_device *icd,
60 ret = regulator_bulk_enable(icl->num_regulators, 60 ret = regulator_bulk_enable(icl->num_regulators,
61 icl->regulators); 61 icl->regulators);
62 if (ret < 0) { 62 if (ret < 0) {
63 dev_err(&icd->dev, "Cannot enable regulators\n"); 63 dev_err(icd->pdev, "Cannot enable regulators\n");
64 return ret; 64 return ret;
65 } 65 }
66 66
67 if (icl->power) 67 if (icl->power)
68 ret = icl->power(icd->pdev, power_on); 68 ret = icl->power(icd->pdev, power_on);
69 if (ret < 0) { 69 if (ret < 0) {
70 dev_err(&icd->dev, 70 dev_err(icd->pdev,
71 "Platform failed to power-on the camera.\n"); 71 "Platform failed to power-on the camera.\n");
72 72
73 regulator_bulk_disable(icl->num_regulators, 73 regulator_bulk_disable(icl->num_regulators,
@@ -79,7 +79,7 @@ static int soc_camera_power_set(struct soc_camera_device *icd,
79 if (icl->power) 79 if (icl->power)
80 ret = icl->power(icd->pdev, 0); 80 ret = icl->power(icd->pdev, 0);
81 if (ret < 0) { 81 if (ret < 0) {
82 dev_err(&icd->dev, 82 dev_err(icd->pdev,
83 "Platform failed to power-off the camera.\n"); 83 "Platform failed to power-off the camera.\n");
84 return ret; 84 return ret;
85 } 85 }
@@ -87,7 +87,7 @@ static int soc_camera_power_set(struct soc_camera_device *icd,
87 ret = regulator_bulk_disable(icl->num_regulators, 87 ret = regulator_bulk_disable(icl->num_regulators,
88 icl->regulators); 88 icl->regulators);
89 if (ret < 0) { 89 if (ret < 0) {
90 dev_err(&icd->dev, "Cannot disable regulators\n"); 90 dev_err(icd->pdev, "Cannot disable regulators\n");
91 return ret; 91 return ret;
92 } 92 }
93 } 93 }
@@ -147,11 +147,11 @@ EXPORT_SYMBOL(soc_camera_apply_sensor_flags);
147static int soc_camera_try_fmt(struct soc_camera_device *icd, 147static int soc_camera_try_fmt(struct soc_camera_device *icd,
148 struct v4l2_format *f) 148 struct v4l2_format *f)
149{ 149{
150 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 150 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
151 struct v4l2_pix_format *pix = &f->fmt.pix; 151 struct v4l2_pix_format *pix = &f->fmt.pix;
152 int ret; 152 int ret;
153 153
154 dev_dbg(&icd->dev, "TRY_FMT(%c%c%c%c, %ux%u)\n", 154 dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
155 pixfmtstr(pix->pixelformat), pix->width, pix->height); 155 pixfmtstr(pix->pixelformat), pix->width, pix->height);
156 156
157 pix->bytesperline = 0; 157 pix->bytesperline = 0;
@@ -199,22 +199,15 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
199static int soc_camera_enum_input(struct file *file, void *priv, 199static int soc_camera_enum_input(struct file *file, void *priv,
200 struct v4l2_input *inp) 200 struct v4l2_input *inp)
201{ 201{
202 struct soc_camera_device *icd = file->private_data;
203 int ret = 0;
204
205 if (inp->index != 0) 202 if (inp->index != 0)
206 return -EINVAL; 203 return -EINVAL;
207 204
208 if (icd->ops->enum_input) 205 /* default is camera */
209 ret = icd->ops->enum_input(icd, inp); 206 inp->type = V4L2_INPUT_TYPE_CAMERA;
210 else { 207 inp->std = V4L2_STD_UNKNOWN;
211 /* default is camera */ 208 strcpy(inp->name, "Camera");
212 inp->type = V4L2_INPUT_TYPE_CAMERA;
213 inp->std = V4L2_STD_UNKNOWN;
214 strcpy(inp->name, "Camera");
215 }
216 209
217 return ret; 210 return 0;
218} 211}
219 212
220static int soc_camera_g_input(struct file *file, void *priv, unsigned int *i) 213static int soc_camera_g_input(struct file *file, void *priv, unsigned int *i)
@@ -244,7 +237,7 @@ static int soc_camera_enum_fsizes(struct file *file, void *fh,
244 struct v4l2_frmsizeenum *fsize) 237 struct v4l2_frmsizeenum *fsize)
245{ 238{
246 struct soc_camera_device *icd = file->private_data; 239 struct soc_camera_device *icd = file->private_data;
247 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 240 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
248 241
249 return ici->ops->enum_fsizes(icd, fsize); 242 return ici->ops->enum_fsizes(icd, fsize);
250} 243}
@@ -254,7 +247,7 @@ static int soc_camera_reqbufs(struct file *file, void *priv,
254{ 247{
255 int ret; 248 int ret;
256 struct soc_camera_device *icd = file->private_data; 249 struct soc_camera_device *icd = file->private_data;
257 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 250 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
258 251
259 WARN_ON(priv != file->private_data); 252 WARN_ON(priv != file->private_data);
260 253
@@ -281,7 +274,7 @@ static int soc_camera_querybuf(struct file *file, void *priv,
281 struct v4l2_buffer *p) 274 struct v4l2_buffer *p)
282{ 275{
283 struct soc_camera_device *icd = file->private_data; 276 struct soc_camera_device *icd = file->private_data;
284 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 277 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
285 278
286 WARN_ON(priv != file->private_data); 279 WARN_ON(priv != file->private_data);
287 280
@@ -295,7 +288,7 @@ static int soc_camera_qbuf(struct file *file, void *priv,
295 struct v4l2_buffer *p) 288 struct v4l2_buffer *p)
296{ 289{
297 struct soc_camera_device *icd = file->private_data; 290 struct soc_camera_device *icd = file->private_data;
298 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 291 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
299 292
300 WARN_ON(priv != file->private_data); 293 WARN_ON(priv != file->private_data);
301 294
@@ -312,7 +305,7 @@ static int soc_camera_dqbuf(struct file *file, void *priv,
312 struct v4l2_buffer *p) 305 struct v4l2_buffer *p)
313{ 306{
314 struct soc_camera_device *icd = file->private_data; 307 struct soc_camera_device *icd = file->private_data;
315 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 308 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
316 309
317 WARN_ON(priv != file->private_data); 310 WARN_ON(priv != file->private_data);
318 311
@@ -329,7 +322,7 @@ static int soc_camera_dqbuf(struct file *file, void *priv,
329static int soc_camera_init_user_formats(struct soc_camera_device *icd) 322static int soc_camera_init_user_formats(struct soc_camera_device *icd)
330{ 323{
331 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 324 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
332 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 325 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
333 unsigned int i, fmts = 0, raw_fmts = 0; 326 unsigned int i, fmts = 0, raw_fmts = 0;
334 int ret; 327 int ret;
335 enum v4l2_mbus_pixelcode code; 328 enum v4l2_mbus_pixelcode code;
@@ -363,7 +356,7 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd)
363 if (!icd->user_formats) 356 if (!icd->user_formats)
364 return -ENOMEM; 357 return -ENOMEM;
365 358
366 dev_dbg(&icd->dev, "Found %d supported formats.\n", fmts); 359 dev_dbg(icd->pdev, "Found %d supported formats.\n", fmts);
367 360
368 /* Second pass - actually fill data formats */ 361 /* Second pass - actually fill data formats */
369 fmts = 0; 362 fmts = 0;
@@ -395,7 +388,7 @@ egfmt:
395/* Always entered with .video_lock held */ 388/* Always entered with .video_lock held */
396static void soc_camera_free_user_formats(struct soc_camera_device *icd) 389static void soc_camera_free_user_formats(struct soc_camera_device *icd)
397{ 390{
398 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 391 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
399 392
400 if (ici->ops->put_formats) 393 if (ici->ops->put_formats)
401 ici->ops->put_formats(icd); 394 ici->ops->put_formats(icd);
@@ -409,11 +402,11 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd)
409static int soc_camera_set_fmt(struct soc_camera_device *icd, 402static int soc_camera_set_fmt(struct soc_camera_device *icd,
410 struct v4l2_format *f) 403 struct v4l2_format *f)
411{ 404{
412 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 405 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
413 struct v4l2_pix_format *pix = &f->fmt.pix; 406 struct v4l2_pix_format *pix = &f->fmt.pix;
414 int ret; 407 int ret;
415 408
416 dev_dbg(&icd->dev, "S_FMT(%c%c%c%c, %ux%u)\n", 409 dev_dbg(icd->pdev, "S_FMT(%c%c%c%c, %ux%u)\n",
417 pixfmtstr(pix->pixelformat), pix->width, pix->height); 410 pixfmtstr(pix->pixelformat), pix->width, pix->height);
418 411
419 /* We always call try_fmt() before set_fmt() or set_crop() */ 412 /* We always call try_fmt() before set_fmt() or set_crop() */
@@ -426,7 +419,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
426 return ret; 419 return ret;
427 } else if (!icd->current_fmt || 420 } else if (!icd->current_fmt ||
428 icd->current_fmt->host_fmt->fourcc != pix->pixelformat) { 421 icd->current_fmt->host_fmt->fourcc != pix->pixelformat) {
429 dev_err(&icd->dev, 422 dev_err(icd->pdev,
430 "Host driver hasn't set up current format correctly!\n"); 423 "Host driver hasn't set up current format correctly!\n");
431 return -EINVAL; 424 return -EINVAL;
432 } 425 }
@@ -440,7 +433,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
440 if (ici->ops->init_videobuf) 433 if (ici->ops->init_videobuf)
441 icd->vb_vidq.field = pix->field; 434 icd->vb_vidq.field = pix->field;
442 435
443 dev_dbg(&icd->dev, "set width: %d height: %d\n", 436 dev_dbg(icd->pdev, "set width: %d height: %d\n",
444 icd->user_width, icd->user_height); 437 icd->user_width, icd->user_height);
445 438
446 /* set physical bus parameters */ 439 /* set physical bus parameters */
@@ -450,9 +443,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
450static int soc_camera_open(struct file *file) 443static int soc_camera_open(struct file *file)
451{ 444{
452 struct video_device *vdev = video_devdata(file); 445 struct video_device *vdev = video_devdata(file);
453 struct soc_camera_device *icd = container_of(vdev->parent, 446 struct soc_camera_device *icd = dev_get_drvdata(vdev->parent);
454 struct soc_camera_device,
455 dev);
456 struct soc_camera_link *icl = to_soc_camera_link(icd); 447 struct soc_camera_link *icl = to_soc_camera_link(icd);
457 struct soc_camera_host *ici; 448 struct soc_camera_host *ici;
458 int ret; 449 int ret;
@@ -461,10 +452,10 @@ static int soc_camera_open(struct file *file)
461 /* No device driver attached */ 452 /* No device driver attached */
462 return -ENODEV; 453 return -ENODEV;
463 454
464 ici = to_soc_camera_host(icd->dev.parent); 455 ici = to_soc_camera_host(icd->parent);
465 456
466 if (!try_module_get(ici->ops->owner)) { 457 if (!try_module_get(ici->ops->owner)) {
467 dev_err(&icd->dev, "Couldn't lock capture bus driver.\n"); 458 dev_err(icd->pdev, "Couldn't lock capture bus driver.\n");
468 return -EINVAL; 459 return -EINVAL;
469 } 460 }
470 461
@@ -495,7 +486,7 @@ static int soc_camera_open(struct file *file)
495 486
496 ret = ici->ops->add(icd); 487 ret = ici->ops->add(icd);
497 if (ret < 0) { 488 if (ret < 0) {
498 dev_err(&icd->dev, "Couldn't activate the camera: %d\n", ret); 489 dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret);
499 goto eiciadd; 490 goto eiciadd;
500 } 491 }
501 492
@@ -524,7 +515,7 @@ static int soc_camera_open(struct file *file)
524 } 515 }
525 516
526 file->private_data = icd; 517 file->private_data = icd;
527 dev_dbg(&icd->dev, "camera device open\n"); 518 dev_dbg(icd->pdev, "camera device open\n");
528 519
529 return 0; 520 return 0;
530 521
@@ -549,7 +540,7 @@ epower:
549static int soc_camera_close(struct file *file) 540static int soc_camera_close(struct file *file)
550{ 541{
551 struct soc_camera_device *icd = file->private_data; 542 struct soc_camera_device *icd = file->private_data;
552 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 543 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
553 544
554 icd->use_count--; 545 icd->use_count--;
555 if (!icd->use_count) { 546 if (!icd->use_count) {
@@ -570,7 +561,7 @@ static int soc_camera_close(struct file *file)
570 561
571 module_put(ici->ops->owner); 562 module_put(ici->ops->owner);
572 563
573 dev_dbg(&icd->dev, "camera device close\n"); 564 dev_dbg(icd->pdev, "camera device close\n");
574 565
575 return 0; 566 return 0;
576} 567}
@@ -581,7 +572,7 @@ static ssize_t soc_camera_read(struct file *file, char __user *buf,
581 struct soc_camera_device *icd = file->private_data; 572 struct soc_camera_device *icd = file->private_data;
582 int err = -EINVAL; 573 int err = -EINVAL;
583 574
584 dev_err(&icd->dev, "camera device read not implemented\n"); 575 dev_err(icd->pdev, "camera device read not implemented\n");
585 576
586 return err; 577 return err;
587} 578}
@@ -589,10 +580,10 @@ static ssize_t soc_camera_read(struct file *file, char __user *buf,
589static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma) 580static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
590{ 581{
591 struct soc_camera_device *icd = file->private_data; 582 struct soc_camera_device *icd = file->private_data;
592 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 583 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
593 int err; 584 int err;
594 585
595 dev_dbg(&icd->dev, "mmap called, vma=0x%08lx\n", (unsigned long)vma); 586 dev_dbg(icd->pdev, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
596 587
597 if (icd->streamer != file) 588 if (icd->streamer != file)
598 return -EBUSY; 589 return -EBUSY;
@@ -602,7 +593,7 @@ static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
602 else 593 else
603 err = vb2_mmap(&icd->vb2_vidq, vma); 594 err = vb2_mmap(&icd->vb2_vidq, vma);
604 595
605 dev_dbg(&icd->dev, "vma start=0x%08lx, size=%ld, ret=%d\n", 596 dev_dbg(icd->pdev, "vma start=0x%08lx, size=%ld, ret=%d\n",
606 (unsigned long)vma->vm_start, 597 (unsigned long)vma->vm_start,
607 (unsigned long)vma->vm_end - (unsigned long)vma->vm_start, 598 (unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
608 err); 599 err);
@@ -613,13 +604,13 @@ static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
613static unsigned int soc_camera_poll(struct file *file, poll_table *pt) 604static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
614{ 605{
615 struct soc_camera_device *icd = file->private_data; 606 struct soc_camera_device *icd = file->private_data;
616 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 607 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
617 608
618 if (icd->streamer != file) 609 if (icd->streamer != file)
619 return -EBUSY; 610 return -EBUSY;
620 611
621 if (ici->ops->init_videobuf && list_empty(&icd->vb_vidq.stream)) { 612 if (ici->ops->init_videobuf && list_empty(&icd->vb_vidq.stream)) {
622 dev_err(&icd->dev, "Trying to poll with no queued buffers!\n"); 613 dev_err(icd->pdev, "Trying to poll with no queued buffers!\n");
623 return POLLERR; 614 return POLLERR;
624 } 615 }
625 616
@@ -659,15 +650,15 @@ static int soc_camera_s_fmt_vid_cap(struct file *file, void *priv,
659 WARN_ON(priv != file->private_data); 650 WARN_ON(priv != file->private_data);
660 651
661 if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { 652 if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
662 dev_warn(&icd->dev, "Wrong buf-type %d\n", f->type); 653 dev_warn(icd->pdev, "Wrong buf-type %d\n", f->type);
663 return -EINVAL; 654 return -EINVAL;
664 } 655 }
665 656
666 if (icd->streamer && icd->streamer != file) 657 if (icd->streamer && icd->streamer != file)
667 return -EBUSY; 658 return -EBUSY;
668 659
669 if (is_streaming(to_soc_camera_host(icd->dev.parent), icd)) { 660 if (is_streaming(to_soc_camera_host(icd->parent), icd)) {
670 dev_err(&icd->dev, "S_FMT denied: queue initialised\n"); 661 dev_err(icd->pdev, "S_FMT denied: queue initialised\n");
671 return -EBUSY; 662 return -EBUSY;
672 } 663 }
673 664
@@ -716,7 +707,7 @@ static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv,
716 pix->field = icd->field; 707 pix->field = icd->field;
717 pix->pixelformat = icd->current_fmt->host_fmt->fourcc; 708 pix->pixelformat = icd->current_fmt->host_fmt->fourcc;
718 pix->colorspace = icd->colorspace; 709 pix->colorspace = icd->colorspace;
719 dev_dbg(&icd->dev, "current_fmt->fourcc: 0x%08x\n", 710 dev_dbg(icd->pdev, "current_fmt->fourcc: 0x%08x\n",
720 icd->current_fmt->host_fmt->fourcc); 711 icd->current_fmt->host_fmt->fourcc);
721 return 0; 712 return 0;
722} 713}
@@ -725,7 +716,7 @@ static int soc_camera_querycap(struct file *file, void *priv,
725 struct v4l2_capability *cap) 716 struct v4l2_capability *cap)
726{ 717{
727 struct soc_camera_device *icd = file->private_data; 718 struct soc_camera_device *icd = file->private_data;
728 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 719 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
729 720
730 WARN_ON(priv != file->private_data); 721 WARN_ON(priv != file->private_data);
731 722
@@ -737,7 +728,7 @@ static int soc_camera_streamon(struct file *file, void *priv,
737 enum v4l2_buf_type i) 728 enum v4l2_buf_type i)
738{ 729{
739 struct soc_camera_device *icd = file->private_data; 730 struct soc_camera_device *icd = file->private_data;
740 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 731 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
741 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 732 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
742 int ret; 733 int ret;
743 734
@@ -766,7 +757,7 @@ static int soc_camera_streamoff(struct file *file, void *priv,
766{ 757{
767 struct soc_camera_device *icd = file->private_data; 758 struct soc_camera_device *icd = file->private_data;
768 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 759 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
769 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 760 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
770 761
771 WARN_ON(priv != file->private_data); 762 WARN_ON(priv != file->private_data);
772 763
@@ -794,7 +785,7 @@ static int soc_camera_queryctrl(struct file *file, void *priv,
794 struct v4l2_queryctrl *qc) 785 struct v4l2_queryctrl *qc)
795{ 786{
796 struct soc_camera_device *icd = file->private_data; 787 struct soc_camera_device *icd = file->private_data;
797 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 788 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
798 int i; 789 int i;
799 790
800 WARN_ON(priv != file->private_data); 791 WARN_ON(priv != file->private_data);
@@ -825,7 +816,7 @@ static int soc_camera_g_ctrl(struct file *file, void *priv,
825 struct v4l2_control *ctrl) 816 struct v4l2_control *ctrl)
826{ 817{
827 struct soc_camera_device *icd = file->private_data; 818 struct soc_camera_device *icd = file->private_data;
828 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 819 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
829 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 820 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
830 int ret; 821 int ret;
831 822
@@ -844,7 +835,7 @@ static int soc_camera_s_ctrl(struct file *file, void *priv,
844 struct v4l2_control *ctrl) 835 struct v4l2_control *ctrl)
845{ 836{
846 struct soc_camera_device *icd = file->private_data; 837 struct soc_camera_device *icd = file->private_data;
847 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 838 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
848 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 839 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
849 int ret; 840 int ret;
850 841
@@ -863,7 +854,7 @@ static int soc_camera_cropcap(struct file *file, void *fh,
863 struct v4l2_cropcap *a) 854 struct v4l2_cropcap *a)
864{ 855{
865 struct soc_camera_device *icd = file->private_data; 856 struct soc_camera_device *icd = file->private_data;
866 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 857 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
867 858
868 return ici->ops->cropcap(icd, a); 859 return ici->ops->cropcap(icd, a);
869} 860}
@@ -872,7 +863,7 @@ static int soc_camera_g_crop(struct file *file, void *fh,
872 struct v4l2_crop *a) 863 struct v4l2_crop *a)
873{ 864{
874 struct soc_camera_device *icd = file->private_data; 865 struct soc_camera_device *icd = file->private_data;
875 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 866 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
876 int ret; 867 int ret;
877 868
878 ret = ici->ops->get_crop(icd, a); 869 ret = ici->ops->get_crop(icd, a);
@@ -889,7 +880,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
889 struct v4l2_crop *a) 880 struct v4l2_crop *a)
890{ 881{
891 struct soc_camera_device *icd = file->private_data; 882 struct soc_camera_device *icd = file->private_data;
892 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 883 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
893 struct v4l2_rect *rect = &a->c; 884 struct v4l2_rect *rect = &a->c;
894 struct v4l2_crop current_crop; 885 struct v4l2_crop current_crop;
895 int ret; 886 int ret;
@@ -897,7 +888,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
897 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 888 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
898 return -EINVAL; 889 return -EINVAL;
899 890
900 dev_dbg(&icd->dev, "S_CROP(%ux%u@%u:%u)\n", 891 dev_dbg(icd->pdev, "S_CROP(%ux%u@%u:%u)\n",
901 rect->width, rect->height, rect->left, rect->top); 892 rect->width, rect->height, rect->left, rect->top);
902 893
903 /* If get_crop fails, we'll let host and / or client drivers decide */ 894 /* If get_crop fails, we'll let host and / or client drivers decide */
@@ -905,7 +896,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
905 896
906 /* Prohibit window size change with initialised buffers */ 897 /* Prohibit window size change with initialised buffers */
907 if (ret < 0) { 898 if (ret < 0) {
908 dev_err(&icd->dev, 899 dev_err(icd->pdev,
909 "S_CROP denied: getting current crop failed\n"); 900 "S_CROP denied: getting current crop failed\n");
910 } else if ((a->c.width == current_crop.c.width && 901 } else if ((a->c.width == current_crop.c.width &&
911 a->c.height == current_crop.c.height) || 902 a->c.height == current_crop.c.height) ||
@@ -915,7 +906,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
915 } else if (ici->ops->set_livecrop) { 906 } else if (ici->ops->set_livecrop) {
916 ret = ici->ops->set_livecrop(icd, a); 907 ret = ici->ops->set_livecrop(icd, a);
917 } else { 908 } else {
918 dev_err(&icd->dev, 909 dev_err(icd->pdev,
919 "S_CROP denied: queue initialised and sizes differ\n"); 910 "S_CROP denied: queue initialised and sizes differ\n");
920 ret = -EBUSY; 911 ret = -EBUSY;
921 } 912 }
@@ -927,7 +918,7 @@ static int soc_camera_g_parm(struct file *file, void *fh,
927 struct v4l2_streamparm *a) 918 struct v4l2_streamparm *a)
928{ 919{
929 struct soc_camera_device *icd = file->private_data; 920 struct soc_camera_device *icd = file->private_data;
930 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 921 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
931 922
932 if (ici->ops->get_parm) 923 if (ici->ops->get_parm)
933 return ici->ops->get_parm(icd, a); 924 return ici->ops->get_parm(icd, a);
@@ -939,7 +930,7 @@ static int soc_camera_s_parm(struct file *file, void *fh,
939 struct v4l2_streamparm *a) 930 struct v4l2_streamparm *a)
940{ 931{
941 struct soc_camera_device *icd = file->private_data; 932 struct soc_camera_device *icd = file->private_data;
942 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 933 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
943 934
944 if (ici->ops->set_parm) 935 if (ici->ops->set_parm)
945 return ici->ops->set_parm(icd, a); 936 return ici->ops->set_parm(icd, a);
@@ -976,6 +967,8 @@ static int soc_camera_s_register(struct file *file, void *fh,
976} 967}
977#endif 968#endif
978 969
970static int soc_camera_probe(struct soc_camera_device *icd);
971
979/* So far this function cannot fail */ 972/* So far this function cannot fail */
980static void scan_add_host(struct soc_camera_host *ici) 973static void scan_add_host(struct soc_camera_host *ici)
981{ 974{
@@ -986,15 +979,9 @@ static void scan_add_host(struct soc_camera_host *ici)
986 list_for_each_entry(icd, &devices, list) { 979 list_for_each_entry(icd, &devices, list) {
987 if (icd->iface == ici->nr) { 980 if (icd->iface == ici->nr) {
988 int ret; 981 int ret;
989 icd->dev.parent = ici->v4l2_dev.dev; 982
990 dev_set_name(&icd->dev, "%u-%u", icd->iface, 983 icd->parent = ici->v4l2_dev.dev;
991 icd->devnum); 984 ret = soc_camera_probe(icd);
992 ret = device_register(&icd->dev);
993 if (ret < 0) {
994 icd->dev.parent = NULL;
995 dev_err(&icd->dev,
996 "Cannot register device: %d\n", ret);
997 }
998 } 985 }
999 } 986 }
1000 987
@@ -1006,12 +993,12 @@ static int soc_camera_init_i2c(struct soc_camera_device *icd,
1006 struct soc_camera_link *icl) 993 struct soc_camera_link *icl)
1007{ 994{
1008 struct i2c_client *client; 995 struct i2c_client *client;
1009 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 996 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
1010 struct i2c_adapter *adap = i2c_get_adapter(icl->i2c_adapter_id); 997 struct i2c_adapter *adap = i2c_get_adapter(icl->i2c_adapter_id);
1011 struct v4l2_subdev *subdev; 998 struct v4l2_subdev *subdev;
1012 999
1013 if (!adap) { 1000 if (!adap) {
1014 dev_err(&icd->dev, "Cannot get I2C adapter #%d. No driver?\n", 1001 dev_err(icd->pdev, "Cannot get I2C adapter #%d. No driver?\n",
1015 icl->i2c_adapter_id); 1002 icl->i2c_adapter_id);
1016 goto ei2cga; 1003 goto ei2cga;
1017 } 1004 }
@@ -1026,7 +1013,7 @@ static int soc_camera_init_i2c(struct soc_camera_device *icd,
1026 client = v4l2_get_subdevdata(subdev); 1013 client = v4l2_get_subdevdata(subdev);
1027 1014
1028 /* Use to_i2c_client(dev) to recover the i2c client */ 1015 /* Use to_i2c_client(dev) to recover the i2c client */
1029 dev_set_drvdata(&icd->dev, &client->dev); 1016 icd->control = &client->dev;
1030 1017
1031 return 0; 1018 return 0;
1032ei2cnd: 1019ei2cnd:
@@ -1040,7 +1027,8 @@ static void soc_camera_free_i2c(struct soc_camera_device *icd)
1040 struct i2c_client *client = 1027 struct i2c_client *client =
1041 to_i2c_client(to_soc_camera_control(icd)); 1028 to_i2c_client(to_soc_camera_control(icd));
1042 struct i2c_adapter *adap = client->adapter; 1029 struct i2c_adapter *adap = client->adapter;
1043 dev_set_drvdata(&icd->dev, NULL); 1030
1031 icd->control = NULL;
1044 v4l2_device_unregister_subdev(i2c_get_clientdata(client)); 1032 v4l2_device_unregister_subdev(i2c_get_clientdata(client));
1045 i2c_unregister_device(client); 1033 i2c_unregister_device(client);
1046 i2c_put_adapter(adap); 1034 i2c_put_adapter(adap);
@@ -1053,17 +1041,16 @@ static void soc_camera_free_i2c(struct soc_camera_device *icd)
1053static int soc_camera_video_start(struct soc_camera_device *icd); 1041static int soc_camera_video_start(struct soc_camera_device *icd);
1054static int video_dev_create(struct soc_camera_device *icd); 1042static int video_dev_create(struct soc_camera_device *icd);
1055/* Called during host-driver probe */ 1043/* Called during host-driver probe */
1056static int soc_camera_probe(struct device *dev) 1044static int soc_camera_probe(struct soc_camera_device *icd)
1057{ 1045{
1058 struct soc_camera_device *icd = to_soc_camera_dev(dev); 1046 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
1059 struct soc_camera_host *ici = to_soc_camera_host(dev->parent);
1060 struct soc_camera_link *icl = to_soc_camera_link(icd); 1047 struct soc_camera_link *icl = to_soc_camera_link(icd);
1061 struct device *control = NULL; 1048 struct device *control = NULL;
1062 struct v4l2_subdev *sd; 1049 struct v4l2_subdev *sd;
1063 struct v4l2_mbus_framefmt mf; 1050 struct v4l2_mbus_framefmt mf;
1064 int ret; 1051 int ret;
1065 1052
1066 dev_info(dev, "Probing %s\n", dev_name(dev)); 1053 dev_info(icd->pdev, "Probing %s\n", dev_name(icd->pdev));
1067 1054
1068 ret = regulator_bulk_get(icd->pdev, icl->num_regulators, 1055 ret = regulator_bulk_get(icd->pdev, icl->num_regulators,
1069 icl->regulators); 1056 icl->regulators);
@@ -1099,7 +1086,7 @@ static int soc_camera_probe(struct device *dev)
1099 if (icl->module_name) 1086 if (icl->module_name)
1100 ret = request_module(icl->module_name); 1087 ret = request_module(icl->module_name);
1101 1088
1102 ret = icl->add_device(icl, &icd->dev); 1089 ret = icl->add_device(icd);
1103 if (ret < 0) 1090 if (ret < 0)
1104 goto eadddev; 1091 goto eadddev;
1105 1092
@@ -1110,7 +1097,7 @@ static int soc_camera_probe(struct device *dev)
1110 control = to_soc_camera_control(icd); 1097 control = to_soc_camera_control(icd);
1111 if (!control || !control->driver || !dev_get_drvdata(control) || 1098 if (!control || !control->driver || !dev_get_drvdata(control) ||
1112 !try_module_get(control->driver->owner)) { 1099 !try_module_get(control->driver->owner)) {
1113 icl->del_device(icl); 1100 icl->del_device(icd);
1114 goto enodrv; 1101 goto enodrv;
1115 } 1102 }
1116 } 1103 }
@@ -1125,8 +1112,6 @@ static int soc_camera_probe(struct device *dev)
1125 1112
1126 icd->field = V4L2_FIELD_ANY; 1113 icd->field = V4L2_FIELD_ANY;
1127 1114
1128 icd->vdev->lock = &icd->video_lock;
1129
1130 /* 1115 /*
1131 * ..._video_start() will create a device node, video_register_device() 1116 * ..._video_start() will create a device node, video_register_device()
1132 * itself is protected against concurrent open() calls, but we also have 1117 * itself is protected against concurrent open() calls, but we also have
@@ -1146,11 +1131,6 @@ static int soc_camera_probe(struct device *dev)
1146 icd->field = mf.field; 1131 icd->field = mf.field;
1147 } 1132 }
1148 1133
1149 /* Do we have to sysfs_remove_link() before device_unregister()? */
1150 if (sysfs_create_link(&icd->dev.kobj, &to_soc_camera_control(icd)->kobj,
1151 "control"))
1152 dev_warn(&icd->dev, "Failed creating the control symlink\n");
1153
1154 ici->ops->remove(icd); 1134 ici->ops->remove(icd);
1155 1135
1156 soc_camera_power_set(icd, icl, 0); 1136 soc_camera_power_set(icd, icl, 0);
@@ -1166,7 +1146,7 @@ eiufmt:
1166 if (icl->board_info) { 1146 if (icl->board_info) {
1167 soc_camera_free_i2c(icd); 1147 soc_camera_free_i2c(icd);
1168 } else { 1148 } else {
1169 icl->del_device(icl); 1149 icl->del_device(icd);
1170 module_put(control->driver->owner); 1150 module_put(control->driver->owner);
1171 } 1151 }
1172enodrv: 1152enodrv:
@@ -1186,13 +1166,12 @@ ereg:
1186 * This is called on device_unregister, which only means we have to disconnect 1166 * This is called on device_unregister, which only means we have to disconnect
1187 * from the host, but not remove ourselves from the device list 1167 * from the host, but not remove ourselves from the device list
1188 */ 1168 */
1189static int soc_camera_remove(struct device *dev) 1169static int soc_camera_remove(struct soc_camera_device *icd)
1190{ 1170{
1191 struct soc_camera_device *icd = to_soc_camera_dev(dev);
1192 struct soc_camera_link *icl = to_soc_camera_link(icd); 1171 struct soc_camera_link *icl = to_soc_camera_link(icd);
1193 struct video_device *vdev = icd->vdev; 1172 struct video_device *vdev = icd->vdev;
1194 1173
1195 BUG_ON(!dev->parent); 1174 BUG_ON(!icd->parent);
1196 1175
1197 if (vdev) { 1176 if (vdev) {
1198 video_unregister_device(vdev); 1177 video_unregister_device(vdev);
@@ -1202,10 +1181,9 @@ static int soc_camera_remove(struct device *dev)
1202 if (icl->board_info) { 1181 if (icl->board_info) {
1203 soc_camera_free_i2c(icd); 1182 soc_camera_free_i2c(icd);
1204 } else { 1183 } else {
1205 struct device_driver *drv = to_soc_camera_control(icd) ? 1184 struct device_driver *drv = to_soc_camera_control(icd)->driver;
1206 to_soc_camera_control(icd)->driver : NULL;
1207 if (drv) { 1185 if (drv) {
1208 icl->del_device(icl); 1186 icl->del_device(icd);
1209 module_put(drv->owner); 1187 module_put(drv->owner);
1210 } 1188 }
1211 } 1189 }
@@ -1216,49 +1194,6 @@ static int soc_camera_remove(struct device *dev)
1216 return 0; 1194 return 0;
1217} 1195}
1218 1196
1219static int soc_camera_suspend(struct device *dev, pm_message_t state)
1220{
1221 struct soc_camera_device *icd = to_soc_camera_dev(dev);
1222 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
1223 int ret = 0;
1224
1225 if (ici->ops->suspend)
1226 ret = ici->ops->suspend(icd, state);
1227
1228 return ret;
1229}
1230
1231static int soc_camera_resume(struct device *dev)
1232{
1233 struct soc_camera_device *icd = to_soc_camera_dev(dev);
1234 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
1235 int ret = 0;
1236
1237 if (ici->ops->resume)
1238 ret = ici->ops->resume(icd);
1239
1240 return ret;
1241}
1242
1243struct bus_type soc_camera_bus_type = {
1244 .name = "soc-camera",
1245 .probe = soc_camera_probe,
1246 .remove = soc_camera_remove,
1247 .suspend = soc_camera_suspend,
1248 .resume = soc_camera_resume,
1249};
1250EXPORT_SYMBOL_GPL(soc_camera_bus_type);
1251
1252static struct device_driver ic_drv = {
1253 .name = "camera",
1254 .bus = &soc_camera_bus_type,
1255 .owner = THIS_MODULE,
1256};
1257
1258static void dummy_release(struct device *dev)
1259{
1260}
1261
1262static int default_cropcap(struct soc_camera_device *icd, 1197static int default_cropcap(struct soc_camera_device *icd,
1263 struct v4l2_cropcap *a) 1198 struct v4l2_cropcap *a)
1264{ 1199{
@@ -1317,13 +1252,6 @@ static int default_enum_fsizes(struct soc_camera_device *icd,
1317 return 0; 1252 return 0;
1318} 1253}
1319 1254
1320static void soc_camera_device_init(struct device *dev, void *pdata)
1321{
1322 dev->platform_data = pdata;
1323 dev->bus = &soc_camera_bus_type;
1324 dev->release = dummy_release;
1325}
1326
1327int soc_camera_host_register(struct soc_camera_host *ici) 1255int soc_camera_host_register(struct soc_camera_host *ici)
1328{ 1256{
1329 struct soc_camera_host *ix; 1257 struct soc_camera_host *ix;
@@ -1389,24 +1317,9 @@ void soc_camera_host_unregister(struct soc_camera_host *ici)
1389 mutex_lock(&list_lock); 1317 mutex_lock(&list_lock);
1390 1318
1391 list_del(&ici->list); 1319 list_del(&ici->list);
1392 1320 list_for_each_entry(icd, &devices, list)
1393 list_for_each_entry(icd, &devices, list) { 1321 if (icd->iface == ici->nr && to_soc_camera_control(icd))
1394 if (icd->iface == ici->nr) { 1322 soc_camera_remove(icd);
1395 void *pdata = icd->dev.platform_data;
1396 /* The bus->remove will be called */
1397 device_unregister(&icd->dev);
1398 /*
1399 * Not before device_unregister(), .remove
1400 * needs parent to call ici->ops->remove().
1401 * If the host module is loaded again, device_register()
1402 * would complain "already initialised," since 2.6.32
1403 * this is also needed to prevent use-after-free of the
1404 * device private data.
1405 */
1406 memset(&icd->dev, 0, sizeof(icd->dev));
1407 soc_camera_device_init(&icd->dev, pdata);
1408 }
1409 }
1410 1323
1411 mutex_unlock(&list_lock); 1324 mutex_unlock(&list_lock);
1412 1325
@@ -1448,11 +1361,6 @@ static int soc_camera_device_register(struct soc_camera_device *icd)
1448 return 0; 1361 return 0;
1449} 1362}
1450 1363
1451static void soc_camera_device_unregister(struct soc_camera_device *icd)
1452{
1453 list_del(&icd->list);
1454}
1455
1456static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = { 1364static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = {
1457 .vidioc_querycap = soc_camera_querycap, 1365 .vidioc_querycap = soc_camera_querycap,
1458 .vidioc_g_fmt_vid_cap = soc_camera_g_fmt_vid_cap, 1366 .vidioc_g_fmt_vid_cap = soc_camera_g_fmt_vid_cap,
@@ -1487,7 +1395,7 @@ static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = {
1487 1395
1488static int video_dev_create(struct soc_camera_device *icd) 1396static int video_dev_create(struct soc_camera_device *icd)
1489{ 1397{
1490 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1398 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
1491 struct video_device *vdev = video_device_alloc(); 1399 struct video_device *vdev = video_device_alloc();
1492 1400
1493 if (!vdev) 1401 if (!vdev)
@@ -1495,12 +1403,13 @@ static int video_dev_create(struct soc_camera_device *icd)
1495 1403
1496 strlcpy(vdev->name, ici->drv_name, sizeof(vdev->name)); 1404 strlcpy(vdev->name, ici->drv_name, sizeof(vdev->name));
1497 1405
1498 vdev->parent = &icd->dev; 1406 vdev->parent = icd->pdev;
1499 vdev->current_norm = V4L2_STD_UNKNOWN; 1407 vdev->current_norm = V4L2_STD_UNKNOWN;
1500 vdev->fops = &soc_camera_fops; 1408 vdev->fops = &soc_camera_fops;
1501 vdev->ioctl_ops = &soc_camera_ioctl_ops; 1409 vdev->ioctl_ops = &soc_camera_ioctl_ops;
1502 vdev->release = video_device_release; 1410 vdev->release = video_device_release;
1503 vdev->tvnorms = V4L2_STD_UNKNOWN; 1411 vdev->tvnorms = V4L2_STD_UNKNOWN;
1412 vdev->lock = &icd->video_lock;
1504 1413
1505 icd->vdev = vdev; 1414 icd->vdev = vdev;
1506 1415
@@ -1515,7 +1424,7 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
1515 const struct device_type *type = icd->vdev->dev.type; 1424 const struct device_type *type = icd->vdev->dev.type;
1516 int ret; 1425 int ret;
1517 1426
1518 if (!icd->dev.parent) 1427 if (!icd->parent)
1519 return -ENODEV; 1428 return -ENODEV;
1520 1429
1521 if (!icd->ops || 1430 if (!icd->ops ||
@@ -1525,7 +1434,7 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
1525 1434
1526 ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, -1); 1435 ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, -1);
1527 if (ret < 0) { 1436 if (ret < 0) {
1528 dev_err(&icd->dev, "video_register_device failed: %d\n", ret); 1437 dev_err(icd->pdev, "video_register_device failed: %d\n", ret);
1529 return ret; 1438 return ret;
1530 } 1439 }
1531 1440
@@ -1549,6 +1458,7 @@ static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
1549 return -ENOMEM; 1458 return -ENOMEM;
1550 1459
1551 icd->iface = icl->bus_id; 1460 icd->iface = icl->bus_id;
1461 icd->link = icl;
1552 icd->pdev = &pdev->dev; 1462 icd->pdev = &pdev->dev;
1553 platform_set_drvdata(pdev, icd); 1463 platform_set_drvdata(pdev, icd);
1554 1464
@@ -1556,8 +1466,6 @@ static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
1556 if (ret < 0) 1466 if (ret < 0)
1557 goto escdevreg; 1467 goto escdevreg;
1558 1468
1559 soc_camera_device_init(&icd->dev, icl);
1560
1561 icd->user_width = DEFAULT_WIDTH; 1469 icd->user_width = DEFAULT_WIDTH;
1562 icd->user_height = DEFAULT_HEIGHT; 1470 icd->user_height = DEFAULT_HEIGHT;
1563 1471
@@ -1581,7 +1489,7 @@ static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev)
1581 if (!icd) 1489 if (!icd)
1582 return -EINVAL; 1490 return -EINVAL;
1583 1491
1584 soc_camera_device_unregister(icd); 1492 list_del(&icd->list);
1585 1493
1586 kfree(icd); 1494 kfree(icd);
1587 1495
@@ -1598,31 +1506,12 @@ static struct platform_driver __refdata soc_camera_pdrv = {
1598 1506
1599static int __init soc_camera_init(void) 1507static int __init soc_camera_init(void)
1600{ 1508{
1601 int ret = bus_register(&soc_camera_bus_type); 1509 return platform_driver_probe(&soc_camera_pdrv, soc_camera_pdrv_probe);
1602 if (ret)
1603 return ret;
1604 ret = driver_register(&ic_drv);
1605 if (ret)
1606 goto edrvr;
1607
1608 ret = platform_driver_probe(&soc_camera_pdrv, soc_camera_pdrv_probe);
1609 if (ret)
1610 goto epdr;
1611
1612 return 0;
1613
1614epdr:
1615 driver_unregister(&ic_drv);
1616edrvr:
1617 bus_unregister(&soc_camera_bus_type);
1618 return ret;
1619} 1510}
1620 1511
1621static void __exit soc_camera_exit(void) 1512static void __exit soc_camera_exit(void)
1622{ 1513{
1623 platform_driver_unregister(&soc_camera_pdrv); 1514 platform_driver_unregister(&soc_camera_pdrv);
1624 driver_unregister(&ic_drv);
1625 bus_unregister(&soc_camera_bus_type);
1626} 1515}
1627 1516
1628module_init(soc_camera_init); 1517module_init(soc_camera_init);
diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c
index bf406e89c992..8069cd6bc5e8 100644
--- a/drivers/media/video/soc_camera_platform.c
+++ b/drivers/media/video/soc_camera_platform.c
@@ -146,7 +146,7 @@ static int soc_camera_platform_probe(struct platform_device *pdev)
146 if (!p) 146 if (!p)
147 return -EINVAL; 147 return -EINVAL;
148 148
149 if (!p->dev) { 149 if (!p->icd) {
150 dev_err(&pdev->dev, 150 dev_err(&pdev->dev,
151 "Platform has not set soc_camera_device pointer!\n"); 151 "Platform has not set soc_camera_device pointer!\n");
152 return -EINVAL; 152 return -EINVAL;
@@ -156,16 +156,16 @@ static int soc_camera_platform_probe(struct platform_device *pdev)
156 if (!priv) 156 if (!priv)
157 return -ENOMEM; 157 return -ENOMEM;
158 158
159 icd = to_soc_camera_dev(p->dev); 159 icd = p->icd;
160 160
161 /* soc-camera convention: control's drvdata points to the subdev */ 161 /* soc-camera convention: control's drvdata points to the subdev */
162 platform_set_drvdata(pdev, &priv->subdev); 162 platform_set_drvdata(pdev, &priv->subdev);
163 /* Set the control device reference */ 163 /* Set the control device reference */
164 dev_set_drvdata(&icd->dev, &pdev->dev); 164 icd->control = &pdev->dev;
165 165
166 icd->ops = &soc_camera_platform_ops; 166 icd->ops = &soc_camera_platform_ops;
167 167
168 ici = to_soc_camera_host(icd->dev.parent); 168 ici = to_soc_camera_host(icd->parent);
169 169
170 v4l2_subdev_init(&priv->subdev, &platform_subdev_ops); 170 v4l2_subdev_init(&priv->subdev, &platform_subdev_ops);
171 v4l2_set_subdevdata(&priv->subdev, p); 171 v4l2_set_subdevdata(&priv->subdev, p);
@@ -188,7 +188,7 @@ static int soc_camera_platform_remove(struct platform_device *pdev)
188{ 188{
189 struct soc_camera_platform_priv *priv = get_priv(pdev); 189 struct soc_camera_platform_priv *priv = get_priv(pdev);
190 struct soc_camera_platform_info *p = pdev->dev.platform_data; 190 struct soc_camera_platform_info *p = pdev->dev.platform_data;
191 struct soc_camera_device *icd = to_soc_camera_dev(p->dev); 191 struct soc_camera_device *icd = p->icd;
192 192
193 v4l2_device_unregister_subdev(&priv->subdev); 193 v4l2_device_unregister_subdev(&priv->subdev);
194 icd->ops = NULL; 194 icd->ops = NULL;
diff --git a/drivers/media/video/sr030pc30.c b/drivers/media/video/sr030pc30.c
index c901721a1db3..8afb0e8a2e00 100644
--- a/drivers/media/video/sr030pc30.c
+++ b/drivers/media/video/sr030pc30.c
@@ -726,8 +726,10 @@ static int sr030pc30_s_power(struct v4l2_subdev *sd, int on)
726 const struct sr030pc30_platform_data *pdata = info->pdata; 726 const struct sr030pc30_platform_data *pdata = info->pdata;
727 int ret; 727 int ret;
728 728
729 if (WARN(pdata == NULL, "No platform data!\n")) 729 if (pdata == NULL) {
730 return -ENOMEM; 730 WARN(1, "No platform data!\n");
731 return -EINVAL;
732 }
731 733
732 /* 734 /*
733 * Put sensor into power sleep mode before switching off 735 * Put sensor into power sleep mode before switching off
@@ -746,6 +748,7 @@ static int sr030pc30_s_power(struct v4l2_subdev *sd, int on)
746 if (on) { 748 if (on) {
747 ret = sr030pc30_base_config(sd); 749 ret = sr030pc30_base_config(sd);
748 } else { 750 } else {
751 ret = 0;
749 info->curr_win = NULL; 752 info->curr_win = NULL;
750 info->curr_fmt = NULL; 753 info->curr_fmt = NULL;
751 } 754 }
diff --git a/drivers/media/video/tda7432.c b/drivers/media/video/tda7432.c
index 3941f954daf4..bd218545da9c 100644
--- a/drivers/media/video/tda7432.c
+++ b/drivers/media/video/tda7432.c
@@ -49,10 +49,11 @@ static int maxvol;
49static int loudness; /* disable loudness by default */ 49static int loudness; /* disable loudness by default */
50static int debug; /* insmod parameter */ 50static int debug; /* insmod parameter */
51module_param(debug, int, S_IRUGO | S_IWUSR); 51module_param(debug, int, S_IRUGO | S_IWUSR);
52MODULE_PARM_DESC(debug, "Set debugging level from 0 to 3. Default is off(0).");
52module_param(loudness, int, S_IRUGO); 53module_param(loudness, int, S_IRUGO);
53MODULE_PARM_DESC(maxvol,"Set maximium volume to +20db (0), default is 0db(1)"); 54MODULE_PARM_DESC(loudness, "Turn loudness on(1) else off(0). Default is off(0).");
54module_param(maxvol, int, S_IRUGO | S_IWUSR); 55module_param(maxvol, int, S_IRUGO | S_IWUSR);
55 56MODULE_PARM_DESC(maxvol, "Set maximium volume to +20dB(0) else +0dB(1). Default is +20dB(0).");
56 57
57 58
58/* Structure of address and subaddresses for the tda7432 */ 59/* Structure of address and subaddresses for the tda7432 */
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
index fc611ebeb82c..84cd1b65b765 100644
--- a/drivers/media/video/timblogiw.c
+++ b/drivers/media/video/timblogiw.c
@@ -20,7 +20,6 @@
20 * Timberdale FPGA LogiWin Video In 20 * Timberdale FPGA LogiWin Video In
21 */ 21 */
22 22
23#include <linux/version.h>
24#include <linux/platform_device.h> 23#include <linux/platform_device.h>
25#include <linux/slab.h> 24#include <linux/slab.h>
26#include <linux/dmaengine.h> 25#include <linux/dmaengine.h>
diff --git a/drivers/media/video/tlg2300/pd-common.h b/drivers/media/video/tlg2300/pd-common.h
index 46066bdc73f9..56564e6aaac2 100644
--- a/drivers/media/video/tlg2300/pd-common.h
+++ b/drivers/media/video/tlg2300/pd-common.h
@@ -1,7 +1,6 @@
1#ifndef PD_COMMON_H 1#ifndef PD_COMMON_H
2#define PD_COMMON_H 2#define PD_COMMON_H
3 3
4#include <linux/version.h>
5#include <linux/fs.h> 4#include <linux/fs.h>
6#include <linux/wait.h> 5#include <linux/wait.h>
7#include <linux/list.h> 6#include <linux/list.h>
diff --git a/drivers/media/video/tlg2300/pd-dvb.c b/drivers/media/video/tlg2300/pd-dvb.c
index edd78f8b1baa..d0da11ae19df 100644
--- a/drivers/media/video/tlg2300/pd-dvb.c
+++ b/drivers/media/video/tlg2300/pd-dvb.c
@@ -7,7 +7,7 @@
7 7
8#include "vendorcmds.h" 8#include "vendorcmds.h"
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <asm/atomic.h> 10#include <linux/atomic.h>
11 11
12static void dvb_urb_cleanup(struct pd_dvb_adapter *pd_dvb); 12static void dvb_urb_cleanup(struct pd_dvb_adapter *pd_dvb);
13 13
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index 99c81a9a4f46..129f135d5a5f 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -531,3 +531,4 @@ module_exit(poseidon_exit);
531MODULE_AUTHOR("Telegent Systems"); 531MODULE_AUTHOR("Telegent Systems");
532MODULE_DESCRIPTION("For tlg2300-based USB device "); 532MODULE_DESCRIPTION("For tlg2300-based USB device ");
533MODULE_LICENSE("GPL"); 533MODULE_LICENSE("GPL");
534MODULE_VERSION("0.0.2");
diff --git a/drivers/media/video/tlg2300/pd-radio.c b/drivers/media/video/tlg2300/pd-radio.c
index fae84c2a0c39..4fad1dfb92cf 100644
--- a/drivers/media/video/tlg2300/pd-radio.c
+++ b/drivers/media/video/tlg2300/pd-radio.c
@@ -6,7 +6,6 @@
6#include <linux/usb.h> 6#include <linux/usb.h>
7#include <linux/i2c.h> 7#include <linux/i2c.h>
8#include <media/v4l2-dev.h> 8#include <media/v4l2-dev.h>
9#include <linux/version.h>
10#include <linux/mm.h> 9#include <linux/mm.h>
11#include <linux/mutex.h> 10#include <linux/mutex.h>
12#include <media/v4l2-ioctl.h> 11#include <media/v4l2-ioctl.h>
@@ -149,7 +148,6 @@ static int vidioc_querycap(struct file *file, void *priv,
149 strlcpy(v->driver, "tele-radio", sizeof(v->driver)); 148 strlcpy(v->driver, "tele-radio", sizeof(v->driver));
150 strlcpy(v->card, "Telegent Poseidon", sizeof(v->card)); 149 strlcpy(v->card, "Telegent Poseidon", sizeof(v->card));
151 usb_make_path(p->udev, v->bus_info, sizeof(v->bus_info)); 150 usb_make_path(p->udev, v->bus_info, sizeof(v->bus_info));
152 v->version = KERNEL_VERSION(0, 0, 1);
153 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; 151 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
154 return 0; 152 return 0;
155} 153}
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index a03945ab9f08..11cc980b0cd5 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -39,6 +39,7 @@
39#include "tda9887.h" 39#include "tda9887.h"
40#include "xc5000.h" 40#include "xc5000.h"
41#include "tda18271.h" 41#include "tda18271.h"
42#include "xc4000.h"
42 43
43#define UNSET (-1U) 44#define UNSET (-1U)
44 45
@@ -391,6 +392,23 @@ static void set_type(struct i2c_client *c, unsigned int type,
391 tune_now = 0; 392 tune_now = 0;
392 break; 393 break;
393 } 394 }
395 case TUNER_XC4000:
396 {
397 struct xc4000_config xc4000_cfg = {
398 .i2c_address = t->i2c->addr,
399 /* FIXME: the correct parameters will be set */
400 /* only when the digital dvb_attach() occurs */
401 .default_pm = 0,
402 .dvb_amplitude = 0,
403 .set_smoothedcvbs = 0,
404 .if_khz = 0
405 };
406 if (!dvb_attach(xc4000_attach,
407 &t->fe, t->i2c->adapter, &xc4000_cfg))
408 goto attach_failed;
409 tune_now = 0;
410 break;
411 }
394 default: 412 default:
395 if (!dvb_attach(simple_tuner_attach, &t->fe, 413 if (!dvb_attach(simple_tuner_attach, &t->fe,
396 t->i2c->adapter, t->i2c->addr, t->type)) 414 t->i2c->adapter, t->i2c->addr, t->type))
diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c
index 0347bbe36459..742482e30011 100644
--- a/drivers/media/video/tw9910.c
+++ b/drivers/media/video/tw9910.c
@@ -552,16 +552,6 @@ static int tw9910_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
552 return ret; 552 return ret;
553} 553}
554 554
555static int tw9910_enum_input(struct soc_camera_device *icd,
556 struct v4l2_input *inp)
557{
558 inp->type = V4L2_INPUT_TYPE_TUNER;
559 inp->std = V4L2_STD_UNKNOWN;
560 strcpy(inp->name, "Video");
561
562 return 0;
563}
564
565static int tw9910_g_chip_ident(struct v4l2_subdev *sd, 555static int tw9910_g_chip_ident(struct v4l2_subdev *sd,
566 struct v4l2_dbg_chip_ident *id) 556 struct v4l2_dbg_chip_ident *id)
567{ 557{
@@ -846,13 +836,9 @@ static int tw9910_video_probe(struct soc_camera_device *icd,
846 struct tw9910_priv *priv = to_tw9910(client); 836 struct tw9910_priv *priv = to_tw9910(client);
847 s32 id; 837 s32 id;
848 838
849 /* 839 /* We must have a parent by now. And it cannot be a wrong one. */
850 * We must have a parent by now. And it cannot be a wrong one. 840 BUG_ON(!icd->parent ||
851 * So this entire test is completely redundant. 841 to_soc_camera_host(icd->parent)->nr != icd->iface);
852 */
853 if (!icd->dev.parent ||
854 to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
855 return -ENODEV;
856 842
857 /* 843 /*
858 * tw9910 only use 8 or 16 bit bus width 844 * tw9910 only use 8 or 16 bit bus width
@@ -891,7 +877,6 @@ static int tw9910_video_probe(struct soc_camera_device *icd,
891static struct soc_camera_ops tw9910_ops = { 877static struct soc_camera_ops tw9910_ops = {
892 .set_bus_param = tw9910_set_bus_param, 878 .set_bus_param = tw9910_set_bus_param,
893 .query_bus_param = tw9910_query_bus_param, 879 .query_bus_param = tw9910_query_bus_param,
894 .enum_input = tw9910_enum_input,
895}; 880};
896 881
897static struct v4l2_subdev_core_ops tw9910_subdev_core_ops = { 882static struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index ea8ea8a48dfe..5a74f5e07d7d 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -45,7 +45,6 @@
45 * 45 *
46 */ 46 */
47 47
48#include <linux/version.h>
49#include <linux/kernel.h> 48#include <linux/kernel.h>
50#include <linux/list.h> 49#include <linux/list.h>
51#include <linux/timer.h> 50#include <linux/timer.h>
@@ -77,15 +76,7 @@
77#define DRIVER_ALIAS "USBVision" 76#define DRIVER_ALIAS "USBVision"
78#define DRIVER_DESC "USBVision USB Video Device Driver for Linux" 77#define DRIVER_DESC "USBVision USB Video Device Driver for Linux"
79#define DRIVER_LICENSE "GPL" 78#define DRIVER_LICENSE "GPL"
80#define USBVISION_DRIVER_VERSION_MAJOR 0 79#define USBVISION_VERSION_STRING "0.9.11"
81#define USBVISION_DRIVER_VERSION_MINOR 9
82#define USBVISION_DRIVER_VERSION_PATCHLEVEL 10
83#define USBVISION_DRIVER_VERSION KERNEL_VERSION(USBVISION_DRIVER_VERSION_MAJOR,\
84USBVISION_DRIVER_VERSION_MINOR,\
85USBVISION_DRIVER_VERSION_PATCHLEVEL)
86#define USBVISION_VERSION_STRING __stringify(USBVISION_DRIVER_VERSION_MAJOR) \
87"." __stringify(USBVISION_DRIVER_VERSION_MINOR) \
88"." __stringify(USBVISION_DRIVER_VERSION_PATCHLEVEL)
89 80
90#define ENABLE_HEXDUMP 0 /* Enable if you need it */ 81#define ENABLE_HEXDUMP 0 /* Enable if you need it */
91 82
@@ -516,7 +507,6 @@ static int vidioc_querycap(struct file *file, void *priv,
516 usbvision_device_data[usbvision->dev_model].model_string, 507 usbvision_device_data[usbvision->dev_model].model_string,
517 sizeof(vc->card)); 508 sizeof(vc->card));
518 usb_make_path(usbvision->dev, vc->bus_info, sizeof(vc->bus_info)); 509 usb_make_path(usbvision->dev, vc->bus_info, sizeof(vc->bus_info));
519 vc->version = USBVISION_DRIVER_VERSION;
520 vc->capabilities = V4L2_CAP_VIDEO_CAPTURE | 510 vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
521 V4L2_CAP_AUDIO | 511 V4L2_CAP_AUDIO |
522 V4L2_CAP_READWRITE | 512 V4L2_CAP_READWRITE |
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index a4db26fa2f53..10c2364f3e8a 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -20,7 +20,7 @@
20#include <linux/videodev2.h> 20#include <linux/videodev2.h>
21#include <linux/vmalloc.h> 21#include <linux/vmalloc.h>
22#include <linux/wait.h> 22#include <linux/wait.h>
23#include <asm/atomic.h> 23#include <linux/atomic.h>
24 24
25#include "uvcvideo.h" 25#include "uvcvideo.h"
26 26
@@ -1664,8 +1664,8 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
1664 return -EINVAL; 1664 return -EINVAL;
1665 } 1665 }
1666 1666
1667 /* Search for the matching (GUID/CS) control in the given device */ 1667 /* Search for the matching (GUID/CS) control on the current chain */
1668 list_for_each_entry(entity, &dev->entities, list) { 1668 list_for_each_entry(entity, &chain->entities, chain) {
1669 unsigned int i; 1669 unsigned int i;
1670 1670
1671 if (UVC_ENTITY_TYPE(entity) != UVC_VC_EXTENSION_UNIT || 1671 if (UVC_ENTITY_TYPE(entity) != UVC_VC_EXTENSION_UNIT ||
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index b6eae48d7fb8..d29f9c2d0854 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -31,6 +31,7 @@
31#include <linux/videodev2.h> 31#include <linux/videodev2.h>
32#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
33#include <linux/wait.h> 33#include <linux/wait.h>
34#include <linux/version.h>
34#include <asm/atomic.h> 35#include <asm/atomic.h>
35#include <asm/unaligned.h> 36#include <asm/unaligned.h>
36 37
@@ -1857,7 +1858,7 @@ static int uvc_probe(struct usb_interface *intf,
1857 sizeof(dev->mdev.serial)); 1858 sizeof(dev->mdev.serial));
1858 strcpy(dev->mdev.bus_info, udev->devpath); 1859 strcpy(dev->mdev.bus_info, udev->devpath);
1859 dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); 1860 dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
1860 dev->mdev.driver_version = DRIVER_VERSION_NUMBER; 1861 dev->mdev.driver_version = LINUX_VERSION_CODE;
1861 if (media_device_register(&dev->mdev) < 0) 1862 if (media_device_register(&dev->mdev) < 0)
1862 goto error; 1863 goto error;
1863 1864
@@ -2130,6 +2131,15 @@ static struct usb_device_id uvc_ids[] = {
2130 .bInterfaceProtocol = 0, 2131 .bInterfaceProtocol = 0,
2131 .driver_info = UVC_QUIRK_PROBE_MINMAX 2132 .driver_info = UVC_QUIRK_PROBE_MINMAX
2132 | UVC_QUIRK_BUILTIN_ISIGHT }, 2133 | UVC_QUIRK_BUILTIN_ISIGHT },
2134 /* Foxlink ("HP Webcam" on HP Mini 5103) */
2135 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2136 | USB_DEVICE_ID_MATCH_INT_INFO,
2137 .idVendor = 0x05c8,
2138 .idProduct = 0x0403,
2139 .bInterfaceClass = USB_CLASS_VIDEO,
2140 .bInterfaceSubClass = 1,
2141 .bInterfaceProtocol = 0,
2142 .driver_info = UVC_QUIRK_FIX_BANDWIDTH },
2133 /* Genesys Logic USB 2.0 PC Camera */ 2143 /* Genesys Logic USB 2.0 PC Camera */
2134 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 2144 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2135 | USB_DEVICE_ID_MATCH_INT_INFO, 2145 | USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index f90ce9fce539..677691c44500 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -19,7 +19,7 @@
19#include <linux/videodev2.h> 19#include <linux/videodev2.h>
20#include <linux/vmalloc.h> 20#include <linux/vmalloc.h>
21#include <linux/wait.h> 21#include <linux/wait.h>
22#include <asm/atomic.h> 22#include <linux/atomic.h>
23 23
24#include "uvcvideo.h" 24#include "uvcvideo.h"
25 25
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 543a80395b7f..ea71d5f1f6db 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -21,7 +21,7 @@
21#include <linux/vmalloc.h> 21#include <linux/vmalloc.h>
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/wait.h> 23#include <linux/wait.h>
24#include <asm/atomic.h> 24#include <linux/atomic.h>
25 25
26#include <media/v4l2-common.h> 26#include <media/v4l2-common.h>
27#include <media/v4l2-ioctl.h> 27#include <media/v4l2-ioctl.h>
@@ -83,7 +83,7 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
83 default: 83 default:
84 uvc_trace(UVC_TRACE_CONTROL, "Unsupported V4L2 control type " 84 uvc_trace(UVC_TRACE_CONTROL, "Unsupported V4L2 control type "
85 "%u.\n", xmap->v4l2_type); 85 "%u.\n", xmap->v4l2_type);
86 ret = -EINVAL; 86 ret = -ENOTTY;
87 goto done; 87 goto done;
88 } 88 }
89 89
@@ -571,7 +571,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
571 strlcpy(cap->card, vdev->name, sizeof cap->card); 571 strlcpy(cap->card, vdev->name, sizeof cap->card);
572 usb_make_path(stream->dev->udev, 572 usb_make_path(stream->dev->udev,
573 cap->bus_info, sizeof(cap->bus_info)); 573 cap->bus_info, sizeof(cap->bus_info));
574 cap->version = DRIVER_VERSION_NUMBER; 574 cap->version = LINUX_VERSION_CODE;
575 if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 575 if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
576 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE 576 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
577 | V4L2_CAP_STREAMING; 577 | V4L2_CAP_STREAMING;
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 49994793cc77..8244167c8915 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -19,7 +19,7 @@
19#include <linux/videodev2.h> 19#include <linux/videodev2.h>
20#include <linux/vmalloc.h> 20#include <linux/vmalloc.h>
21#include <linux/wait.h> 21#include <linux/wait.h>
22#include <asm/atomic.h> 22#include <linux/atomic.h>
23#include <asm/unaligned.h> 23#include <asm/unaligned.h>
24 24
25#include <media/v4l2-common.h> 25#include <media/v4l2-common.h>
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 20107fd3574d..df32a43ca86a 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -183,8 +183,7 @@ struct uvc_xu_control {
183 * Driver specific constants. 183 * Driver specific constants.
184 */ 184 */
185 185
186#define DRIVER_VERSION_NUMBER KERNEL_VERSION(1, 1, 0) 186#define DRIVER_VERSION "1.1.1"
187#define DRIVER_VERSION "v1.1.0"
188 187
189/* Number of isochronous URBs. */ 188/* Number of isochronous URBs. */
190#define UVC_URBS 5 189#define UVC_URBS 5
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 06b9f9f82013..5c6100fb4072 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -105,6 +105,9 @@ int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
105 menu_items[ctrl->value][0] == '\0') 105 menu_items[ctrl->value][0] == '\0')
106 return -EINVAL; 106 return -EINVAL;
107 } 107 }
108 if (qctrl->type == V4L2_CTRL_TYPE_BITMASK &&
109 (ctrl->value & ~qctrl->maximum))
110 return -ERANGE;
108 return 0; 111 return 0;
109} 112}
110EXPORT_SYMBOL(v4l2_ctrl_check); 113EXPORT_SYMBOL(v4l2_ctrl_check);
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index 7c2694738b31..61979b70f388 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -662,6 +662,32 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
662 return 0; 662 return 0;
663} 663}
664 664
665struct v4l2_event32 {
666 __u32 type;
667 union {
668 __u8 data[64];
669 } u;
670 __u32 pending;
671 __u32 sequence;
672 struct compat_timespec timestamp;
673 __u32 id;
674 __u32 reserved[8];
675};
676
677static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up)
678{
679 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) ||
680 put_user(kp->type, &up->type) ||
681 copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
682 put_user(kp->pending, &up->pending) ||
683 put_user(kp->sequence, &up->sequence) ||
684 put_compat_timespec(&kp->timestamp, &up->timestamp) ||
685 put_user(kp->id, &up->id) ||
686 copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
687 return -EFAULT;
688 return 0;
689}
690
665#define VIDIOC_G_FMT32 _IOWR('V', 4, struct v4l2_format32) 691#define VIDIOC_G_FMT32 _IOWR('V', 4, struct v4l2_format32)
666#define VIDIOC_S_FMT32 _IOWR('V', 5, struct v4l2_format32) 692#define VIDIOC_S_FMT32 _IOWR('V', 5, struct v4l2_format32)
667#define VIDIOC_QUERYBUF32 _IOWR('V', 9, struct v4l2_buffer32) 693#define VIDIOC_QUERYBUF32 _IOWR('V', 9, struct v4l2_buffer32)
@@ -675,6 +701,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
675#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32) 701#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32)
676#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32) 702#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
677#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32) 703#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32)
704#define VIDIOC_DQEVENT32 _IOR ('V', 89, struct v4l2_event32)
678 705
679#define VIDIOC_OVERLAY32 _IOW ('V', 14, s32) 706#define VIDIOC_OVERLAY32 _IOW ('V', 14, s32)
680#define VIDIOC_STREAMON32 _IOW ('V', 18, s32) 707#define VIDIOC_STREAMON32 _IOW ('V', 18, s32)
@@ -693,6 +720,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
693 struct v4l2_input v2i; 720 struct v4l2_input v2i;
694 struct v4l2_standard v2s; 721 struct v4l2_standard v2s;
695 struct v4l2_ext_controls v2ecs; 722 struct v4l2_ext_controls v2ecs;
723 struct v4l2_event v2ev;
696 unsigned long vx; 724 unsigned long vx;
697 int vi; 725 int vi;
698 } karg; 726 } karg;
@@ -715,6 +743,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
715 case VIDIOC_G_EXT_CTRLS32: cmd = VIDIOC_G_EXT_CTRLS; break; 743 case VIDIOC_G_EXT_CTRLS32: cmd = VIDIOC_G_EXT_CTRLS; break;
716 case VIDIOC_S_EXT_CTRLS32: cmd = VIDIOC_S_EXT_CTRLS; break; 744 case VIDIOC_S_EXT_CTRLS32: cmd = VIDIOC_S_EXT_CTRLS; break;
717 case VIDIOC_TRY_EXT_CTRLS32: cmd = VIDIOC_TRY_EXT_CTRLS; break; 745 case VIDIOC_TRY_EXT_CTRLS32: cmd = VIDIOC_TRY_EXT_CTRLS; break;
746 case VIDIOC_DQEVENT32: cmd = VIDIOC_DQEVENT; break;
718 case VIDIOC_OVERLAY32: cmd = VIDIOC_OVERLAY; break; 747 case VIDIOC_OVERLAY32: cmd = VIDIOC_OVERLAY; break;
719 case VIDIOC_STREAMON32: cmd = VIDIOC_STREAMON; break; 748 case VIDIOC_STREAMON32: cmd = VIDIOC_STREAMON; break;
720 case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break; 749 case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break;
@@ -778,6 +807,9 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
778 err = get_v4l2_ext_controls32(&karg.v2ecs, up); 807 err = get_v4l2_ext_controls32(&karg.v2ecs, up);
779 compatible_arg = 0; 808 compatible_arg = 0;
780 break; 809 break;
810 case VIDIOC_DQEVENT:
811 compatible_arg = 0;
812 break;
781 } 813 }
782 if (err) 814 if (err)
783 return err; 815 return err;
@@ -818,6 +850,10 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
818 err = put_v4l2_framebuffer32(&karg.v2fb, up); 850 err = put_v4l2_framebuffer32(&karg.v2fb, up);
819 break; 851 break;
820 852
853 case VIDIOC_DQEVENT:
854 err = put_v4l2_event32(&karg.v2ev, up);
855 break;
856
821 case VIDIOC_G_FMT: 857 case VIDIOC_G_FMT:
822 case VIDIOC_S_FMT: 858 case VIDIOC_S_FMT:
823 case VIDIOC_TRY_FMT: 859 case VIDIOC_TRY_FMT:
@@ -920,6 +956,7 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
920 case VIDIOC_S_DV_TIMINGS: 956 case VIDIOC_S_DV_TIMINGS:
921 case VIDIOC_G_DV_TIMINGS: 957 case VIDIOC_G_DV_TIMINGS:
922 case VIDIOC_DQEVENT: 958 case VIDIOC_DQEVENT:
959 case VIDIOC_DQEVENT32:
923 case VIDIOC_SUBSCRIBE_EVENT: 960 case VIDIOC_SUBSCRIBE_EVENT:
924 case VIDIOC_UNSUBSCRIBE_EVENT: 961 case VIDIOC_UNSUBSCRIBE_EVENT:
925 ret = do_video_ioctl(file, cmd, arg); 962 ret = do_video_ioctl(file, cmd, arg);
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index 2412f08527aa..06b6014d4fb4 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -23,17 +23,39 @@
23#include <media/v4l2-ioctl.h> 23#include <media/v4l2-ioctl.h>
24#include <media/v4l2-device.h> 24#include <media/v4l2-device.h>
25#include <media/v4l2-ctrls.h> 25#include <media/v4l2-ctrls.h>
26#include <media/v4l2-event.h>
26#include <media/v4l2-dev.h> 27#include <media/v4l2-dev.h>
27 28
29#define has_op(master, op) \
30 (master->ops && master->ops->op)
31#define call_op(master, op) \
32 (has_op(master, op) ? master->ops->op(master) : 0)
33
28/* Internal temporary helper struct, one for each v4l2_ext_control */ 34/* Internal temporary helper struct, one for each v4l2_ext_control */
29struct ctrl_helper { 35struct v4l2_ctrl_helper {
36 /* Pointer to the control reference of the master control */
37 struct v4l2_ctrl_ref *mref;
30 /* The control corresponding to the v4l2_ext_control ID field. */ 38 /* The control corresponding to the v4l2_ext_control ID field. */
31 struct v4l2_ctrl *ctrl; 39 struct v4l2_ctrl *ctrl;
32 /* Used internally to mark whether this control was already 40 /* v4l2_ext_control index of the next control belonging to the
33 processed. */ 41 same cluster, or 0 if there isn't any. */
34 bool handled; 42 u32 next;
35}; 43};
36 44
45/* Small helper function to determine if the autocluster is set to manual
46 mode. In that case the is_volatile flag should be ignored. */
47static bool is_cur_manual(const struct v4l2_ctrl *master)
48{
49 return master->is_auto && master->cur.val == master->manual_mode_value;
50}
51
52/* Same as above, but this checks the against the new value instead of the
53 current value. */
54static bool is_new_manual(const struct v4l2_ctrl *master)
55{
56 return master->is_auto && master->val == master->manual_mode_value;
57}
58
37/* Returns NULL or a character pointer array containing the menu for 59/* Returns NULL or a character pointer array containing the menu for
38 the given control ID. The pointer array ends with a NULL pointer. 60 the given control ID. The pointer array ends with a NULL pointer.
39 An empty string signifies a menu entry that is invalid. This allows 61 An empty string signifies a menu entry that is invalid. This allows
@@ -181,7 +203,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
181 }; 203 };
182 static const char * const mpeg_stream_vbi_fmt[] = { 204 static const char * const mpeg_stream_vbi_fmt[] = {
183 "No VBI", 205 "No VBI",
184 "Private packet, IVTV format", 206 "Private Packet, IVTV Format",
185 NULL 207 NULL
186 }; 208 };
187 static const char * const camera_power_line_frequency[] = { 209 static const char * const camera_power_line_frequency[] = {
@@ -204,18 +226,130 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
204 "Negative", 226 "Negative",
205 "Emboss", 227 "Emboss",
206 "Sketch", 228 "Sketch",
207 "Sky blue", 229 "Sky Blue",
208 "Grass green", 230 "Grass Green",
209 "Skin whiten", 231 "Skin Whiten",
210 "Vivid", 232 "Vivid",
211 NULL 233 NULL
212 }; 234 };
213 static const char * const tune_preemphasis[] = { 235 static const char * const tune_preemphasis[] = {
214 "No preemphasis", 236 "No Preemphasis",
215 "50 useconds", 237 "50 useconds",
216 "75 useconds", 238 "75 useconds",
217 NULL, 239 NULL,
218 }; 240 };
241 static const char * const header_mode[] = {
242 "Separate Buffer",
243 "Joined With 1st Frame",
244 NULL,
245 };
246 static const char * const multi_slice[] = {
247 "Single",
248 "Max Macroblocks",
249 "Max Bytes",
250 NULL,
251 };
252 static const char * const entropy_mode[] = {
253 "CAVLC",
254 "CABAC",
255 NULL,
256 };
257 static const char * const mpeg_h264_level[] = {
258 "1",
259 "1b",
260 "1.1",
261 "1.2",
262 "1.3",
263 "2",
264 "2.1",
265 "2.2",
266 "3",
267 "3.1",
268 "3.2",
269 "4",
270 "4.1",
271 "4.2",
272 "5",
273 "5.1",
274 NULL,
275 };
276 static const char * const h264_loop_filter[] = {
277 "Enabled",
278 "Disabled",
279 "Disabled at Slice Boundary",
280 NULL,
281 };
282 static const char * const h264_profile[] = {
283 "Baseline",
284 "Constrained Baseline",
285 "Main",
286 "Extended",
287 "High",
288 "High 10",
289 "High 422",
290 "High 444 Predictive",
291 "High 10 Intra",
292 "High 422 Intra",
293 "High 444 Intra",
294 "CAVLC 444 Intra",
295 "Scalable Baseline",
296 "Scalable High",
297 "Scalable High Intra",
298 "Multiview High",
299 NULL,
300 };
301 static const char * const vui_sar_idc[] = {
302 "Unspecified",
303 "1:1",
304 "12:11",
305 "10:11",
306 "16:11",
307 "40:33",
308 "24:11",
309 "20:11",
310 "32:11",
311 "80:33",
312 "18:11",
313 "15:11",
314 "64:33",
315 "160:99",
316 "4:3",
317 "3:2",
318 "2:1",
319 "Extended SAR",
320 NULL,
321 };
322 static const char * const mpeg_mpeg4_level[] = {
323 "0",
324 "0b",
325 "1",
326 "2",
327 "3",
328 "3b",
329 "4",
330 "5",
331 NULL,
332 };
333 static const char * const mpeg4_profile[] = {
334 "Simple",
335 "Adcanved Simple",
336 "Core",
337 "Simple Scalable",
338 "Advanced Coding Efficency",
339 NULL,
340 };
341
342 static const char * const flash_led_mode[] = {
343 "Off",
344 "Flash",
345 "Torch",
346 NULL,
347 };
348 static const char * const flash_strobe_source[] = {
349 "Software",
350 "External",
351 NULL,
352 };
219 353
220 switch (id) { 354 switch (id) {
221 case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: 355 case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
@@ -256,6 +390,28 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
256 return colorfx; 390 return colorfx;
257 case V4L2_CID_TUNE_PREEMPHASIS: 391 case V4L2_CID_TUNE_PREEMPHASIS:
258 return tune_preemphasis; 392 return tune_preemphasis;
393 case V4L2_CID_FLASH_LED_MODE:
394 return flash_led_mode;
395 case V4L2_CID_FLASH_STROBE_SOURCE:
396 return flash_strobe_source;
397 case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
398 return header_mode;
399 case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
400 return multi_slice;
401 case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
402 return entropy_mode;
403 case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
404 return mpeg_h264_level;
405 case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
406 return h264_loop_filter;
407 case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
408 return h264_profile;
409 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
410 return vui_sar_idc;
411 case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
412 return mpeg_mpeg4_level;
413 case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
414 return mpeg4_profile;
259 default: 415 default:
260 return NULL; 416 return NULL;
261 } 417 }
@@ -307,6 +463,8 @@ const char *v4l2_ctrl_get_name(u32 id)
307 case V4L2_CID_CHROMA_GAIN: return "Chroma Gain"; 463 case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
308 case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1"; 464 case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
309 case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2"; 465 case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
466 case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Minimum Number of Capture Buffers";
467 case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Minimum Number of Output Buffers";
310 468
311 /* MPEG controls */ 469 /* MPEG controls */
312 /* Keep the order of the 'case's the same as in videodev2.h! */ 470 /* Keep the order of the 'case's the same as in videodev2.h! */
@@ -343,6 +501,48 @@ const char *v4l2_ctrl_get_name(u32 id)
343 case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation"; 501 case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation";
344 case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute"; 502 case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute";
345 case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV"; 503 case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
504 case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface";
505 case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable";
506 case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "The Number of Intra Refresh MBs";
507 case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable";
508 case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control";
509 case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode";
510 case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "The Max Number of Reference Picture";
511 case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value";
512 case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P frame QP Value";
513 case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B frame QP Value";
514 case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value";
515 case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value";
516 case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value";
517 case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P frame QP Value";
518 case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B frame QP Value";
519 case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value";
520 case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value";
521 case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable";
522 case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size";
523 case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entorpy Mode";
524 case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I Period";
525 case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level";
526 case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset";
527 case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset";
528 case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return "H264 Loop Filter Mode";
529 case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile";
530 case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT: return "Vertical Size of SAR";
531 case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR";
532 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable";
533 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC";
534 case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
535 case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P frame QP Value";
536 case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B frame QP Value";
537 case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value";
538 case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value";
539 case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level";
540 case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile";
541 case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable";
542 case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "The Maximum Bytes Per Slice";
543 case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "The Number of MB in a Slice";
544 case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "The Slice Partitioning Method";
545 case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size";
346 546
347 /* CAMERA controls */ 547 /* CAMERA controls */
348 /* Keep the order of the 'case's the same as in videodev2.h! */ 548 /* Keep the order of the 'case's the same as in videodev2.h! */
@@ -389,6 +589,21 @@ const char *v4l2_ctrl_get_name(u32 id)
389 case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level"; 589 case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
390 case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor"; 590 case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
391 591
592 /* Flash controls */
593 case V4L2_CID_FLASH_CLASS: return "Flash controls";
594 case V4L2_CID_FLASH_LED_MODE: return "LED mode";
595 case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe source";
596 case V4L2_CID_FLASH_STROBE: return "Strobe";
597 case V4L2_CID_FLASH_STROBE_STOP: return "Stop strobe";
598 case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe status";
599 case V4L2_CID_FLASH_TIMEOUT: return "Strobe timeout";
600 case V4L2_CID_FLASH_INTENSITY: return "Intensity, flash mode";
601 case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, torch mode";
602 case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, indicator";
603 case V4L2_CID_FLASH_FAULT: return "Faults";
604 case V4L2_CID_FLASH_CHARGE: return "Charge";
605 case V4L2_CID_FLASH_READY: return "Ready to strobe";
606
392 default: 607 default:
393 return NULL; 608 return NULL;
394 } 609 }
@@ -423,12 +638,24 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
423 case V4L2_CID_PILOT_TONE_ENABLED: 638 case V4L2_CID_PILOT_TONE_ENABLED:
424 case V4L2_CID_ILLUMINATORS_1: 639 case V4L2_CID_ILLUMINATORS_1:
425 case V4L2_CID_ILLUMINATORS_2: 640 case V4L2_CID_ILLUMINATORS_2:
641 case V4L2_CID_FLASH_STROBE_STATUS:
642 case V4L2_CID_FLASH_CHARGE:
643 case V4L2_CID_FLASH_READY:
644 case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
645 case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
646 case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
647 case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
648 case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
649 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
650 case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
426 *type = V4L2_CTRL_TYPE_BOOLEAN; 651 *type = V4L2_CTRL_TYPE_BOOLEAN;
427 *min = 0; 652 *min = 0;
428 *max = *step = 1; 653 *max = *step = 1;
429 break; 654 break;
430 case V4L2_CID_PAN_RESET: 655 case V4L2_CID_PAN_RESET:
431 case V4L2_CID_TILT_RESET: 656 case V4L2_CID_TILT_RESET:
657 case V4L2_CID_FLASH_STROBE:
658 case V4L2_CID_FLASH_STROBE_STOP:
432 *type = V4L2_CTRL_TYPE_BUTTON; 659 *type = V4L2_CTRL_TYPE_BUTTON;
433 *flags |= V4L2_CTRL_FLAG_WRITE_ONLY; 660 *flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
434 *min = *max = *step = *def = 0; 661 *min = *max = *step = *def = 0;
@@ -452,6 +679,17 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
452 case V4L2_CID_EXPOSURE_AUTO: 679 case V4L2_CID_EXPOSURE_AUTO:
453 case V4L2_CID_COLORFX: 680 case V4L2_CID_COLORFX:
454 case V4L2_CID_TUNE_PREEMPHASIS: 681 case V4L2_CID_TUNE_PREEMPHASIS:
682 case V4L2_CID_FLASH_LED_MODE:
683 case V4L2_CID_FLASH_STROBE_SOURCE:
684 case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
685 case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
686 case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
687 case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
688 case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
689 case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
690 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
691 case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
692 case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
455 *type = V4L2_CTRL_TYPE_MENU; 693 *type = V4L2_CTRL_TYPE_MENU;
456 break; 694 break;
457 case V4L2_CID_RDS_TX_PS_NAME: 695 case V4L2_CID_RDS_TX_PS_NAME:
@@ -462,6 +700,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
462 case V4L2_CID_CAMERA_CLASS: 700 case V4L2_CID_CAMERA_CLASS:
463 case V4L2_CID_MPEG_CLASS: 701 case V4L2_CID_MPEG_CLASS:
464 case V4L2_CID_FM_TX_CLASS: 702 case V4L2_CID_FM_TX_CLASS:
703 case V4L2_CID_FLASH_CLASS:
465 *type = V4L2_CTRL_TYPE_CTRL_CLASS; 704 *type = V4L2_CTRL_TYPE_CTRL_CLASS;
466 /* You can neither read not write these */ 705 /* You can neither read not write these */
467 *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY; 706 *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
@@ -474,6 +713,14 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
474 /* Max is calculated as RGB888 that is 2^24 */ 713 /* Max is calculated as RGB888 that is 2^24 */
475 *max = 0xFFFFFF; 714 *max = 0xFFFFFF;
476 break; 715 break;
716 case V4L2_CID_FLASH_FAULT:
717 *type = V4L2_CTRL_TYPE_BITMASK;
718 break;
719 case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
720 case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
721 *type = V4L2_CTRL_TYPE_INTEGER;
722 *flags |= V4L2_CTRL_FLAG_READ_ONLY;
723 break;
477 default: 724 default:
478 *type = V4L2_CTRL_TYPE_INTEGER; 725 *type = V4L2_CTRL_TYPE_INTEGER;
479 break; 726 break;
@@ -519,6 +766,10 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
519 case V4L2_CID_ZOOM_RELATIVE: 766 case V4L2_CID_ZOOM_RELATIVE:
520 *flags |= V4L2_CTRL_FLAG_WRITE_ONLY; 767 *flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
521 break; 768 break;
769 case V4L2_CID_FLASH_STROBE_STATUS:
770 case V4L2_CID_FLASH_READY:
771 *flags |= V4L2_CTRL_FLAG_READ_ONLY;
772 break;
522 } 773 }
523} 774}
524EXPORT_SYMBOL(v4l2_ctrl_fill); 775EXPORT_SYMBOL(v4l2_ctrl_fill);
@@ -537,6 +788,42 @@ static bool type_is_int(const struct v4l2_ctrl *ctrl)
537 } 788 }
538} 789}
539 790
791static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
792{
793 memset(ev->reserved, 0, sizeof(ev->reserved));
794 ev->type = V4L2_EVENT_CTRL;
795 ev->id = ctrl->id;
796 ev->u.ctrl.changes = changes;
797 ev->u.ctrl.type = ctrl->type;
798 ev->u.ctrl.flags = ctrl->flags;
799 if (ctrl->type == V4L2_CTRL_TYPE_STRING)
800 ev->u.ctrl.value64 = 0;
801 else
802 ev->u.ctrl.value64 = ctrl->cur.val64;
803 ev->u.ctrl.minimum = ctrl->minimum;
804 ev->u.ctrl.maximum = ctrl->maximum;
805 if (ctrl->type == V4L2_CTRL_TYPE_MENU)
806 ev->u.ctrl.step = 1;
807 else
808 ev->u.ctrl.step = ctrl->step;
809 ev->u.ctrl.default_value = ctrl->default_value;
810}
811
812static void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
813{
814 struct v4l2_event ev;
815 struct v4l2_subscribed_event *sev;
816
817 if (list_empty(&ctrl->ev_subs))
818 return;
819 fill_event(&ev, ctrl, changes);
820
821 list_for_each_entry(sev, &ctrl->ev_subs, node)
822 if (sev->fh && (sev->fh != fh ||
823 (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK)))
824 v4l2_event_queue_fh(sev->fh, &ev);
825}
826
540/* Helper function: copy the current control value back to the caller */ 827/* Helper function: copy the current control value back to the caller */
541static int cur_to_user(struct v4l2_ext_control *c, 828static int cur_to_user(struct v4l2_ext_control *c,
542 struct v4l2_ctrl *ctrl) 829 struct v4l2_ctrl *ctrl)
@@ -624,22 +911,45 @@ static int new_to_user(struct v4l2_ext_control *c,
624} 911}
625 912
626/* Copy the new value to the current value. */ 913/* Copy the new value to the current value. */
627static void new_to_cur(struct v4l2_ctrl *ctrl) 914static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
915 bool update_inactive)
628{ 916{
917 bool changed = false;
918
629 if (ctrl == NULL) 919 if (ctrl == NULL)
630 return; 920 return;
631 switch (ctrl->type) { 921 switch (ctrl->type) {
922 case V4L2_CTRL_TYPE_BUTTON:
923 changed = true;
924 break;
632 case V4L2_CTRL_TYPE_STRING: 925 case V4L2_CTRL_TYPE_STRING:
633 /* strings are always 0-terminated */ 926 /* strings are always 0-terminated */
927 changed = strcmp(ctrl->string, ctrl->cur.string);
634 strcpy(ctrl->cur.string, ctrl->string); 928 strcpy(ctrl->cur.string, ctrl->string);
635 break; 929 break;
636 case V4L2_CTRL_TYPE_INTEGER64: 930 case V4L2_CTRL_TYPE_INTEGER64:
931 changed = ctrl->val64 != ctrl->cur.val64;
637 ctrl->cur.val64 = ctrl->val64; 932 ctrl->cur.val64 = ctrl->val64;
638 break; 933 break;
639 default: 934 default:
935 changed = ctrl->val != ctrl->cur.val;
640 ctrl->cur.val = ctrl->val; 936 ctrl->cur.val = ctrl->val;
641 break; 937 break;
642 } 938 }
939 if (update_inactive) {
940 ctrl->flags &= ~V4L2_CTRL_FLAG_INACTIVE;
941 if (!is_cur_manual(ctrl->cluster[0]))
942 ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
943 }
944 if (changed || update_inactive) {
945 /* If a control was changed that was not one of the controls
946 modified by the application, then send the event to all. */
947 if (!ctrl->is_new)
948 fh = NULL;
949 send_event(fh, ctrl,
950 (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) |
951 (update_inactive ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
952 }
643} 953}
644 954
645/* Copy the current value to the new value */ 955/* Copy the current value to the new value */
@@ -692,13 +1002,11 @@ static int cluster_changed(struct v4l2_ctrl *master)
692 return diff; 1002 return diff;
693} 1003}
694 1004
695/* Validate a new control */ 1005/* Validate integer-type control */
696static int validate_new(struct v4l2_ctrl *ctrl) 1006static int validate_new_int(const struct v4l2_ctrl *ctrl, s32 *pval)
697{ 1007{
698 s32 val = ctrl->val; 1008 s32 val = *pval;
699 char *s = ctrl->string;
700 u32 offset; 1009 u32 offset;
701 size_t len;
702 1010
703 switch (ctrl->type) { 1011 switch (ctrl->type) {
704 case V4L2_CTRL_TYPE_INTEGER: 1012 case V4L2_CTRL_TYPE_INTEGER:
@@ -711,11 +1019,11 @@ static int validate_new(struct v4l2_ctrl *ctrl)
711 offset = val - ctrl->minimum; 1019 offset = val - ctrl->minimum;
712 offset = ctrl->step * (offset / ctrl->step); 1020 offset = ctrl->step * (offset / ctrl->step);
713 val = ctrl->minimum + offset; 1021 val = ctrl->minimum + offset;
714 ctrl->val = val; 1022 *pval = val;
715 return 0; 1023 return 0;
716 1024
717 case V4L2_CTRL_TYPE_BOOLEAN: 1025 case V4L2_CTRL_TYPE_BOOLEAN:
718 ctrl->val = !!ctrl->val; 1026 *pval = !!val;
719 return 0; 1027 return 0;
720 1028
721 case V4L2_CTRL_TYPE_MENU: 1029 case V4L2_CTRL_TYPE_MENU:
@@ -726,11 +1034,35 @@ static int validate_new(struct v4l2_ctrl *ctrl)
726 return -EINVAL; 1034 return -EINVAL;
727 return 0; 1035 return 0;
728 1036
1037 case V4L2_CTRL_TYPE_BITMASK:
1038 *pval &= ctrl->maximum;
1039 return 0;
1040
729 case V4L2_CTRL_TYPE_BUTTON: 1041 case V4L2_CTRL_TYPE_BUTTON:
730 case V4L2_CTRL_TYPE_CTRL_CLASS: 1042 case V4L2_CTRL_TYPE_CTRL_CLASS:
731 ctrl->val64 = 0; 1043 *pval = 0;
732 return 0; 1044 return 0;
733 1045
1046 default:
1047 return -EINVAL;
1048 }
1049}
1050
1051/* Validate a new control */
1052static int validate_new(const struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c)
1053{
1054 char *s = c->string;
1055 size_t len;
1056
1057 switch (ctrl->type) {
1058 case V4L2_CTRL_TYPE_INTEGER:
1059 case V4L2_CTRL_TYPE_BOOLEAN:
1060 case V4L2_CTRL_TYPE_MENU:
1061 case V4L2_CTRL_TYPE_BITMASK:
1062 case V4L2_CTRL_TYPE_BUTTON:
1063 case V4L2_CTRL_TYPE_CTRL_CLASS:
1064 return validate_new_int(ctrl, &c->value);
1065
734 case V4L2_CTRL_TYPE_INTEGER64: 1066 case V4L2_CTRL_TYPE_INTEGER64:
735 return 0; 1067 return 0;
736 1068
@@ -780,6 +1112,7 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
780{ 1112{
781 struct v4l2_ctrl_ref *ref, *next_ref; 1113 struct v4l2_ctrl_ref *ref, *next_ref;
782 struct v4l2_ctrl *ctrl, *next_ctrl; 1114 struct v4l2_ctrl *ctrl, *next_ctrl;
1115 struct v4l2_subscribed_event *sev, *next_sev;
783 1116
784 if (hdl == NULL || hdl->buckets == NULL) 1117 if (hdl == NULL || hdl->buckets == NULL)
785 return; 1118 return;
@@ -793,6 +1126,8 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
793 /* Free all controls owned by the handler */ 1126 /* Free all controls owned by the handler */
794 list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) { 1127 list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) {
795 list_del(&ctrl->node); 1128 list_del(&ctrl->node);
1129 list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node)
1130 list_del(&sev->node);
796 kfree(ctrl); 1131 kfree(ctrl);
797 } 1132 }
798 kfree(hdl->buckets); 1133 kfree(hdl->buckets);
@@ -962,13 +1297,17 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
962 1297
963 /* Sanity checks */ 1298 /* Sanity checks */
964 if (id == 0 || name == NULL || id >= V4L2_CID_PRIVATE_BASE || 1299 if (id == 0 || name == NULL || id >= V4L2_CID_PRIVATE_BASE ||
965 max < min ||
966 (type == V4L2_CTRL_TYPE_INTEGER && step == 0) || 1300 (type == V4L2_CTRL_TYPE_INTEGER && step == 0) ||
1301 (type == V4L2_CTRL_TYPE_BITMASK && max == 0) ||
967 (type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) || 1302 (type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
968 (type == V4L2_CTRL_TYPE_STRING && max == 0)) { 1303 (type == V4L2_CTRL_TYPE_STRING && max == 0)) {
969 handler_set_err(hdl, -ERANGE); 1304 handler_set_err(hdl, -ERANGE);
970 return NULL; 1305 return NULL;
971 } 1306 }
1307 if (type != V4L2_CTRL_TYPE_BITMASK && max < min) {
1308 handler_set_err(hdl, -ERANGE);
1309 return NULL;
1310 }
972 if ((type == V4L2_CTRL_TYPE_INTEGER || 1311 if ((type == V4L2_CTRL_TYPE_INTEGER ||
973 type == V4L2_CTRL_TYPE_MENU || 1312 type == V4L2_CTRL_TYPE_MENU ||
974 type == V4L2_CTRL_TYPE_BOOLEAN) && 1313 type == V4L2_CTRL_TYPE_BOOLEAN) &&
@@ -976,6 +1315,10 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
976 handler_set_err(hdl, -ERANGE); 1315 handler_set_err(hdl, -ERANGE);
977 return NULL; 1316 return NULL;
978 } 1317 }
1318 if (type == V4L2_CTRL_TYPE_BITMASK && ((def & ~max) || min || step)) {
1319 handler_set_err(hdl, -ERANGE);
1320 return NULL;
1321 }
979 1322
980 if (type == V4L2_CTRL_TYPE_BUTTON) 1323 if (type == V4L2_CTRL_TYPE_BUTTON)
981 flags |= V4L2_CTRL_FLAG_WRITE_ONLY; 1324 flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
@@ -991,6 +1334,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
991 } 1334 }
992 1335
993 INIT_LIST_HEAD(&ctrl->node); 1336 INIT_LIST_HEAD(&ctrl->node);
1337 INIT_LIST_HEAD(&ctrl->ev_subs);
994 ctrl->handler = hdl; 1338 ctrl->handler = hdl;
995 ctrl->ops = ops; 1339 ctrl->ops = ops;
996 ctrl->id = id; 1340 ctrl->id = id;
@@ -1132,6 +1476,9 @@ int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
1132 /* Skip handler-private controls. */ 1476 /* Skip handler-private controls. */
1133 if (ctrl->is_private) 1477 if (ctrl->is_private)
1134 continue; 1478 continue;
1479 /* And control classes */
1480 if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
1481 continue;
1135 ret = handler_new_ref(hdl, ctrl); 1482 ret = handler_new_ref(hdl, ctrl);
1136 if (ret) 1483 if (ret)
1137 break; 1484 break;
@@ -1147,7 +1494,7 @@ void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
1147 int i; 1494 int i;
1148 1495
1149 /* The first control is the master control and it must not be NULL */ 1496 /* The first control is the master control and it must not be NULL */
1150 BUG_ON(controls[0] == NULL); 1497 BUG_ON(ncontrols == 0 || controls[0] == NULL);
1151 1498
1152 for (i = 0; i < ncontrols; i++) { 1499 for (i = 0; i < ncontrols; i++) {
1153 if (controls[i]) { 1500 if (controls[i]) {
@@ -1158,18 +1505,47 @@ void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
1158} 1505}
1159EXPORT_SYMBOL(v4l2_ctrl_cluster); 1506EXPORT_SYMBOL(v4l2_ctrl_cluster);
1160 1507
1508void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
1509 u8 manual_val, bool set_volatile)
1510{
1511 struct v4l2_ctrl *master = controls[0];
1512 u32 flag;
1513 int i;
1514
1515 v4l2_ctrl_cluster(ncontrols, controls);
1516 WARN_ON(ncontrols <= 1);
1517 WARN_ON(manual_val < master->minimum || manual_val > master->maximum);
1518 master->is_auto = true;
1519 master->manual_mode_value = manual_val;
1520 master->flags |= V4L2_CTRL_FLAG_UPDATE;
1521 flag = is_cur_manual(master) ? 0 : V4L2_CTRL_FLAG_INACTIVE;
1522
1523 for (i = 1; i < ncontrols; i++)
1524 if (controls[i]) {
1525 controls[i]->is_volatile = set_volatile;
1526 controls[i]->flags |= flag;
1527 }
1528}
1529EXPORT_SYMBOL(v4l2_ctrl_auto_cluster);
1530
1161/* Activate/deactivate a control. */ 1531/* Activate/deactivate a control. */
1162void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active) 1532void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active)
1163{ 1533{
1534 /* invert since the actual flag is called 'inactive' */
1535 bool inactive = !active;
1536 bool old;
1537
1164 if (ctrl == NULL) 1538 if (ctrl == NULL)
1165 return; 1539 return;
1166 1540
1167 if (!active) 1541 if (inactive)
1168 /* set V4L2_CTRL_FLAG_INACTIVE */ 1542 /* set V4L2_CTRL_FLAG_INACTIVE */
1169 set_bit(4, &ctrl->flags); 1543 old = test_and_set_bit(4, &ctrl->flags);
1170 else 1544 else
1171 /* clear V4L2_CTRL_FLAG_INACTIVE */ 1545 /* clear V4L2_CTRL_FLAG_INACTIVE */
1172 clear_bit(4, &ctrl->flags); 1546 old = test_and_clear_bit(4, &ctrl->flags);
1547 if (old != inactive)
1548 send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
1173} 1549}
1174EXPORT_SYMBOL(v4l2_ctrl_activate); 1550EXPORT_SYMBOL(v4l2_ctrl_activate);
1175 1551
@@ -1181,15 +1557,21 @@ EXPORT_SYMBOL(v4l2_ctrl_activate);
1181 these controls. */ 1557 these controls. */
1182void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed) 1558void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
1183{ 1559{
1560 bool old;
1561
1184 if (ctrl == NULL) 1562 if (ctrl == NULL)
1185 return; 1563 return;
1186 1564
1565 v4l2_ctrl_lock(ctrl);
1187 if (grabbed) 1566 if (grabbed)
1188 /* set V4L2_CTRL_FLAG_GRABBED */ 1567 /* set V4L2_CTRL_FLAG_GRABBED */
1189 set_bit(1, &ctrl->flags); 1568 old = test_and_set_bit(1, &ctrl->flags);
1190 else 1569 else
1191 /* clear V4L2_CTRL_FLAG_GRABBED */ 1570 /* clear V4L2_CTRL_FLAG_GRABBED */
1192 clear_bit(1, &ctrl->flags); 1571 old = test_and_clear_bit(1, &ctrl->flags);
1572 if (old != grabbed)
1573 send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
1574 v4l2_ctrl_unlock(ctrl);
1193} 1575}
1194EXPORT_SYMBOL(v4l2_ctrl_grab); 1576EXPORT_SYMBOL(v4l2_ctrl_grab);
1195 1577
@@ -1217,6 +1599,9 @@ static void log_ctrl(const struct v4l2_ctrl *ctrl,
1217 case V4L2_CTRL_TYPE_MENU: 1599 case V4L2_CTRL_TYPE_MENU:
1218 printk(KERN_CONT "%s", ctrl->qmenu[ctrl->cur.val]); 1600 printk(KERN_CONT "%s", ctrl->qmenu[ctrl->cur.val]);
1219 break; 1601 break;
1602 case V4L2_CTRL_TYPE_BITMASK:
1603 printk(KERN_CONT "0x%08x", ctrl->cur.val);
1604 break;
1220 case V4L2_CTRL_TYPE_INTEGER64: 1605 case V4L2_CTRL_TYPE_INTEGER64:
1221 printk(KERN_CONT "%lld", ctrl->cur.val64); 1606 printk(KERN_CONT "%lld", ctrl->cur.val64);
1222 break; 1607 break;
@@ -1277,26 +1662,21 @@ int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
1277 int i; 1662 int i;
1278 1663
1279 /* Skip if this control was already handled by a cluster. */ 1664 /* Skip if this control was already handled by a cluster. */
1280 if (ctrl->done) 1665 /* Skip button controls and read-only controls. */
1666 if (ctrl->done || ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
1667 (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
1281 continue; 1668 continue;
1282 1669
1283 for (i = 0; i < master->ncontrols; i++) { 1670 for (i = 0; i < master->ncontrols; i++) {
1284 if (master->cluster[i]) { 1671 if (master->cluster[i]) {
1285 cur_to_new(master->cluster[i]); 1672 cur_to_new(master->cluster[i]);
1286 master->cluster[i]->is_new = 1; 1673 master->cluster[i]->is_new = 1;
1674 master->cluster[i]->done = true;
1287 } 1675 }
1288 } 1676 }
1289 1677 ret = call_op(master, s_ctrl);
1290 /* Skip button controls and read-only controls. */
1291 if (ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
1292 (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
1293 continue;
1294 ret = master->ops->s_ctrl(master);
1295 if (ret) 1678 if (ret)
1296 break; 1679 break;
1297 for (i = 0; i < master->ncontrols; i++)
1298 if (master->cluster[i])
1299 master->cluster[i]->done = true;
1300 } 1680 }
1301 mutex_unlock(&hdl->lock); 1681 mutex_unlock(&hdl->lock);
1302 return ret; 1682 return ret;
@@ -1447,18 +1827,19 @@ EXPORT_SYMBOL(v4l2_subdev_querymenu);
1447 Find the controls in the control array and do some basic checks. */ 1827 Find the controls in the control array and do some basic checks. */
1448static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, 1828static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
1449 struct v4l2_ext_controls *cs, 1829 struct v4l2_ext_controls *cs,
1450 struct ctrl_helper *helpers, 1830 struct v4l2_ctrl_helper *helpers)
1451 bool try)
1452{ 1831{
1832 struct v4l2_ctrl_helper *h;
1833 bool have_clusters = false;
1453 u32 i; 1834 u32 i;
1454 1835
1455 for (i = 0; i < cs->count; i++) { 1836 for (i = 0, h = helpers; i < cs->count; i++, h++) {
1456 struct v4l2_ext_control *c = &cs->controls[i]; 1837 struct v4l2_ext_control *c = &cs->controls[i];
1838 struct v4l2_ctrl_ref *ref;
1457 struct v4l2_ctrl *ctrl; 1839 struct v4l2_ctrl *ctrl;
1458 u32 id = c->id & V4L2_CTRL_ID_MASK; 1840 u32 id = c->id & V4L2_CTRL_ID_MASK;
1459 1841
1460 if (try) 1842 cs->error_idx = i;
1461 cs->error_idx = i;
1462 1843
1463 if (cs->ctrl_class && V4L2_CTRL_ID2CLASS(id) != cs->ctrl_class) 1844 if (cs->ctrl_class && V4L2_CTRL_ID2CLASS(id) != cs->ctrl_class)
1464 return -EINVAL; 1845 return -EINVAL;
@@ -1467,53 +1848,59 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
1467 extended controls */ 1848 extended controls */
1468 if (id >= V4L2_CID_PRIVATE_BASE) 1849 if (id >= V4L2_CID_PRIVATE_BASE)
1469 return -EINVAL; 1850 return -EINVAL;
1470 ctrl = v4l2_ctrl_find(hdl, id); 1851 ref = find_ref_lock(hdl, id);
1471 if (ctrl == NULL) 1852 if (ref == NULL)
1472 return -EINVAL; 1853 return -EINVAL;
1854 ctrl = ref->ctrl;
1473 if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED) 1855 if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED)
1474 return -EINVAL; 1856 return -EINVAL;
1475 1857
1476 helpers[i].ctrl = ctrl; 1858 if (ctrl->cluster[0]->ncontrols > 1)
1477 helpers[i].handled = false; 1859 have_clusters = true;
1860 if (ctrl->cluster[0] != ctrl)
1861 ref = find_ref_lock(hdl, ctrl->cluster[0]->id);
1862 /* Store the ref to the master control of the cluster */
1863 h->mref = ref;
1864 h->ctrl = ctrl;
1865 /* Initially set next to 0, meaning that there is no other
1866 control in this helper array belonging to the same
1867 cluster */
1868 h->next = 0;
1478 } 1869 }
1479 return 0;
1480}
1481 1870
1482typedef int (*cluster_func)(struct v4l2_ext_control *c, 1871 /* We are done if there were no controls that belong to a multi-
1483 struct v4l2_ctrl *ctrl); 1872 control cluster. */
1873 if (!have_clusters)
1874 return 0;
1484 1875
1485/* Walk over all controls in v4l2_ext_controls belonging to the same cluster 1876 /* The code below figures out in O(n) time which controls in the list
1486 and call the provided function. */ 1877 belong to the same cluster. */
1487static int cluster_walk(unsigned from,
1488 struct v4l2_ext_controls *cs,
1489 struct ctrl_helper *helpers,
1490 cluster_func f)
1491{
1492 struct v4l2_ctrl **cluster = helpers[from].ctrl->cluster;
1493 int ret = 0;
1494 int i;
1495 1878
1496 /* Find any controls from the same cluster and call the function */ 1879 /* This has to be done with the handler lock taken. */
1497 for (i = from; !ret && i < cs->count; i++) { 1880 mutex_lock(&hdl->lock);
1498 struct v4l2_ctrl *ctrl = helpers[i].ctrl;
1499 1881
1500 if (!helpers[i].handled && ctrl->cluster == cluster) 1882 /* First zero the helper field in the master control references */
1501 ret = f(&cs->controls[i], ctrl); 1883 for (i = 0; i < cs->count; i++)
1884 helpers[i].mref->helper = 0;
1885 for (i = 0, h = helpers; i < cs->count; i++, h++) {
1886 struct v4l2_ctrl_ref *mref = h->mref;
1887
1888 /* If the mref->helper is set, then it points to an earlier
1889 helper that belongs to the same cluster. */
1890 if (mref->helper) {
1891 /* Set the next field of mref->helper to the current
1892 index: this means that that earlier helper now
1893 points to the next helper in the same cluster. */
1894 mref->helper->next = i;
1895 /* mref should be set only for the first helper in the
1896 cluster, clear the others. */
1897 h->mref = NULL;
1898 }
1899 /* Point the mref helper to the current helper struct. */
1900 mref->helper = h;
1502 } 1901 }
1503 return ret; 1902 mutex_unlock(&hdl->lock);
1504} 1903 return 0;
1505
1506static void cluster_done(unsigned from,
1507 struct v4l2_ext_controls *cs,
1508 struct ctrl_helper *helpers)
1509{
1510 struct v4l2_ctrl **cluster = helpers[from].ctrl->cluster;
1511 int i;
1512
1513 /* Find any controls from the same cluster and mark them as handled */
1514 for (i = from; i < cs->count; i++)
1515 if (helpers[i].ctrl->cluster == cluster)
1516 helpers[i].handled = true;
1517} 1904}
1518 1905
1519/* Handles the corner case where cs->count == 0. It checks whether the 1906/* Handles the corner case where cs->count == 0. It checks whether the
@@ -1531,10 +1918,10 @@ static int class_check(struct v4l2_ctrl_handler *hdl, u32 ctrl_class)
1531/* Get extended controls. Allocates the helpers array if needed. */ 1918/* Get extended controls. Allocates the helpers array if needed. */
1532int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs) 1919int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
1533{ 1920{
1534 struct ctrl_helper helper[4]; 1921 struct v4l2_ctrl_helper helper[4];
1535 struct ctrl_helper *helpers = helper; 1922 struct v4l2_ctrl_helper *helpers = helper;
1536 int ret; 1923 int ret;
1537 int i; 1924 int i, j;
1538 1925
1539 cs->error_idx = cs->count; 1926 cs->error_idx = cs->count;
1540 cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class); 1927 cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class);
@@ -1551,30 +1938,46 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
1551 return -ENOMEM; 1938 return -ENOMEM;
1552 } 1939 }
1553 1940
1554 ret = prepare_ext_ctrls(hdl, cs, helpers, false); 1941 ret = prepare_ext_ctrls(hdl, cs, helpers);
1942 cs->error_idx = cs->count;
1555 1943
1556 for (i = 0; !ret && i < cs->count; i++) 1944 for (i = 0; !ret && i < cs->count; i++)
1557 if (helpers[i].ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) 1945 if (helpers[i].ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
1558 ret = -EACCES; 1946 ret = -EACCES;
1559 1947
1560 for (i = 0; !ret && i < cs->count; i++) { 1948 for (i = 0; !ret && i < cs->count; i++) {
1561 struct v4l2_ctrl *ctrl = helpers[i].ctrl; 1949 int (*ctrl_to_user)(struct v4l2_ext_control *c,
1562 struct v4l2_ctrl *master = ctrl->cluster[0]; 1950 struct v4l2_ctrl *ctrl) = cur_to_user;
1951 struct v4l2_ctrl *master;
1563 1952
1564 if (helpers[i].handled) 1953 if (helpers[i].mref == NULL)
1565 continue; 1954 continue;
1566 1955
1956 master = helpers[i].mref->ctrl;
1567 cs->error_idx = i; 1957 cs->error_idx = i;
1568 1958
1569 v4l2_ctrl_lock(master); 1959 v4l2_ctrl_lock(master);
1570 /* g_volatile_ctrl will update the current control values */ 1960
1571 if (ctrl->is_volatile && master->ops->g_volatile_ctrl) 1961 /* g_volatile_ctrl will update the new control values */
1572 ret = master->ops->g_volatile_ctrl(master); 1962 if (has_op(master, g_volatile_ctrl) && !is_cur_manual(master)) {
1573 /* If OK, then copy the current control values to the caller */ 1963 for (j = 0; j < master->ncontrols; j++)
1574 if (!ret) 1964 cur_to_new(master->cluster[j]);
1575 ret = cluster_walk(i, cs, helpers, cur_to_user); 1965 ret = call_op(master, g_volatile_ctrl);
1966 ctrl_to_user = new_to_user;
1967 }
1968 /* If OK, then copy the current (for non-volatile controls)
1969 or the new (for volatile controls) control values to the
1970 caller */
1971 if (!ret) {
1972 u32 idx = i;
1973
1974 do {
1975 ret = ctrl_to_user(cs->controls + idx,
1976 helpers[idx].ctrl);
1977 idx = helpers[idx].next;
1978 } while (!ret && idx);
1979 }
1576 v4l2_ctrl_unlock(master); 1980 v4l2_ctrl_unlock(master);
1577 cluster_done(i, cs, helpers);
1578 } 1981 }
1579 1982
1580 if (cs->count > ARRAY_SIZE(helper)) 1983 if (cs->count > ARRAY_SIZE(helper))
@@ -1594,15 +1997,21 @@ static int get_ctrl(struct v4l2_ctrl *ctrl, s32 *val)
1594{ 1997{
1595 struct v4l2_ctrl *master = ctrl->cluster[0]; 1998 struct v4l2_ctrl *master = ctrl->cluster[0];
1596 int ret = 0; 1999 int ret = 0;
2000 int i;
1597 2001
1598 if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) 2002 if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
1599 return -EACCES; 2003 return -EACCES;
1600 2004
1601 v4l2_ctrl_lock(master); 2005 v4l2_ctrl_lock(master);
1602 /* g_volatile_ctrl will update the current control values */ 2006 /* g_volatile_ctrl will update the current control values */
1603 if (ctrl->is_volatile && master->ops->g_volatile_ctrl) 2007 if (ctrl->is_volatile && !is_cur_manual(master)) {
1604 ret = master->ops->g_volatile_ctrl(master); 2008 for (i = 0; i < master->ncontrols; i++)
1605 *val = ctrl->cur.val; 2009 cur_to_new(master->cluster[i]);
2010 ret = call_op(master, g_volatile_ctrl);
2011 *val = ctrl->val;
2012 } else {
2013 *val = ctrl->cur.val;
2014 }
1606 v4l2_ctrl_unlock(master); 2015 v4l2_ctrl_unlock(master);
1607 return ret; 2016 return ret;
1608} 2017}
@@ -1638,72 +2047,61 @@ EXPORT_SYMBOL(v4l2_ctrl_g_ctrl);
1638/* Core function that calls try/s_ctrl and ensures that the new value is 2047/* Core function that calls try/s_ctrl and ensures that the new value is
1639 copied to the current value on a set. 2048 copied to the current value on a set.
1640 Must be called with ctrl->handler->lock held. */ 2049 Must be called with ctrl->handler->lock held. */
1641static int try_or_set_control_cluster(struct v4l2_ctrl *master, bool set) 2050static int try_or_set_cluster(struct v4l2_fh *fh,
2051 struct v4l2_ctrl *master, bool set)
1642{ 2052{
1643 bool try = !set; 2053 bool update_flag;
1644 int ret = 0; 2054 int ret;
1645 int i; 2055 int i;
1646 2056
1647 /* Go through the cluster and either validate the new value or 2057 /* Go through the cluster and either validate the new value or
1648 (if no new value was set), copy the current value to the new 2058 (if no new value was set), copy the current value to the new
1649 value, ensuring a consistent view for the control ops when 2059 value, ensuring a consistent view for the control ops when
1650 called. */ 2060 called. */
1651 for (i = 0; !ret && i < master->ncontrols; i++) { 2061 for (i = 0; i < master->ncontrols; i++) {
1652 struct v4l2_ctrl *ctrl = master->cluster[i]; 2062 struct v4l2_ctrl *ctrl = master->cluster[i];
1653 2063
1654 if (ctrl == NULL) 2064 if (ctrl == NULL)
1655 continue; 2065 continue;
1656 2066
1657 if (ctrl->is_new) { 2067 if (!ctrl->is_new) {
1658 /* Double check this: it may have changed since the 2068 cur_to_new(ctrl);
1659 last check in try_or_set_ext_ctrls(). */
1660 if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
1661 return -EBUSY;
1662
1663 /* Validate if required */
1664 if (!set)
1665 ret = validate_new(ctrl);
1666 continue; 2069 continue;
1667 } 2070 }
1668 /* No new value was set, so copy the current and force 2071 /* Check again: it may have changed since the
1669 a call to try_ctrl later, since the values for the cluster 2072 previous check in try_or_set_ext_ctrls(). */
1670 may now have changed and the end result might be invalid. */ 2073 if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
1671 try = true; 2074 return -EBUSY;
1672 cur_to_new(ctrl);
1673 } 2075 }
1674 2076
1675 /* For larger clusters you have to call try_ctrl again to 2077 ret = call_op(master, try_ctrl);
1676 verify that the controls are still valid after the
1677 'cur_to_new' above. */
1678 if (!ret && master->ops->try_ctrl && try)
1679 ret = master->ops->try_ctrl(master);
1680 2078
1681 /* Don't set if there is no change */ 2079 /* Don't set if there is no change */
1682 if (!ret && set && cluster_changed(master)) { 2080 if (ret || !set || !cluster_changed(master))
1683 ret = master->ops->s_ctrl(master); 2081 return ret;
1684 /* If OK, then make the new values permanent. */ 2082 ret = call_op(master, s_ctrl);
1685 if (!ret) 2083 if (ret)
1686 for (i = 0; i < master->ncontrols; i++) 2084 return ret;
1687 new_to_cur(master->cluster[i]); 2085
1688 } 2086 /* If OK, then make the new values permanent. */
1689 return ret; 2087 update_flag = is_cur_manual(master) != is_new_manual(master);
2088 for (i = 0; i < master->ncontrols; i++)
2089 new_to_cur(fh, master->cluster[i], update_flag && i > 0);
2090 return 0;
1690} 2091}
1691 2092
1692/* Try or set controls. */ 2093/* Validate controls. */
1693static int try_or_set_ext_ctrls(struct v4l2_ctrl_handler *hdl, 2094static int validate_ctrls(struct v4l2_ext_controls *cs,
1694 struct v4l2_ext_controls *cs, 2095 struct v4l2_ctrl_helper *helpers, bool set)
1695 struct ctrl_helper *helpers,
1696 bool set)
1697{ 2096{
1698 unsigned i, j; 2097 unsigned i;
1699 int ret = 0; 2098 int ret = 0;
1700 2099
1701 cs->error_idx = cs->count; 2100 cs->error_idx = cs->count;
1702 for (i = 0; i < cs->count; i++) { 2101 for (i = 0; i < cs->count; i++) {
1703 struct v4l2_ctrl *ctrl = helpers[i].ctrl; 2102 struct v4l2_ctrl *ctrl = helpers[i].ctrl;
1704 2103
1705 if (!set) 2104 cs->error_idx = i;
1706 cs->error_idx = i;
1707 2105
1708 if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) 2106 if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
1709 return -EACCES; 2107 return -EACCES;
@@ -1715,50 +2113,22 @@ static int try_or_set_ext_ctrls(struct v4l2_ctrl_handler *hdl,
1715 best-effort to avoid that. */ 2113 best-effort to avoid that. */
1716 if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) 2114 if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
1717 return -EBUSY; 2115 return -EBUSY;
2116 ret = validate_new(ctrl, &cs->controls[i]);
2117 if (ret)
2118 return ret;
1718 } 2119 }
1719 2120 return 0;
1720 for (i = 0; !ret && i < cs->count; i++) {
1721 struct v4l2_ctrl *ctrl = helpers[i].ctrl;
1722 struct v4l2_ctrl *master = ctrl->cluster[0];
1723
1724 cs->error_idx = i;
1725
1726 if (helpers[i].handled)
1727 continue;
1728
1729 v4l2_ctrl_lock(ctrl);
1730
1731 /* Reset the 'is_new' flags of the cluster */
1732 for (j = 0; j < master->ncontrols; j++)
1733 if (master->cluster[j])
1734 master->cluster[j]->is_new = 0;
1735
1736 /* Copy the new caller-supplied control values.
1737 user_to_new() sets 'is_new' to 1. */
1738 ret = cluster_walk(i, cs, helpers, user_to_new);
1739
1740 if (!ret)
1741 ret = try_or_set_control_cluster(master, set);
1742
1743 /* Copy the new values back to userspace. */
1744 if (!ret)
1745 ret = cluster_walk(i, cs, helpers, new_to_user);
1746
1747 v4l2_ctrl_unlock(ctrl);
1748 cluster_done(i, cs, helpers);
1749 }
1750 return ret;
1751} 2121}
1752 2122
1753/* Try or try-and-set controls */ 2123/* Try or try-and-set controls */
1754static int try_set_ext_ctrls(struct v4l2_ctrl_handler *hdl, 2124static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
1755 struct v4l2_ext_controls *cs, 2125 struct v4l2_ext_controls *cs,
1756 bool set) 2126 bool set)
1757{ 2127{
1758 struct ctrl_helper helper[4]; 2128 struct v4l2_ctrl_helper helper[4];
1759 struct ctrl_helper *helpers = helper; 2129 struct v4l2_ctrl_helper *helpers = helper;
2130 unsigned i, j;
1760 int ret; 2131 int ret;
1761 int i;
1762 2132
1763 cs->error_idx = cs->count; 2133 cs->error_idx = cs->count;
1764 cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class); 2134 cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class);
@@ -1774,25 +2144,49 @@ static int try_set_ext_ctrls(struct v4l2_ctrl_handler *hdl,
1774 if (!helpers) 2144 if (!helpers)
1775 return -ENOMEM; 2145 return -ENOMEM;
1776 } 2146 }
1777 ret = prepare_ext_ctrls(hdl, cs, helpers, !set); 2147 ret = prepare_ext_ctrls(hdl, cs, helpers);
1778 if (ret) 2148 if (!ret)
1779 goto free; 2149 ret = validate_ctrls(cs, helpers, set);
1780 2150 if (ret && set)
1781 /* First 'try' all controls and abort on error */
1782 ret = try_or_set_ext_ctrls(hdl, cs, helpers, false);
1783 /* If this is a 'set' operation and the initial 'try' failed,
1784 then set error_idx to count to tell the application that no
1785 controls changed value yet. */
1786 if (set)
1787 cs->error_idx = cs->count; 2151 cs->error_idx = cs->count;
1788 if (!ret && set) { 2152 for (i = 0; !ret && i < cs->count; i++) {
1789 /* Reset 'handled' state */ 2153 struct v4l2_ctrl *master;
1790 for (i = 0; i < cs->count; i++) 2154 u32 idx = i;
1791 helpers[i].handled = false; 2155
1792 ret = try_or_set_ext_ctrls(hdl, cs, helpers, true); 2156 if (helpers[i].mref == NULL)
2157 continue;
2158
2159 cs->error_idx = i;
2160 master = helpers[i].mref->ctrl;
2161 v4l2_ctrl_lock(master);
2162
2163 /* Reset the 'is_new' flags of the cluster */
2164 for (j = 0; j < master->ncontrols; j++)
2165 if (master->cluster[j])
2166 master->cluster[j]->is_new = 0;
2167
2168 /* Copy the new caller-supplied control values.
2169 user_to_new() sets 'is_new' to 1. */
2170 do {
2171 ret = user_to_new(cs->controls + idx, helpers[idx].ctrl);
2172 idx = helpers[idx].next;
2173 } while (!ret && idx);
2174
2175 if (!ret)
2176 ret = try_or_set_cluster(fh, master, set);
2177
2178 /* Copy the new values back to userspace. */
2179 if (!ret) {
2180 idx = i;
2181 do {
2182 ret = new_to_user(cs->controls + idx,
2183 helpers[idx].ctrl);
2184 idx = helpers[idx].next;
2185 } while (!ret && idx);
2186 }
2187 v4l2_ctrl_unlock(master);
1793 } 2188 }
1794 2189
1795free:
1796 if (cs->count > ARRAY_SIZE(helper)) 2190 if (cs->count > ARRAY_SIZE(helper))
1797 kfree(helpers); 2191 kfree(helpers);
1798 return ret; 2192 return ret;
@@ -1800,37 +2194,39 @@ free:
1800 2194
1801int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs) 2195int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
1802{ 2196{
1803 return try_set_ext_ctrls(hdl, cs, false); 2197 return try_set_ext_ctrls(NULL, hdl, cs, false);
1804} 2198}
1805EXPORT_SYMBOL(v4l2_try_ext_ctrls); 2199EXPORT_SYMBOL(v4l2_try_ext_ctrls);
1806 2200
1807int v4l2_s_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs) 2201int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
2202 struct v4l2_ext_controls *cs)
1808{ 2203{
1809 return try_set_ext_ctrls(hdl, cs, true); 2204 return try_set_ext_ctrls(fh, hdl, cs, true);
1810} 2205}
1811EXPORT_SYMBOL(v4l2_s_ext_ctrls); 2206EXPORT_SYMBOL(v4l2_s_ext_ctrls);
1812 2207
1813int v4l2_subdev_try_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs) 2208int v4l2_subdev_try_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
1814{ 2209{
1815 return try_set_ext_ctrls(sd->ctrl_handler, cs, false); 2210 return try_set_ext_ctrls(NULL, sd->ctrl_handler, cs, false);
1816} 2211}
1817EXPORT_SYMBOL(v4l2_subdev_try_ext_ctrls); 2212EXPORT_SYMBOL(v4l2_subdev_try_ext_ctrls);
1818 2213
1819int v4l2_subdev_s_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs) 2214int v4l2_subdev_s_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
1820{ 2215{
1821 return try_set_ext_ctrls(sd->ctrl_handler, cs, true); 2216 return try_set_ext_ctrls(NULL, sd->ctrl_handler, cs, true);
1822} 2217}
1823EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls); 2218EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls);
1824 2219
1825/* Helper function for VIDIOC_S_CTRL compatibility */ 2220/* Helper function for VIDIOC_S_CTRL compatibility */
1826static int set_ctrl(struct v4l2_ctrl *ctrl, s32 *val) 2221static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, s32 *val)
1827{ 2222{
1828 struct v4l2_ctrl *master = ctrl->cluster[0]; 2223 struct v4l2_ctrl *master = ctrl->cluster[0];
1829 int ret; 2224 int ret;
1830 int i; 2225 int i;
1831 2226
1832 if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) 2227 ret = validate_new_int(ctrl, val);
1833 return -EACCES; 2228 if (ret)
2229 return ret;
1834 2230
1835 v4l2_ctrl_lock(ctrl); 2231 v4l2_ctrl_lock(ctrl);
1836 2232
@@ -1841,28 +2237,30 @@ static int set_ctrl(struct v4l2_ctrl *ctrl, s32 *val)
1841 2237
1842 ctrl->val = *val; 2238 ctrl->val = *val;
1843 ctrl->is_new = 1; 2239 ctrl->is_new = 1;
1844 ret = try_or_set_control_cluster(master, false); 2240 ret = try_or_set_cluster(fh, master, true);
1845 if (!ret)
1846 ret = try_or_set_control_cluster(master, true);
1847 *val = ctrl->cur.val; 2241 *val = ctrl->cur.val;
1848 v4l2_ctrl_unlock(ctrl); 2242 v4l2_ctrl_unlock(ctrl);
1849 return ret; 2243 return ret;
1850} 2244}
1851 2245
1852int v4l2_s_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control) 2246int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
2247 struct v4l2_control *control)
1853{ 2248{
1854 struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id); 2249 struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
1855 2250
1856 if (ctrl == NULL || !type_is_int(ctrl)) 2251 if (ctrl == NULL || !type_is_int(ctrl))
1857 return -EINVAL; 2252 return -EINVAL;
1858 2253
1859 return set_ctrl(ctrl, &control->value); 2254 if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
2255 return -EACCES;
2256
2257 return set_ctrl(fh, ctrl, &control->value);
1860} 2258}
1861EXPORT_SYMBOL(v4l2_s_ctrl); 2259EXPORT_SYMBOL(v4l2_s_ctrl);
1862 2260
1863int v4l2_subdev_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *control) 2261int v4l2_subdev_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *control)
1864{ 2262{
1865 return v4l2_s_ctrl(sd->ctrl_handler, control); 2263 return v4l2_s_ctrl(NULL, sd->ctrl_handler, control);
1866} 2264}
1867EXPORT_SYMBOL(v4l2_subdev_s_ctrl); 2265EXPORT_SYMBOL(v4l2_subdev_s_ctrl);
1868 2266
@@ -1870,6 +2268,34 @@ int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
1870{ 2268{
1871 /* It's a driver bug if this happens. */ 2269 /* It's a driver bug if this happens. */
1872 WARN_ON(!type_is_int(ctrl)); 2270 WARN_ON(!type_is_int(ctrl));
1873 return set_ctrl(ctrl, &val); 2271 return set_ctrl(NULL, ctrl, &val);
1874} 2272}
1875EXPORT_SYMBOL(v4l2_ctrl_s_ctrl); 2273EXPORT_SYMBOL(v4l2_ctrl_s_ctrl);
2274
2275void v4l2_ctrl_add_event(struct v4l2_ctrl *ctrl,
2276 struct v4l2_subscribed_event *sev)
2277{
2278 v4l2_ctrl_lock(ctrl);
2279 list_add_tail(&sev->node, &ctrl->ev_subs);
2280 if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS &&
2281 (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL)) {
2282 struct v4l2_event ev;
2283 u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
2284
2285 if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY))
2286 changes |= V4L2_EVENT_CTRL_CH_VALUE;
2287 fill_event(&ev, ctrl, changes);
2288 v4l2_event_queue_fh(sev->fh, &ev);
2289 }
2290 v4l2_ctrl_unlock(ctrl);
2291}
2292EXPORT_SYMBOL(v4l2_ctrl_add_event);
2293
2294void v4l2_ctrl_del_event(struct v4l2_ctrl *ctrl,
2295 struct v4l2_subscribed_event *sev)
2296{
2297 v4l2_ctrl_lock(ctrl);
2298 list_del(&sev->node);
2299 v4l2_ctrl_unlock(ctrl);
2300}
2301EXPORT_SYMBOL(v4l2_ctrl_del_event);
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 4aae501f02d0..c72856c41434 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -209,6 +209,7 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
209 vdev->v4l2_dev = v4l2_dev; 209 vdev->v4l2_dev = v4l2_dev;
210 vdev->fops = &v4l2_subdev_fops; 210 vdev->fops = &v4l2_subdev_fops;
211 vdev->release = video_device_release_empty; 211 vdev->release = video_device_release_empty;
212 vdev->ctrl_handler = sd->ctrl_handler;
212 err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1, 213 err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
213 sd->owner); 214 sd->owner);
214 if (err < 0) 215 if (err < 0)
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
index 69fd343d4774..53b190cf225e 100644
--- a/drivers/media/video/v4l2-event.c
+++ b/drivers/media/video/v4l2-event.c
@@ -25,100 +25,39 @@
25#include <media/v4l2-dev.h> 25#include <media/v4l2-dev.h>
26#include <media/v4l2-fh.h> 26#include <media/v4l2-fh.h>
27#include <media/v4l2-event.h> 27#include <media/v4l2-event.h>
28#include <media/v4l2-ctrls.h>
28 29
29#include <linux/sched.h> 30#include <linux/sched.h>
30#include <linux/slab.h> 31#include <linux/slab.h>
31 32
32int v4l2_event_init(struct v4l2_fh *fh) 33static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
33{ 34{
34 fh->events = kzalloc(sizeof(*fh->events), GFP_KERNEL); 35 idx += sev->first;
35 if (fh->events == NULL) 36 return idx >= sev->elems ? idx - sev->elems : idx;
36 return -ENOMEM;
37
38 init_waitqueue_head(&fh->events->wait);
39
40 INIT_LIST_HEAD(&fh->events->free);
41 INIT_LIST_HEAD(&fh->events->available);
42 INIT_LIST_HEAD(&fh->events->subscribed);
43
44 fh->events->sequence = -1;
45
46 return 0;
47}
48EXPORT_SYMBOL_GPL(v4l2_event_init);
49
50int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
51{
52 struct v4l2_events *events = fh->events;
53 unsigned long flags;
54
55 if (!events) {
56 WARN_ON(1);
57 return -ENOMEM;
58 }
59
60 while (events->nallocated < n) {
61 struct v4l2_kevent *kev;
62
63 kev = kzalloc(sizeof(*kev), GFP_KERNEL);
64 if (kev == NULL)
65 return -ENOMEM;
66
67 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
68 list_add_tail(&kev->list, &events->free);
69 events->nallocated++;
70 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
71 }
72
73 return 0;
74}
75EXPORT_SYMBOL_GPL(v4l2_event_alloc);
76
77#define list_kfree(list, type, member) \
78 while (!list_empty(list)) { \
79 type *hi; \
80 hi = list_first_entry(list, type, member); \
81 list_del(&hi->member); \
82 kfree(hi); \
83 }
84
85void v4l2_event_free(struct v4l2_fh *fh)
86{
87 struct v4l2_events *events = fh->events;
88
89 if (!events)
90 return;
91
92 list_kfree(&events->free, struct v4l2_kevent, list);
93 list_kfree(&events->available, struct v4l2_kevent, list);
94 list_kfree(&events->subscribed, struct v4l2_subscribed_event, list);
95
96 kfree(events);
97 fh->events = NULL;
98} 37}
99EXPORT_SYMBOL_GPL(v4l2_event_free);
100 38
101static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event) 39static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
102{ 40{
103 struct v4l2_events *events = fh->events;
104 struct v4l2_kevent *kev; 41 struct v4l2_kevent *kev;
105 unsigned long flags; 42 unsigned long flags;
106 43
107 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 44 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
108 45
109 if (list_empty(&events->available)) { 46 if (list_empty(&fh->available)) {
110 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 47 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
111 return -ENOENT; 48 return -ENOENT;
112 } 49 }
113 50
114 WARN_ON(events->navailable == 0); 51 WARN_ON(fh->navailable == 0);
115 52
116 kev = list_first_entry(&events->available, struct v4l2_kevent, list); 53 kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
117 list_move(&kev->list, &events->free); 54 list_del(&kev->list);
118 events->navailable--; 55 fh->navailable--;
119 56
120 kev->event.pending = events->navailable; 57 kev->event.pending = fh->navailable;
121 *event = kev->event; 58 *event = kev->event;
59 kev->sev->first = sev_pos(kev->sev, 1);
60 kev->sev->in_use--;
122 61
123 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 62 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
124 63
@@ -128,7 +67,6 @@ static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
128int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event, 67int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
129 int nonblocking) 68 int nonblocking)
130{ 69{
131 struct v4l2_events *events = fh->events;
132 int ret; 70 int ret;
133 71
134 if (nonblocking) 72 if (nonblocking)
@@ -139,8 +77,8 @@ int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
139 mutex_unlock(fh->vdev->lock); 77 mutex_unlock(fh->vdev->lock);
140 78
141 do { 79 do {
142 ret = wait_event_interruptible(events->wait, 80 ret = wait_event_interruptible(fh->wait,
143 events->navailable != 0); 81 fh->navailable != 0);
144 if (ret < 0) 82 if (ret < 0)
145 break; 83 break;
146 84
@@ -154,23 +92,72 @@ int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
154} 92}
155EXPORT_SYMBOL_GPL(v4l2_event_dequeue); 93EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
156 94
157/* Caller must hold fh->event->lock! */ 95/* Caller must hold fh->vdev->fh_lock! */
158static struct v4l2_subscribed_event *v4l2_event_subscribed( 96static struct v4l2_subscribed_event *v4l2_event_subscribed(
159 struct v4l2_fh *fh, u32 type) 97 struct v4l2_fh *fh, u32 type, u32 id)
160{ 98{
161 struct v4l2_events *events = fh->events;
162 struct v4l2_subscribed_event *sev; 99 struct v4l2_subscribed_event *sev;
163 100
164 assert_spin_locked(&fh->vdev->fh_lock); 101 assert_spin_locked(&fh->vdev->fh_lock);
165 102
166 list_for_each_entry(sev, &events->subscribed, list) { 103 list_for_each_entry(sev, &fh->subscribed, list)
167 if (sev->type == type) 104 if (sev->type == type && sev->id == id)
168 return sev; 105 return sev;
169 }
170 106
171 return NULL; 107 return NULL;
172} 108}
173 109
110static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
111 const struct timespec *ts)
112{
113 struct v4l2_subscribed_event *sev;
114 struct v4l2_kevent *kev;
115 bool copy_payload = true;
116
117 /* Are we subscribed? */
118 sev = v4l2_event_subscribed(fh, ev->type, ev->id);
119 if (sev == NULL)
120 return;
121
122 /* Increase event sequence number on fh. */
123 fh->sequence++;
124
125 /* Do we have any free events? */
126 if (sev->in_use == sev->elems) {
127 /* no, remove the oldest one */
128 kev = sev->events + sev_pos(sev, 0);
129 list_del(&kev->list);
130 sev->in_use--;
131 sev->first = sev_pos(sev, 1);
132 fh->navailable--;
133 if (sev->elems == 1) {
134 if (sev->replace) {
135 sev->replace(&kev->event, ev);
136 copy_payload = false;
137 }
138 } else if (sev->merge) {
139 struct v4l2_kevent *second_oldest =
140 sev->events + sev_pos(sev, 0);
141 sev->merge(&kev->event, &second_oldest->event);
142 }
143 }
144
145 /* Take one and fill it. */
146 kev = sev->events + sev_pos(sev, sev->in_use);
147 kev->event.type = ev->type;
148 if (copy_payload)
149 kev->event.u = ev->u;
150 kev->event.id = ev->id;
151 kev->event.timestamp = *ts;
152 kev->event.sequence = fh->sequence;
153 sev->in_use++;
154 list_add_tail(&kev->list, &fh->available);
155
156 fh->navailable++;
157
158 wake_up_all(&fh->wait);
159}
160
174void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev) 161void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
175{ 162{
176 struct v4l2_fh *fh; 163 struct v4l2_fh *fh;
@@ -181,81 +168,95 @@ void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
181 168
182 spin_lock_irqsave(&vdev->fh_lock, flags); 169 spin_lock_irqsave(&vdev->fh_lock, flags);
183 170
184 list_for_each_entry(fh, &vdev->fh_list, list) { 171 list_for_each_entry(fh, &vdev->fh_list, list)
185 struct v4l2_events *events = fh->events; 172 __v4l2_event_queue_fh(fh, ev, &timestamp);
186 struct v4l2_kevent *kev;
187 173
188 /* Are we subscribed? */ 174 spin_unlock_irqrestore(&vdev->fh_lock, flags);
189 if (!v4l2_event_subscribed(fh, ev->type)) 175}
190 continue; 176EXPORT_SYMBOL_GPL(v4l2_event_queue);
191 177
192 /* Increase event sequence number on fh. */ 178void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
193 events->sequence++; 179{
180 unsigned long flags;
181 struct timespec timestamp;
194 182
195 /* Do we have any free events? */ 183 ktime_get_ts(&timestamp);
196 if (list_empty(&events->free))
197 continue;
198 184
199 /* Take one and fill it. */ 185 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
200 kev = list_first_entry(&events->free, struct v4l2_kevent, list); 186 __v4l2_event_queue_fh(fh, ev, &timestamp);
201 kev->event.type = ev->type; 187 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
202 kev->event.u = ev->u; 188}
203 kev->event.timestamp = timestamp; 189EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
204 kev->event.sequence = events->sequence;
205 list_move_tail(&kev->list, &events->available);
206 190
207 events->navailable++; 191int v4l2_event_pending(struct v4l2_fh *fh)
192{
193 return fh->navailable;
194}
195EXPORT_SYMBOL_GPL(v4l2_event_pending);
208 196
209 wake_up_all(&events->wait); 197static void ctrls_replace(struct v4l2_event *old, const struct v4l2_event *new)
210 } 198{
199 u32 old_changes = old->u.ctrl.changes;
211 200
212 spin_unlock_irqrestore(&vdev->fh_lock, flags); 201 old->u.ctrl = new->u.ctrl;
202 old->u.ctrl.changes |= old_changes;
213} 203}
214EXPORT_SYMBOL_GPL(v4l2_event_queue);
215 204
216int v4l2_event_pending(struct v4l2_fh *fh) 205static void ctrls_merge(const struct v4l2_event *old, struct v4l2_event *new)
217{ 206{
218 return fh->events->navailable; 207 new->u.ctrl.changes |= old->u.ctrl.changes;
219} 208}
220EXPORT_SYMBOL_GPL(v4l2_event_pending);
221 209
222int v4l2_event_subscribe(struct v4l2_fh *fh, 210int v4l2_event_subscribe(struct v4l2_fh *fh,
223 struct v4l2_event_subscription *sub) 211 struct v4l2_event_subscription *sub, unsigned elems)
224{ 212{
225 struct v4l2_events *events = fh->events; 213 struct v4l2_subscribed_event *sev, *found_ev;
226 struct v4l2_subscribed_event *sev; 214 struct v4l2_ctrl *ctrl = NULL;
227 unsigned long flags; 215 unsigned long flags;
228 216 unsigned i;
229 if (fh->events == NULL) { 217
230 WARN_ON(1); 218 if (elems < 1)
231 return -ENOMEM; 219 elems = 1;
220 if (sub->type == V4L2_EVENT_CTRL) {
221 ctrl = v4l2_ctrl_find(fh->ctrl_handler, sub->id);
222 if (ctrl == NULL)
223 return -EINVAL;
232 } 224 }
233 225
234 sev = kmalloc(sizeof(*sev), GFP_KERNEL); 226 sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
235 if (!sev) 227 if (!sev)
236 return -ENOMEM; 228 return -ENOMEM;
237 229 for (i = 0; i < elems; i++)
238 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 230 sev->events[i].sev = sev;
239 231 sev->type = sub->type;
240 if (v4l2_event_subscribed(fh, sub->type) == NULL) { 232 sev->id = sub->id;
241 INIT_LIST_HEAD(&sev->list); 233 sev->flags = sub->flags;
242 sev->type = sub->type; 234 sev->fh = fh;
243 235 sev->elems = elems;
244 list_add(&sev->list, &events->subscribed); 236 if (ctrl) {
245 sev = NULL; 237 sev->replace = ctrls_replace;
238 sev->merge = ctrls_merge;
246 } 239 }
247 240
241 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
242 found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
243 if (!found_ev)
244 list_add(&sev->list, &fh->subscribed);
248 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 245 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
249 246
250 kfree(sev); 247 /* v4l2_ctrl_add_event uses a mutex, so do this outside the spin lock */
248 if (found_ev)
249 kfree(sev);
250 else if (ctrl)
251 v4l2_ctrl_add_event(ctrl, sev);
251 252
252 return 0; 253 return 0;
253} 254}
254EXPORT_SYMBOL_GPL(v4l2_event_subscribe); 255EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
255 256
256static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh) 257void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
257{ 258{
258 struct v4l2_events *events = fh->events; 259 struct v4l2_event_subscription sub;
259 struct v4l2_subscribed_event *sev; 260 struct v4l2_subscribed_event *sev;
260 unsigned long flags; 261 unsigned long flags;
261 262
@@ -263,15 +264,18 @@ static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
263 sev = NULL; 264 sev = NULL;
264 265
265 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 266 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
266 if (!list_empty(&events->subscribed)) { 267 if (!list_empty(&fh->subscribed)) {
267 sev = list_first_entry(&events->subscribed, 268 sev = list_first_entry(&fh->subscribed,
268 struct v4l2_subscribed_event, list); 269 struct v4l2_subscribed_event, list);
269 list_del(&sev->list); 270 sub.type = sev->type;
271 sub.id = sev->id;
270 } 272 }
271 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 273 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
272 kfree(sev); 274 if (sev)
275 v4l2_event_unsubscribe(fh, &sub);
273 } while (sev); 276 } while (sev);
274} 277}
278EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
275 279
276int v4l2_event_unsubscribe(struct v4l2_fh *fh, 280int v4l2_event_unsubscribe(struct v4l2_fh *fh,
277 struct v4l2_event_subscription *sub) 281 struct v4l2_event_subscription *sub)
@@ -286,11 +290,19 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
286 290
287 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 291 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
288 292
289 sev = v4l2_event_subscribed(fh, sub->type); 293 sev = v4l2_event_subscribed(fh, sub->type, sub->id);
290 if (sev != NULL) 294 if (sev != NULL) {
291 list_del(&sev->list); 295 list_del(&sev->list);
296 sev->fh = NULL;
297 }
292 298
293 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 299 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
300 if (sev && sev->type == V4L2_EVENT_CTRL) {
301 struct v4l2_ctrl *ctrl = v4l2_ctrl_find(fh->ctrl_handler, sev->id);
302
303 if (ctrl)
304 v4l2_ctrl_del_event(ctrl, sev);
305 }
294 306
295 kfree(sev); 307 kfree(sev);
296 308
diff --git a/drivers/media/video/v4l2-fh.c b/drivers/media/video/v4l2-fh.c
index 717f71e6370e..122822d2b8b2 100644
--- a/drivers/media/video/v4l2-fh.c
+++ b/drivers/media/video/v4l2-fh.c
@@ -29,23 +29,18 @@
29#include <media/v4l2-event.h> 29#include <media/v4l2-event.h>
30#include <media/v4l2-ioctl.h> 30#include <media/v4l2-ioctl.h>
31 31
32int v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev) 32void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
33{ 33{
34 fh->vdev = vdev; 34 fh->vdev = vdev;
35 /* Inherit from video_device. May be overridden by the driver. */
36 fh->ctrl_handler = vdev->ctrl_handler;
35 INIT_LIST_HEAD(&fh->list); 37 INIT_LIST_HEAD(&fh->list);
36 set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags); 38 set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags);
37 fh->prio = V4L2_PRIORITY_UNSET; 39 fh->prio = V4L2_PRIORITY_UNSET;
38 40 init_waitqueue_head(&fh->wait);
39 /* 41 INIT_LIST_HEAD(&fh->available);
40 * fh->events only needs to be initialized if the driver 42 INIT_LIST_HEAD(&fh->subscribed);
41 * supports the VIDIOC_SUBSCRIBE_EVENT ioctl. 43 fh->sequence = -1;
42 */
43 if (vdev->ioctl_ops && vdev->ioctl_ops->vidioc_subscribe_event)
44 return v4l2_event_init(fh);
45
46 fh->events = NULL;
47
48 return 0;
49} 44}
50EXPORT_SYMBOL_GPL(v4l2_fh_init); 45EXPORT_SYMBOL_GPL(v4l2_fh_init);
51 46
@@ -91,10 +86,8 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
91{ 86{
92 if (fh->vdev == NULL) 87 if (fh->vdev == NULL)
93 return; 88 return;
94 89 v4l2_event_unsubscribe_all(fh);
95 fh->vdev = NULL; 90 fh->vdev = NULL;
96
97 v4l2_event_free(fh);
98} 91}
99EXPORT_SYMBOL_GPL(v4l2_fh_exit); 92EXPORT_SYMBOL_GPL(v4l2_fh_exit);
100 93
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 69e8c6ffcc49..002ce1363443 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -16,6 +16,7 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/version.h>
19 20
20#include <linux/videodev2.h> 21#include <linux/videodev2.h>
21 22
@@ -542,12 +543,12 @@ static long __video_do_ioctl(struct file *file,
542 struct v4l2_fh *vfh = NULL; 543 struct v4l2_fh *vfh = NULL;
543 struct v4l2_format f_copy; 544 struct v4l2_format f_copy;
544 int use_fh_prio = 0; 545 int use_fh_prio = 0;
545 long ret = -EINVAL; 546 long ret = -ENOTTY;
546 547
547 if (ops == NULL) { 548 if (ops == NULL) {
548 printk(KERN_WARNING "videodev: \"%s\" has no ioctl_ops.\n", 549 printk(KERN_WARNING "videodev: \"%s\" has no ioctl_ops.\n",
549 vfd->name); 550 vfd->name);
550 return -EINVAL; 551 return ret;
551 } 552 }
552 553
553 if ((vfd->debug & V4L2_DEBUG_IOCTL) && 554 if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
@@ -605,6 +606,7 @@ static long __video_do_ioctl(struct file *file,
605 if (!ops->vidioc_querycap) 606 if (!ops->vidioc_querycap)
606 break; 607 break;
607 608
609 cap->version = LINUX_VERSION_CODE;
608 ret = ops->vidioc_querycap(file, fh, cap); 610 ret = ops->vidioc_querycap(file, fh, cap);
609 if (!ret) 611 if (!ret)
610 dbgarg(cmd, "driver=%s, card=%s, bus=%s, " 612 dbgarg(cmd, "driver=%s, card=%s, bus=%s, "
@@ -1418,7 +1420,9 @@ static long __video_do_ioctl(struct file *file,
1418 { 1420 {
1419 struct v4l2_queryctrl *p = arg; 1421 struct v4l2_queryctrl *p = arg;
1420 1422
1421 if (vfd->ctrl_handler) 1423 if (vfh && vfh->ctrl_handler)
1424 ret = v4l2_queryctrl(vfh->ctrl_handler, p);
1425 else if (vfd->ctrl_handler)
1422 ret = v4l2_queryctrl(vfd->ctrl_handler, p); 1426 ret = v4l2_queryctrl(vfd->ctrl_handler, p);
1423 else if (ops->vidioc_queryctrl) 1427 else if (ops->vidioc_queryctrl)
1424 ret = ops->vidioc_queryctrl(file, fh, p); 1428 ret = ops->vidioc_queryctrl(file, fh, p);
@@ -1438,7 +1442,9 @@ static long __video_do_ioctl(struct file *file,
1438 { 1442 {
1439 struct v4l2_control *p = arg; 1443 struct v4l2_control *p = arg;
1440 1444
1441 if (vfd->ctrl_handler) 1445 if (vfh && vfh->ctrl_handler)
1446 ret = v4l2_g_ctrl(vfh->ctrl_handler, p);
1447 else if (vfd->ctrl_handler)
1442 ret = v4l2_g_ctrl(vfd->ctrl_handler, p); 1448 ret = v4l2_g_ctrl(vfd->ctrl_handler, p);
1443 else if (ops->vidioc_g_ctrl) 1449 else if (ops->vidioc_g_ctrl)
1444 ret = ops->vidioc_g_ctrl(file, fh, p); 1450 ret = ops->vidioc_g_ctrl(file, fh, p);
@@ -1470,14 +1476,18 @@ static long __video_do_ioctl(struct file *file,
1470 struct v4l2_ext_controls ctrls; 1476 struct v4l2_ext_controls ctrls;
1471 struct v4l2_ext_control ctrl; 1477 struct v4l2_ext_control ctrl;
1472 1478
1473 if (!vfd->ctrl_handler && 1479 if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
1474 !ops->vidioc_s_ctrl && !ops->vidioc_s_ext_ctrls) 1480 !ops->vidioc_s_ctrl && !ops->vidioc_s_ext_ctrls)
1475 break; 1481 break;
1476 1482
1477 dbgarg(cmd, "id=0x%x, value=%d\n", p->id, p->value); 1483 dbgarg(cmd, "id=0x%x, value=%d\n", p->id, p->value);
1478 1484
1485 if (vfh && vfh->ctrl_handler) {
1486 ret = v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
1487 break;
1488 }
1479 if (vfd->ctrl_handler) { 1489 if (vfd->ctrl_handler) {
1480 ret = v4l2_s_ctrl(vfd->ctrl_handler, p); 1490 ret = v4l2_s_ctrl(NULL, vfd->ctrl_handler, p);
1481 break; 1491 break;
1482 } 1492 }
1483 if (ops->vidioc_s_ctrl) { 1493 if (ops->vidioc_s_ctrl) {
@@ -1501,7 +1511,9 @@ static long __video_do_ioctl(struct file *file,
1501 struct v4l2_ext_controls *p = arg; 1511 struct v4l2_ext_controls *p = arg;
1502 1512
1503 p->error_idx = p->count; 1513 p->error_idx = p->count;
1504 if (vfd->ctrl_handler) 1514 if (vfh && vfh->ctrl_handler)
1515 ret = v4l2_g_ext_ctrls(vfh->ctrl_handler, p);
1516 else if (vfd->ctrl_handler)
1505 ret = v4l2_g_ext_ctrls(vfd->ctrl_handler, p); 1517 ret = v4l2_g_ext_ctrls(vfd->ctrl_handler, p);
1506 else if (ops->vidioc_g_ext_ctrls && check_ext_ctrls(p, 0)) 1518 else if (ops->vidioc_g_ext_ctrls && check_ext_ctrls(p, 0))
1507 ret = ops->vidioc_g_ext_ctrls(file, fh, p); 1519 ret = ops->vidioc_g_ext_ctrls(file, fh, p);
@@ -1515,11 +1527,14 @@ static long __video_do_ioctl(struct file *file,
1515 struct v4l2_ext_controls *p = arg; 1527 struct v4l2_ext_controls *p = arg;
1516 1528
1517 p->error_idx = p->count; 1529 p->error_idx = p->count;
1518 if (!vfd->ctrl_handler && !ops->vidioc_s_ext_ctrls) 1530 if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
1531 !ops->vidioc_s_ext_ctrls)
1519 break; 1532 break;
1520 v4l_print_ext_ctrls(cmd, vfd, p, 1); 1533 v4l_print_ext_ctrls(cmd, vfd, p, 1);
1521 if (vfd->ctrl_handler) 1534 if (vfh && vfh->ctrl_handler)
1522 ret = v4l2_s_ext_ctrls(vfd->ctrl_handler, p); 1535 ret = v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p);
1536 else if (vfd->ctrl_handler)
1537 ret = v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, p);
1523 else if (check_ext_ctrls(p, 0)) 1538 else if (check_ext_ctrls(p, 0))
1524 ret = ops->vidioc_s_ext_ctrls(file, fh, p); 1539 ret = ops->vidioc_s_ext_ctrls(file, fh, p);
1525 break; 1540 break;
@@ -1529,10 +1544,13 @@ static long __video_do_ioctl(struct file *file,
1529 struct v4l2_ext_controls *p = arg; 1544 struct v4l2_ext_controls *p = arg;
1530 1545
1531 p->error_idx = p->count; 1546 p->error_idx = p->count;
1532 if (!vfd->ctrl_handler && !ops->vidioc_try_ext_ctrls) 1547 if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
1548 !ops->vidioc_try_ext_ctrls)
1533 break; 1549 break;
1534 v4l_print_ext_ctrls(cmd, vfd, p, 1); 1550 v4l_print_ext_ctrls(cmd, vfd, p, 1);
1535 if (vfd->ctrl_handler) 1551 if (vfh && vfh->ctrl_handler)
1552 ret = v4l2_try_ext_ctrls(vfh->ctrl_handler, p);
1553 else if (vfd->ctrl_handler)
1536 ret = v4l2_try_ext_ctrls(vfd->ctrl_handler, p); 1554 ret = v4l2_try_ext_ctrls(vfd->ctrl_handler, p);
1537 else if (check_ext_ctrls(p, 0)) 1555 else if (check_ext_ctrls(p, 0))
1538 ret = ops->vidioc_try_ext_ctrls(file, fh, p); 1556 ret = ops->vidioc_try_ext_ctrls(file, fh, p);
@@ -1542,7 +1560,9 @@ static long __video_do_ioctl(struct file *file,
1542 { 1560 {
1543 struct v4l2_querymenu *p = arg; 1561 struct v4l2_querymenu *p = arg;
1544 1562
1545 if (vfd->ctrl_handler) 1563 if (vfh && vfh->ctrl_handler)
1564 ret = v4l2_querymenu(vfh->ctrl_handler, p);
1565 else if (vfd->ctrl_handler)
1546 ret = v4l2_querymenu(vfd->ctrl_handler, p); 1566 ret = v4l2_querymenu(vfd->ctrl_handler, p);
1547 else if (ops->vidioc_querymenu) 1567 else if (ops->vidioc_querymenu)
1548 ret = ops->vidioc_querymenu(file, fh, p); 1568 ret = ops->vidioc_querymenu(file, fh, p);
@@ -2276,7 +2296,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
2276 break; 2296 break;
2277 } 2297 }
2278 *user_ptr = (void __user *)buf->m.planes; 2298 *user_ptr = (void __user *)buf->m.planes;
2279 *kernel_ptr = (void **)&buf->m.planes; 2299 *kernel_ptr = (void *)&buf->m.planes;
2280 *array_size = sizeof(struct v4l2_plane) * buf->length; 2300 *array_size = sizeof(struct v4l2_plane) * buf->length;
2281 ret = 1; 2301 ret = 1;
2282 } 2302 }
@@ -2290,7 +2310,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
2290 2310
2291 if (ctrls->count != 0) { 2311 if (ctrls->count != 0) {
2292 *user_ptr = (void __user *)ctrls->controls; 2312 *user_ptr = (void __user *)ctrls->controls;
2293 *kernel_ptr = (void **)&ctrls->controls; 2313 *kernel_ptr = (void *)&ctrls->controls;
2294 *array_size = sizeof(struct v4l2_ext_control) 2314 *array_size = sizeof(struct v4l2_ext_control)
2295 * ctrls->count; 2315 * ctrls->count;
2296 ret = 1; 2316 ret = 1;
diff --git a/drivers/media/video/v4l2-subdev.c b/drivers/media/video/v4l2-subdev.c
index 812729ebf09e..b7967c9dc4ae 100644
--- a/drivers/media/video/v4l2-subdev.c
+++ b/drivers/media/video/v4l2-subdev.c
@@ -75,20 +75,7 @@ static int subdev_open(struct file *file)
75 return ret; 75 return ret;
76 } 76 }
77 77
78 ret = v4l2_fh_init(&subdev_fh->vfh, vdev); 78 v4l2_fh_init(&subdev_fh->vfh, vdev);
79 if (ret)
80 goto err;
81
82 if (sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) {
83 ret = v4l2_event_init(&subdev_fh->vfh);
84 if (ret)
85 goto err;
86
87 ret = v4l2_event_alloc(&subdev_fh->vfh, sd->nevents);
88 if (ret)
89 goto err;
90 }
91
92 v4l2_fh_add(&subdev_fh->vfh); 79 v4l2_fh_add(&subdev_fh->vfh);
93 file->private_data = &subdev_fh->vfh; 80 file->private_data = &subdev_fh->vfh;
94#if defined(CONFIG_MEDIA_CONTROLLER) 81#if defined(CONFIG_MEDIA_CONTROLLER)
@@ -155,25 +142,25 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
155 142
156 switch (cmd) { 143 switch (cmd) {
157 case VIDIOC_QUERYCTRL: 144 case VIDIOC_QUERYCTRL:
158 return v4l2_queryctrl(sd->ctrl_handler, arg); 145 return v4l2_queryctrl(vfh->ctrl_handler, arg);
159 146
160 case VIDIOC_QUERYMENU: 147 case VIDIOC_QUERYMENU:
161 return v4l2_querymenu(sd->ctrl_handler, arg); 148 return v4l2_querymenu(vfh->ctrl_handler, arg);
162 149
163 case VIDIOC_G_CTRL: 150 case VIDIOC_G_CTRL:
164 return v4l2_g_ctrl(sd->ctrl_handler, arg); 151 return v4l2_g_ctrl(vfh->ctrl_handler, arg);
165 152
166 case VIDIOC_S_CTRL: 153 case VIDIOC_S_CTRL:
167 return v4l2_s_ctrl(sd->ctrl_handler, arg); 154 return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);
168 155
169 case VIDIOC_G_EXT_CTRLS: 156 case VIDIOC_G_EXT_CTRLS:
170 return v4l2_g_ext_ctrls(sd->ctrl_handler, arg); 157 return v4l2_g_ext_ctrls(vfh->ctrl_handler, arg);
171 158
172 case VIDIOC_S_EXT_CTRLS: 159 case VIDIOC_S_EXT_CTRLS:
173 return v4l2_s_ext_ctrls(sd->ctrl_handler, arg); 160 return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, arg);
174 161
175 case VIDIOC_TRY_EXT_CTRLS: 162 case VIDIOC_TRY_EXT_CTRLS:
176 return v4l2_try_ext_ctrls(sd->ctrl_handler, arg); 163 return v4l2_try_ext_ctrls(vfh->ctrl_handler, arg);
177 164
178 case VIDIOC_DQEVENT: 165 case VIDIOC_DQEVENT:
179 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) 166 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
@@ -297,7 +284,7 @@ static unsigned int subdev_poll(struct file *file, poll_table *wait)
297 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) 284 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
298 return POLLERR; 285 return POLLERR;
299 286
300 poll_wait(file, &fh->events->wait, wait); 287 poll_wait(file, &fh->wait, wait);
301 288
302 if (v4l2_event_pending(fh)) 289 if (v4l2_event_pending(fh))
303 return POLLPRI; 290 return POLLPRI;
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index ddb8f4b46c03..f300deafd268 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -108,8 +108,9 @@ static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
108 if (PageHighMem(pages[0])) 108 if (PageHighMem(pages[0]))
109 /* DMA to highmem pages might not work */ 109 /* DMA to highmem pages might not work */
110 goto highmem; 110 goto highmem;
111 sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset); 111 sg_set_page(&sglist[0], pages[0],
112 size -= PAGE_SIZE - offset; 112 min_t(size_t, PAGE_SIZE - offset, size), offset);
113 size -= min_t(size_t, PAGE_SIZE - offset, size);
113 for (i = 1; i < nr_pages; i++) { 114 for (i = 1; i < nr_pages; i++) {
114 if (NULL == pages[i]) 115 if (NULL == pages[i])
115 goto nopage; 116 goto nopage;
diff --git a/drivers/media/video/videobuf2-dma-sg.c b/drivers/media/video/videobuf2-dma-sg.c
index 10a20d9509d9..065f468faf8f 100644
--- a/drivers/media/video/videobuf2-dma-sg.c
+++ b/drivers/media/video/videobuf2-dma-sg.c
@@ -48,12 +48,10 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
48 buf->sg_desc.size = size; 48 buf->sg_desc.size = size;
49 buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 49 buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
50 50
51 buf->sg_desc.sglist = vmalloc(buf->sg_desc.num_pages * 51 buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
52 sizeof(*buf->sg_desc.sglist)); 52 sizeof(*buf->sg_desc.sglist));
53 if (!buf->sg_desc.sglist) 53 if (!buf->sg_desc.sglist)
54 goto fail_sglist_alloc; 54 goto fail_sglist_alloc;
55 memset(buf->sg_desc.sglist, 0, buf->sg_desc.num_pages *
56 sizeof(*buf->sg_desc.sglist));
57 sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages); 55 sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
58 56
59 buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *), 57 buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
@@ -136,13 +134,11 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
136 last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT; 134 last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
137 buf->sg_desc.num_pages = last - first + 1; 135 buf->sg_desc.num_pages = last - first + 1;
138 136
139 buf->sg_desc.sglist = vmalloc( 137 buf->sg_desc.sglist = vzalloc(
140 buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist)); 138 buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
141 if (!buf->sg_desc.sglist) 139 if (!buf->sg_desc.sglist)
142 goto userptr_fail_sglist_alloc; 140 goto userptr_fail_sglist_alloc;
143 141
144 memset(buf->sg_desc.sglist, 0,
145 buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
146 sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages); 142 sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
147 143
148 buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *), 144 buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
diff --git a/drivers/media/video/videobuf2-memops.c b/drivers/media/video/videobuf2-memops.c
index b03c3aea5bea..569eeb3dfd50 100644
--- a/drivers/media/video/videobuf2-memops.c
+++ b/drivers/media/video/videobuf2-memops.c
@@ -176,7 +176,7 @@ int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
176 176
177 vma->vm_ops->open(vma); 177 vma->vm_ops->open(vma);
178 178
179 printk(KERN_DEBUG "%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n", 179 pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
180 __func__, paddr, vma->vm_start, size); 180 __func__, paddr, vma->vm_start, size);
181 181
182 return 0; 182 return 0;
@@ -194,7 +194,7 @@ static void vb2_common_vm_open(struct vm_area_struct *vma)
194{ 194{
195 struct vb2_vmarea_handler *h = vma->vm_private_data; 195 struct vb2_vmarea_handler *h = vma->vm_private_data;
196 196
197 printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n", 197 pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
198 __func__, h, atomic_read(h->refcount), vma->vm_start, 198 __func__, h, atomic_read(h->refcount), vma->vm_start,
199 vma->vm_end); 199 vma->vm_end);
200 200
@@ -212,7 +212,7 @@ static void vb2_common_vm_close(struct vm_area_struct *vma)
212{ 212{
213 struct vb2_vmarea_handler *h = vma->vm_private_data; 213 struct vb2_vmarea_handler *h = vma->vm_private_data;
214 214
215 printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n", 215 pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
216 __func__, h, atomic_read(h->refcount), vma->vm_start, 216 __func__, h, atomic_read(h->refcount), vma->vm_start,
217 vma->vm_end); 217 vma->vm_end);
218 218
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index d63e9d978493..52a0a3736c82 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -36,7 +36,6 @@
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/mm.h> 37#include <linux/mm.h>
38#include <linux/time.h> 38#include <linux/time.h>
39#include <linux/version.h>
40#include <linux/kmod.h> 39#include <linux/kmod.h>
41 40
42#include <linux/i2c.h> 41#include <linux/i2c.h>
@@ -61,8 +60,7 @@
61// #define VINO_DEBUG 60// #define VINO_DEBUG
62// #define VINO_DEBUG_INT 61// #define VINO_DEBUG_INT
63 62
64#define VINO_MODULE_VERSION "0.0.6" 63#define VINO_MODULE_VERSION "0.0.7"
65#define VINO_VERSION_CODE KERNEL_VERSION(0, 0, 6)
66 64
67MODULE_DESCRIPTION("SGI VINO Video4Linux2 driver"); 65MODULE_DESCRIPTION("SGI VINO Video4Linux2 driver");
68MODULE_VERSION(VINO_MODULE_VERSION); 66MODULE_VERSION(VINO_MODULE_VERSION);
@@ -2934,7 +2932,6 @@ static int vino_querycap(struct file *file, void *__fh,
2934 strcpy(cap->driver, vino_driver_name); 2932 strcpy(cap->driver, vino_driver_name);
2935 strcpy(cap->card, vino_driver_description); 2933 strcpy(cap->card, vino_driver_description);
2936 strcpy(cap->bus_info, vino_bus_name); 2934 strcpy(cap->bus_info, vino_bus_name);
2937 cap->version = VINO_VERSION_CODE;
2938 cap->capabilities = 2935 cap->capabilities =
2939 V4L2_CAP_VIDEO_CAPTURE | 2936 V4L2_CAP_VIDEO_CAPTURE |
2940 V4L2_CAP_STREAMING; 2937 V4L2_CAP_STREAMING;
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 2238a613d664..a848bd2af97f 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -22,7 +22,6 @@
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/font.h> 24#include <linux/font.h>
25#include <linux/version.h>
26#include <linux/mutex.h> 25#include <linux/mutex.h>
27#include <linux/videodev2.h> 26#include <linux/videodev2.h>
28#include <linux/kthread.h> 27#include <linux/kthread.h>
@@ -32,6 +31,7 @@
32#include <media/v4l2-ioctl.h> 31#include <media/v4l2-ioctl.h>
33#include <media/v4l2-ctrls.h> 32#include <media/v4l2-ctrls.h>
34#include <media/v4l2-fh.h> 33#include <media/v4l2-fh.h>
34#include <media/v4l2-event.h>
35#include <media/v4l2-common.h> 35#include <media/v4l2-common.h>
36 36
37#define VIVI_MODULE_NAME "vivi" 37#define VIVI_MODULE_NAME "vivi"
@@ -44,15 +44,12 @@
44#define MAX_WIDTH 1920 44#define MAX_WIDTH 1920
45#define MAX_HEIGHT 1200 45#define MAX_HEIGHT 1200
46 46
47#define VIVI_MAJOR_VERSION 0 47#define VIVI_VERSION "0.8.1"
48#define VIVI_MINOR_VERSION 8
49#define VIVI_RELEASE 0
50#define VIVI_VERSION \
51 KERNEL_VERSION(VIVI_MAJOR_VERSION, VIVI_MINOR_VERSION, VIVI_RELEASE)
52 48
53MODULE_DESCRIPTION("Video Technology Magazine Virtual Video Capture Board"); 49MODULE_DESCRIPTION("Video Technology Magazine Virtual Video Capture Board");
54MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol"); 50MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
55MODULE_LICENSE("Dual BSD/GPL"); 51MODULE_LICENSE("Dual BSD/GPL");
52MODULE_VERSION(VIVI_VERSION);
56 53
57static unsigned video_nr = -1; 54static unsigned video_nr = -1;
58module_param(video_nr, uint, 0644); 55module_param(video_nr, uint, 0644);
@@ -167,6 +164,11 @@ struct vivi_dev {
167 struct v4l2_ctrl *contrast; 164 struct v4l2_ctrl *contrast;
168 struct v4l2_ctrl *saturation; 165 struct v4l2_ctrl *saturation;
169 struct v4l2_ctrl *hue; 166 struct v4l2_ctrl *hue;
167 struct {
168 /* autogain/gain cluster */
169 struct v4l2_ctrl *autogain;
170 struct v4l2_ctrl *gain;
171 };
170 struct v4l2_ctrl *volume; 172 struct v4l2_ctrl *volume;
171 struct v4l2_ctrl *button; 173 struct v4l2_ctrl *button;
172 struct v4l2_ctrl *boolean; 174 struct v4l2_ctrl *boolean;
@@ -174,6 +176,7 @@ struct vivi_dev {
174 struct v4l2_ctrl *int64; 176 struct v4l2_ctrl *int64;
175 struct v4l2_ctrl *menu; 177 struct v4l2_ctrl *menu;
176 struct v4l2_ctrl *string; 178 struct v4l2_ctrl *string;
179 struct v4l2_ctrl *bitmask;
177 180
178 spinlock_t slock; 181 spinlock_t slock;
179 struct mutex mutex; 182 struct mutex mutex;
@@ -457,6 +460,7 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
457 unsigned ms; 460 unsigned ms;
458 char str[100]; 461 char str[100];
459 int h, line = 1; 462 int h, line = 1;
463 s32 gain;
460 464
461 if (!vbuf) 465 if (!vbuf)
462 return; 466 return;
@@ -479,6 +483,7 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
479 dev->width, dev->height, dev->input); 483 dev->width, dev->height, dev->input);
480 gen_text(dev, vbuf, line++ * 16, 16, str); 484 gen_text(dev, vbuf, line++ * 16, 16, str);
481 485
486 gain = v4l2_ctrl_g_ctrl(dev->gain);
482 mutex_lock(&dev->ctrl_handler.lock); 487 mutex_lock(&dev->ctrl_handler.lock);
483 snprintf(str, sizeof(str), " brightness %3d, contrast %3d, saturation %3d, hue %d ", 488 snprintf(str, sizeof(str), " brightness %3d, contrast %3d, saturation %3d, hue %d ",
484 dev->brightness->cur.val, 489 dev->brightness->cur.val,
@@ -486,11 +491,13 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
486 dev->saturation->cur.val, 491 dev->saturation->cur.val,
487 dev->hue->cur.val); 492 dev->hue->cur.val);
488 gen_text(dev, vbuf, line++ * 16, 16, str); 493 gen_text(dev, vbuf, line++ * 16, 16, str);
489 snprintf(str, sizeof(str), " volume %3d ", dev->volume->cur.val); 494 snprintf(str, sizeof(str), " autogain %d, gain %3d, volume %3d ",
495 dev->autogain->cur.val, gain, dev->volume->cur.val);
490 gen_text(dev, vbuf, line++ * 16, 16, str); 496 gen_text(dev, vbuf, line++ * 16, 16, str);
491 snprintf(str, sizeof(str), " int32 %d, int64 %lld ", 497 snprintf(str, sizeof(str), " int32 %d, int64 %lld, bitmask %08x ",
492 dev->int32->cur.val, 498 dev->int32->cur.val,
493 dev->int64->cur.val64); 499 dev->int64->cur.val64,
500 dev->bitmask->cur.val);
494 gen_text(dev, vbuf, line++ * 16, 16, str); 501 gen_text(dev, vbuf, line++ * 16, 16, str);
495 snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ", 502 snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ",
496 dev->boolean->cur.val, 503 dev->boolean->cur.val,
@@ -524,11 +531,13 @@ static void vivi_thread_tick(struct vivi_dev *dev)
524 spin_lock_irqsave(&dev->slock, flags); 531 spin_lock_irqsave(&dev->slock, flags);
525 if (list_empty(&dma_q->active)) { 532 if (list_empty(&dma_q->active)) {
526 dprintk(dev, 1, "No active queue to serve\n"); 533 dprintk(dev, 1, "No active queue to serve\n");
527 goto unlock; 534 spin_unlock_irqrestore(&dev->slock, flags);
535 return;
528 } 536 }
529 537
530 buf = list_entry(dma_q->active.next, struct vivi_buffer, list); 538 buf = list_entry(dma_q->active.next, struct vivi_buffer, list);
531 list_del(&buf->list); 539 list_del(&buf->list);
540 spin_unlock_irqrestore(&dev->slock, flags);
532 541
533 do_gettimeofday(&buf->vb.v4l2_buf.timestamp); 542 do_gettimeofday(&buf->vb.v4l2_buf.timestamp);
534 543
@@ -538,8 +547,6 @@ static void vivi_thread_tick(struct vivi_dev *dev)
538 547
539 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); 548 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
540 dprintk(dev, 2, "[%p/%d] done\n", buf, buf->vb.v4l2_buf.index); 549 dprintk(dev, 2, "[%p/%d] done\n", buf, buf->vb.v4l2_buf.index);
541unlock:
542 spin_unlock_irqrestore(&dev->slock, flags);
543} 550}
544 551
545#define frames_to_ms(frames) \ 552#define frames_to_ms(frames) \
@@ -812,7 +819,6 @@ static int vidioc_querycap(struct file *file, void *priv,
812 strcpy(cap->driver, "vivi"); 819 strcpy(cap->driver, "vivi");
813 strcpy(cap->card, "vivi"); 820 strcpy(cap->card, "vivi");
814 strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info)); 821 strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info));
815 cap->version = VIVI_VERSION;
816 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | \ 822 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | \
817 V4L2_CAP_READWRITE; 823 V4L2_CAP_READWRITE;
818 return 0; 824 return 0;
@@ -975,14 +981,37 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
975 if (i >= NUM_INPUTS) 981 if (i >= NUM_INPUTS)
976 return -EINVAL; 982 return -EINVAL;
977 983
984 if (i == dev->input)
985 return 0;
986
978 dev->input = i; 987 dev->input = i;
979 precalculate_bars(dev); 988 precalculate_bars(dev);
980 precalculate_line(dev); 989 precalculate_line(dev);
981 return 0; 990 return 0;
982} 991}
983 992
993static int vidioc_subscribe_event(struct v4l2_fh *fh,
994 struct v4l2_event_subscription *sub)
995{
996 switch (sub->type) {
997 case V4L2_EVENT_CTRL:
998 return v4l2_event_subscribe(fh, sub, 0);
999 default:
1000 return -EINVAL;
1001 }
1002}
1003
984/* --- controls ---------------------------------------------- */ 1004/* --- controls ---------------------------------------------- */
985 1005
1006static int vivi_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
1007{
1008 struct vivi_dev *dev = container_of(ctrl->handler, struct vivi_dev, ctrl_handler);
1009
1010 if (ctrl == dev->autogain)
1011 dev->gain->val = jiffies & 0xff;
1012 return 0;
1013}
1014
986static int vivi_s_ctrl(struct v4l2_ctrl *ctrl) 1015static int vivi_s_ctrl(struct v4l2_ctrl *ctrl)
987{ 1016{
988 struct vivi_dev *dev = container_of(ctrl->handler, struct vivi_dev, ctrl_handler); 1017 struct vivi_dev *dev = container_of(ctrl->handler, struct vivi_dev, ctrl_handler);
@@ -1010,10 +1039,17 @@ static unsigned int
1010vivi_poll(struct file *file, struct poll_table_struct *wait) 1039vivi_poll(struct file *file, struct poll_table_struct *wait)
1011{ 1040{
1012 struct vivi_dev *dev = video_drvdata(file); 1041 struct vivi_dev *dev = video_drvdata(file);
1042 struct v4l2_fh *fh = file->private_data;
1013 struct vb2_queue *q = &dev->vb_vidq; 1043 struct vb2_queue *q = &dev->vb_vidq;
1044 unsigned int res;
1014 1045
1015 dprintk(dev, 1, "%s\n", __func__); 1046 dprintk(dev, 1, "%s\n", __func__);
1016 return vb2_poll(q, file, wait); 1047 res = vb2_poll(q, file, wait);
1048 if (v4l2_event_pending(fh))
1049 res |= POLLPRI;
1050 else
1051 poll_wait(file, &fh->wait, wait);
1052 return res;
1017} 1053}
1018 1054
1019static int vivi_close(struct file *file) 1055static int vivi_close(struct file *file)
@@ -1045,6 +1081,7 @@ static int vivi_mmap(struct file *file, struct vm_area_struct *vma)
1045} 1081}
1046 1082
1047static const struct v4l2_ctrl_ops vivi_ctrl_ops = { 1083static const struct v4l2_ctrl_ops vivi_ctrl_ops = {
1084 .g_volatile_ctrl = vivi_g_volatile_ctrl,
1048 .s_ctrl = vivi_s_ctrl, 1085 .s_ctrl = vivi_s_ctrl,
1049}; 1086};
1050 1087
@@ -1117,9 +1154,20 @@ static const struct v4l2_ctrl_config vivi_ctrl_string = {
1117 .step = 1, 1154 .step = 1,
1118}; 1155};
1119 1156
1157static const struct v4l2_ctrl_config vivi_ctrl_bitmask = {
1158 .ops = &vivi_ctrl_ops,
1159 .id = VIVI_CID_CUSTOM_BASE + 6,
1160 .name = "Bitmask",
1161 .type = V4L2_CTRL_TYPE_BITMASK,
1162 .def = 0x80002000,
1163 .min = 0,
1164 .max = 0x80402010,
1165 .step = 0,
1166};
1167
1120static const struct v4l2_file_operations vivi_fops = { 1168static const struct v4l2_file_operations vivi_fops = {
1121 .owner = THIS_MODULE, 1169 .owner = THIS_MODULE,
1122 .open = v4l2_fh_open, 1170 .open = v4l2_fh_open,
1123 .release = vivi_close, 1171 .release = vivi_close,
1124 .read = vivi_read, 1172 .read = vivi_read,
1125 .poll = vivi_poll, 1173 .poll = vivi_poll,
@@ -1143,6 +1191,8 @@ static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
1143 .vidioc_s_input = vidioc_s_input, 1191 .vidioc_s_input = vidioc_s_input,
1144 .vidioc_streamon = vidioc_streamon, 1192 .vidioc_streamon = vidioc_streamon,
1145 .vidioc_streamoff = vidioc_streamoff, 1193 .vidioc_streamoff = vidioc_streamoff,
1194 .vidioc_subscribe_event = vidioc_subscribe_event,
1195 .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
1146}; 1196};
1147 1197
1148static struct video_device vivi_template = { 1198static struct video_device vivi_template = {
@@ -1213,16 +1263,22 @@ static int __init vivi_create_instance(int inst)
1213 V4L2_CID_SATURATION, 0, 255, 1, 127); 1263 V4L2_CID_SATURATION, 0, 255, 1, 127);
1214 dev->hue = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops, 1264 dev->hue = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
1215 V4L2_CID_HUE, -128, 127, 1, 0); 1265 V4L2_CID_HUE, -128, 127, 1, 0);
1266 dev->autogain = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
1267 V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
1268 dev->gain = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
1269 V4L2_CID_GAIN, 0, 255, 1, 100);
1216 dev->button = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_button, NULL); 1270 dev->button = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_button, NULL);
1217 dev->int32 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int32, NULL); 1271 dev->int32 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int32, NULL);
1218 dev->int64 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int64, NULL); 1272 dev->int64 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int64, NULL);
1219 dev->boolean = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_boolean, NULL); 1273 dev->boolean = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_boolean, NULL);
1220 dev->menu = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_menu, NULL); 1274 dev->menu = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_menu, NULL);
1221 dev->string = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_string, NULL); 1275 dev->string = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_string, NULL);
1276 dev->bitmask = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_bitmask, NULL);
1222 if (hdl->error) { 1277 if (hdl->error) {
1223 ret = hdl->error; 1278 ret = hdl->error;
1224 goto unreg_dev; 1279 goto unreg_dev;
1225 } 1280 }
1281 v4l2_ctrl_auto_cluster(2, &dev->autogain, 0, true);
1226 dev->v4l2_dev.ctrl_handler = hdl; 1282 dev->v4l2_dev.ctrl_handler = hdl;
1227 1283
1228 /* initialize locks */ 1284 /* initialize locks */
@@ -1325,9 +1381,8 @@ static int __init vivi_init(void)
1325 } 1381 }
1326 1382
1327 printk(KERN_INFO "Video Technology Magazine Virtual Video " 1383 printk(KERN_INFO "Video Technology Magazine Virtual Video "
1328 "Capture Board ver %u.%u.%u successfully loaded.\n", 1384 "Capture Board ver %s successfully loaded.\n",
1329 (VIVI_VERSION >> 16) & 0xFF, (VIVI_VERSION >> 8) & 0xFF, 1385 VIVI_VERSION);
1330 VIVI_VERSION & 0xFF);
1331 1386
1332 /* n_devs will reflect the actual number of allocated devices */ 1387 /* n_devs will reflect the actual number of allocated devices */
1333 n_devs = i; 1388 n_devs = i;
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index fa35639d0c15..453dbbd1e6e8 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -57,7 +57,6 @@
57#include <linux/module.h> 57#include <linux/module.h>
58#include <linux/init.h> 58#include <linux/init.h>
59#include <linux/delay.h> 59#include <linux/delay.h>
60#include <linux/version.h>
61#include <linux/videodev2.h> 60#include <linux/videodev2.h>
62#include <linux/slab.h> 61#include <linux/slab.h>
63#include <media/v4l2-common.h> 62#include <media/v4l2-common.h>
@@ -127,7 +126,7 @@ struct w9966 {
127MODULE_AUTHOR("Jakob Kemi <jakob.kemi@post.utfors.se>"); 126MODULE_AUTHOR("Jakob Kemi <jakob.kemi@post.utfors.se>");
128MODULE_DESCRIPTION("Winbond w9966cf WebCam driver (0.32)"); 127MODULE_DESCRIPTION("Winbond w9966cf WebCam driver (0.32)");
129MODULE_LICENSE("GPL"); 128MODULE_LICENSE("GPL");
130 129MODULE_VERSION("0.33.1");
131 130
132#ifdef MODULE 131#ifdef MODULE
133static const char *pardev[] = {[0 ... W9966_MAXCAMS] = ""}; 132static const char *pardev[] = {[0 ... W9966_MAXCAMS] = ""};
@@ -568,7 +567,6 @@ static int cam_querycap(struct file *file, void *priv,
568 strlcpy(vcap->driver, cam->v4l2_dev.name, sizeof(vcap->driver)); 567 strlcpy(vcap->driver, cam->v4l2_dev.name, sizeof(vcap->driver));
569 strlcpy(vcap->card, W9966_DRIVERNAME, sizeof(vcap->card)); 568 strlcpy(vcap->card, W9966_DRIVERNAME, sizeof(vcap->card));
570 strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info)); 569 strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
571 vcap->version = KERNEL_VERSION(0, 33, 0);
572 vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE; 570 vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
573 return 0; 571 return 0;
574} 572}
diff --git a/drivers/media/video/zoran/zoran.h b/drivers/media/video/zoran/zoran.h
index f3f640014928..d7166afc255e 100644
--- a/drivers/media/video/zoran/zoran.h
+++ b/drivers/media/video/zoran/zoran.h
@@ -41,10 +41,6 @@ struct zoran_sync {
41}; 41};
42 42
43 43
44#define MAJOR_VERSION 0 /* driver major version */
45#define MINOR_VERSION 10 /* driver minor version */
46#define RELEASE_VERSION 0 /* release version */
47
48#define ZORAN_NAME "ZORAN" /* name of the device */ 44#define ZORAN_NAME "ZORAN" /* name of the device */
49 45
50#define ZR_DEVNAME(zr) ((zr)->name) 46#define ZR_DEVNAME(zr) ((zr)->name)
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index 79b04ac0f1ad..c3602d6cd48e 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -123,9 +123,12 @@ int zr36067_debug = 1;
123module_param_named(debug, zr36067_debug, int, 0644); 123module_param_named(debug, zr36067_debug, int, 0644);
124MODULE_PARM_DESC(debug, "Debug level (0-5)"); 124MODULE_PARM_DESC(debug, "Debug level (0-5)");
125 125
126#define ZORAN_VERSION "0.10.1"
127
126MODULE_DESCRIPTION("Zoran-36057/36067 JPEG codec driver"); 128MODULE_DESCRIPTION("Zoran-36057/36067 JPEG codec driver");
127MODULE_AUTHOR("Serguei Miridonov"); 129MODULE_AUTHOR("Serguei Miridonov");
128MODULE_LICENSE("GPL"); 130MODULE_LICENSE("GPL");
131MODULE_VERSION(ZORAN_VERSION);
129 132
130#define ZR_DEVICE(subven, subdev, data) { \ 133#define ZR_DEVICE(subven, subdev, data) { \
131 .vendor = PCI_VENDOR_ID_ZORAN, .device = PCI_DEVICE_ID_ZORAN_36057, \ 134 .vendor = PCI_VENDOR_ID_ZORAN, .device = PCI_DEVICE_ID_ZORAN_36057, \
@@ -1459,8 +1462,8 @@ static int __init zoran_init(void)
1459{ 1462{
1460 int res; 1463 int res;
1461 1464
1462 printk(KERN_INFO "Zoran MJPEG board driver version %d.%d.%d\n", 1465 printk(KERN_INFO "Zoran MJPEG board driver version %s\n",
1463 MAJOR_VERSION, MINOR_VERSION, RELEASE_VERSION); 1466 ZORAN_VERSION);
1464 1467
1465 /* check the parameters we have been given, adjust if necessary */ 1468 /* check the parameters we have been given, adjust if necessary */
1466 if (v4l_nbufs < 2) 1469 if (v4l_nbufs < 2)
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index 2771d818406e..d4d05d2ace65 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -44,7 +44,6 @@
44 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 44 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
45 */ 45 */
46 46
47#include <linux/version.h>
48#include <linux/init.h> 47#include <linux/init.h>
49#include <linux/module.h> 48#include <linux/module.h>
50#include <linux/delay.h> 49#include <linux/delay.h>
@@ -1538,8 +1537,6 @@ static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability
1538 strncpy(cap->driver, "zoran", sizeof(cap->driver)-1); 1537 strncpy(cap->driver, "zoran", sizeof(cap->driver)-1);
1539 snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", 1538 snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
1540 pci_name(zr->pci_dev)); 1539 pci_name(zr->pci_dev));
1541 cap->version = KERNEL_VERSION(MAJOR_VERSION, MINOR_VERSION,
1542 RELEASE_VERSION);
1543 cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE | 1540 cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
1544 V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OVERLAY; 1541 V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OVERLAY;
1545 return 0; 1542 return 0;
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index 7dfb01e9930e..c492846c1c5a 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -29,7 +29,6 @@
29 29
30 30
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/init.h> 32#include <linux/init.h>
34#include <linux/usb.h> 33#include <linux/usb.h>
35#include <linux/vmalloc.h> 34#include <linux/vmalloc.h>
@@ -42,8 +41,7 @@
42 41
43 42
44/* Version Information */ 43/* Version Information */
45#define DRIVER_VERSION "v0.73" 44#define DRIVER_VERSION "0.7.4"
46#define ZR364XX_VERSION_CODE KERNEL_VERSION(0, 7, 3)
47#define DRIVER_AUTHOR "Antoine Jacquet, http://royale.zerezo.com/" 45#define DRIVER_AUTHOR "Antoine Jacquet, http://royale.zerezo.com/"
48#define DRIVER_DESC "Zoran 364xx" 46#define DRIVER_DESC "Zoran 364xx"
49 47
@@ -744,7 +742,6 @@ static int zr364xx_vidioc_querycap(struct file *file, void *priv,
744 strlcpy(cap->card, cam->udev->product, sizeof(cap->card)); 742 strlcpy(cap->card, cam->udev->product, sizeof(cap->card));
745 strlcpy(cap->bus_info, dev_name(&cam->udev->dev), 743 strlcpy(cap->bus_info, dev_name(&cam->udev->dev),
746 sizeof(cap->bus_info)); 744 sizeof(cap->bus_info));
747 cap->version = ZR364XX_VERSION_CODE;
748 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | 745 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
749 V4L2_CAP_READWRITE | 746 V4L2_CAP_READWRITE |
750 V4L2_CAP_STREAMING; 747 V4L2_CAP_STREAMING;
@@ -1721,3 +1718,4 @@ module_exit(zr364xx_exit);
1721MODULE_AUTHOR(DRIVER_AUTHOR); 1718MODULE_AUTHOR(DRIVER_AUTHOR);
1722MODULE_DESCRIPTION(DRIVER_DESC); 1719MODULE_DESCRIPTION(DRIVER_DESC);
1723MODULE_LICENSE("GPL"); 1720MODULE_LICENSE("GPL");
1721MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index a1d4ee6671be..ce61a5769765 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -827,7 +827,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
827 * DID_SOFT_ERROR is set. 827 * DID_SOFT_ERROR is set.
828 */ 828 */
829 if (ioc->bus_type == SPI) { 829 if (ioc->bus_type == SPI) {
830 if (pScsiReq->CDB[0] == READ_6 || 830 if ((pScsiReq->CDB[0] == READ_6 && ((pScsiReq->CDB[1] & 0x02) == 0)) ||
831 pScsiReq->CDB[0] == READ_10 || 831 pScsiReq->CDB[0] == READ_10 ||
832 pScsiReq->CDB[0] == READ_12 || 832 pScsiReq->CDB[0] == READ_12 ||
833 pScsiReq->CDB[0] == READ_16 || 833 pScsiReq->CDB[0] == READ_16 ||
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 74fbe56321ff..c8ed7b63fdf5 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -59,7 +59,7 @@
59#include <asm/dma.h> 59#include <asm/dma.h>
60#include <asm/system.h> 60#include <asm/system.h>
61#include <asm/io.h> 61#include <asm/io.h>
62#include <asm/atomic.h> 62#include <linux/atomic.h>
63 63
64#include <scsi/scsi.h> 64#include <scsi/scsi.h>
65#include <scsi/scsi_host.h> 65#include <scsi/scsi_host.h>
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 6ca938a6bf94..37b83eb6d703 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -218,7 +218,7 @@ config TWL4030_POWER
218 and load scripts controlling which resources are switched off/on 218 and load scripts controlling which resources are switched off/on
219 or reset when a sleep, wakeup or warm reset event occurs. 219 or reset when a sleep, wakeup or warm reset event occurs.
220 220
221config TWL4030_CODEC 221config MFD_TWL4030_AUDIO
222 bool 222 bool
223 depends on TWL4030_CORE 223 depends on TWL4030_CORE
224 select MFD_CORE 224 select MFD_CORE
@@ -233,6 +233,12 @@ config TWL6030_PWM
233 Say yes here if you want support for TWL6030 PWM. 233 Say yes here if you want support for TWL6030 PWM.
234 This is used to control charging LED brightness. 234 This is used to control charging LED brightness.
235 235
236config TWL6040_CORE
237 bool
238 depends on TWL4030_CORE && GENERIC_HARDIRQS
239 select MFD_CORE
240 default n
241
236config MFD_STMPE 242config MFD_STMPE
237 bool "Support STMicroelectronics STMPE" 243 bool "Support STMicroelectronics STMPE"
238 depends on I2C=y && GENERIC_HARDIRQS 244 depends on I2C=y && GENERIC_HARDIRQS
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index d7d47d2a4c76..22a280fcb705 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -40,8 +40,9 @@ obj-$(CONFIG_MENELAUS) += menelaus.o
40obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o 40obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
41obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o 41obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
42obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o 42obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
43obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o 43obj-$(CONFIG_MFD_TWL4030_AUDIO) += twl4030-audio.o
44obj-$(CONFIG_TWL6030_PWM) += twl6030-pwm.o 44obj-$(CONFIG_TWL6030_PWM) += twl6030-pwm.o
45obj-$(CONFIG_TWL6040_CORE) += twl6040-core.o twl6040-irq.o
45 46
46obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o 47obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o
47 48
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 69272e4e3459..696879e2eef7 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -287,12 +287,8 @@ static __devinitdata struct i2c_board_info timberdale_saa7706_i2c_board_info = {
287static __devinitdata struct timb_radio_platform_data 287static __devinitdata struct timb_radio_platform_data
288 timberdale_radio_platform_data = { 288 timberdale_radio_platform_data = {
289 .i2c_adapter = 0, 289 .i2c_adapter = 0,
290 .tuner = { 290 .tuner = &timberdale_tef6868_i2c_board_info,
291 .info = &timberdale_tef6868_i2c_board_info 291 .dsp = &timberdale_saa7706_i2c_board_info
292 },
293 .dsp = {
294 .info = &timberdale_saa7706_i2c_board_info
295 }
296}; 292};
297 293
298static const __devinitconst struct resource timberdale_video_resources[] = { 294static const __devinitconst struct resource timberdale_video_resources[] = {
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index b8f2a4e7f6e7..a2eddc70995c 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -110,7 +110,7 @@
110#endif 110#endif
111 111
112#if defined(CONFIG_TWL4030_CODEC) || defined(CONFIG_TWL4030_CODEC_MODULE) ||\ 112#if defined(CONFIG_TWL4030_CODEC) || defined(CONFIG_TWL4030_CODEC_MODULE) ||\
113 defined(CONFIG_SND_SOC_TWL6040) || defined(CONFIG_SND_SOC_TWL6040_MODULE) 113 defined(CONFIG_TWL6040_CORE) || defined(CONFIG_TWL6040_CORE_MODULE)
114#define twl_has_codec() true 114#define twl_has_codec() true
115#else 115#else
116#define twl_has_codec() false 116#define twl_has_codec() false
@@ -815,20 +815,19 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
815 return PTR_ERR(child); 815 return PTR_ERR(child);
816 } 816 }
817 817
818 if (twl_has_codec() && pdata->codec && twl_class_is_4030()) { 818 if (twl_has_codec() && pdata->audio && twl_class_is_4030()) {
819 sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid; 819 sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
820 child = add_child(sub_chip_id, "twl4030-audio", 820 child = add_child(sub_chip_id, "twl4030-audio",
821 pdata->codec, sizeof(*pdata->codec), 821 pdata->audio, sizeof(*pdata->audio),
822 false, 0, 0); 822 false, 0, 0);
823 if (IS_ERR(child)) 823 if (IS_ERR(child))
824 return PTR_ERR(child); 824 return PTR_ERR(child);
825 } 825 }
826 826
827 /* Phoenix codec driver is probed directly atm */ 827 if (twl_has_codec() && pdata->audio && twl_class_is_6030()) {
828 if (twl_has_codec() && pdata->codec && twl_class_is_6030()) {
829 sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid; 828 sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
830 child = add_child(sub_chip_id, "twl6040-codec", 829 child = add_child(sub_chip_id, "twl6040",
831 pdata->codec, sizeof(*pdata->codec), 830 pdata->audio, sizeof(*pdata->audio),
832 false, 0, 0); 831 false, 0, 0);
833 if (IS_ERR(child)) 832 if (IS_ERR(child))
834 return PTR_ERR(child); 833 return PTR_ERR(child);
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
new file mode 100644
index 000000000000..ae51ab5d0e5d
--- /dev/null
+++ b/drivers/mfd/twl4030-audio.c
@@ -0,0 +1,277 @@
1/*
2 * MFD driver for twl4030 audio submodule, which contains an audio codec, and
3 * the vibra control.
4 *
5 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
6 *
7 * Copyright: (C) 2009 Nokia Corporation
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25#include <linux/module.h>
26#include <linux/types.h>
27#include <linux/slab.h>
28#include <linux/kernel.h>
29#include <linux/fs.h>
30#include <linux/platform_device.h>
31#include <linux/i2c/twl.h>
32#include <linux/mfd/core.h>
33#include <linux/mfd/twl4030-audio.h>
34
35#define TWL4030_AUDIO_CELLS 2
36
37static struct platform_device *twl4030_audio_dev;
38
39struct twl4030_audio_resource {
40 int request_count;
41 u8 reg;
42 u8 mask;
43};
44
45struct twl4030_audio {
46 unsigned int audio_mclk;
47 struct mutex mutex;
48 struct twl4030_audio_resource resource[TWL4030_AUDIO_RES_MAX];
49 struct mfd_cell cells[TWL4030_AUDIO_CELLS];
50};
51
52/*
53 * Modify the resource, the function returns the content of the register
54 * after the modification.
55 */
56static int twl4030_audio_set_resource(enum twl4030_audio_res id, int enable)
57{
58 struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
59 u8 val;
60
61 twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
62 audio->resource[id].reg);
63
64 if (enable)
65 val |= audio->resource[id].mask;
66 else
67 val &= ~audio->resource[id].mask;
68
69 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
70 val, audio->resource[id].reg);
71
72 return val;
73}
74
75static inline int twl4030_audio_get_resource(enum twl4030_audio_res id)
76{
77 struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
78 u8 val;
79
80 twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
81 audio->resource[id].reg);
82
83 return val;
84}
85
86/*
87 * Enable the resource.
88 * The function returns with error or the content of the register
89 */
90int twl4030_audio_enable_resource(enum twl4030_audio_res id)
91{
92 struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
93 int val;
94
95 if (id >= TWL4030_AUDIO_RES_MAX) {
96 dev_err(&twl4030_audio_dev->dev,
97 "Invalid resource ID (%u)\n", id);
98 return -EINVAL;
99 }
100
101 mutex_lock(&audio->mutex);
102 if (!audio->resource[id].request_count)
103 /* Resource was disabled, enable it */
104 val = twl4030_audio_set_resource(id, 1);
105 else
106 val = twl4030_audio_get_resource(id);
107
108 audio->resource[id].request_count++;
109 mutex_unlock(&audio->mutex);
110
111 return val;
112}
113EXPORT_SYMBOL_GPL(twl4030_audio_enable_resource);
114
115/*
116 * Disable the resource.
117 * The function returns with error or the content of the register
118 */
119int twl4030_audio_disable_resource(unsigned id)
120{
121 struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
122 int val;
123
124 if (id >= TWL4030_AUDIO_RES_MAX) {
125 dev_err(&twl4030_audio_dev->dev,
126 "Invalid resource ID (%u)\n", id);
127 return -EINVAL;
128 }
129
130 mutex_lock(&audio->mutex);
131 if (!audio->resource[id].request_count) {
132 dev_err(&twl4030_audio_dev->dev,
133 "Resource has been disabled already (%u)\n", id);
134 mutex_unlock(&audio->mutex);
135 return -EPERM;
136 }
137 audio->resource[id].request_count--;
138
139 if (!audio->resource[id].request_count)
140 /* Resource can be disabled now */
141 val = twl4030_audio_set_resource(id, 0);
142 else
143 val = twl4030_audio_get_resource(id);
144
145 mutex_unlock(&audio->mutex);
146
147 return val;
148}
149EXPORT_SYMBOL_GPL(twl4030_audio_disable_resource);
150
151unsigned int twl4030_audio_get_mclk(void)
152{
153 struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
154
155 return audio->audio_mclk;
156}
157EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
158
159static int __devinit twl4030_audio_probe(struct platform_device *pdev)
160{
161 struct twl4030_audio *audio;
162 struct twl4030_audio_data *pdata = pdev->dev.platform_data;
163 struct mfd_cell *cell = NULL;
164 int ret, childs = 0;
165 u8 val;
166
167 if (!pdata) {
168 dev_err(&pdev->dev, "Platform data is missing\n");
169 return -EINVAL;
170 }
171
172 /* Configure APLL_INFREQ and disable APLL if enabled */
173 val = 0;
174 switch (pdata->audio_mclk) {
175 case 19200000:
176 val |= TWL4030_APLL_INFREQ_19200KHZ;
177 break;
178 case 26000000:
179 val |= TWL4030_APLL_INFREQ_26000KHZ;
180 break;
181 case 38400000:
182 val |= TWL4030_APLL_INFREQ_38400KHZ;
183 break;
184 default:
185 dev_err(&pdev->dev, "Invalid audio_mclk\n");
186 return -EINVAL;
187 }
188 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
189 val, TWL4030_REG_APLL_CTL);
190
191 audio = kzalloc(sizeof(struct twl4030_audio), GFP_KERNEL);
192 if (!audio)
193 return -ENOMEM;
194
195 platform_set_drvdata(pdev, audio);
196
197 twl4030_audio_dev = pdev;
198 mutex_init(&audio->mutex);
199 audio->audio_mclk = pdata->audio_mclk;
200
201 /* Codec power */
202 audio->resource[TWL4030_AUDIO_RES_POWER].reg = TWL4030_REG_CODEC_MODE;
203 audio->resource[TWL4030_AUDIO_RES_POWER].mask = TWL4030_CODECPDZ;
204
205 /* PLL */
206 audio->resource[TWL4030_AUDIO_RES_APLL].reg = TWL4030_REG_APLL_CTL;
207 audio->resource[TWL4030_AUDIO_RES_APLL].mask = TWL4030_APLL_EN;
208
209 if (pdata->codec) {
210 cell = &audio->cells[childs];
211 cell->name = "twl4030-codec";
212 cell->platform_data = pdata->codec;
213 cell->pdata_size = sizeof(*pdata->codec);
214 childs++;
215 }
216 if (pdata->vibra) {
217 cell = &audio->cells[childs];
218 cell->name = "twl4030-vibra";
219 cell->platform_data = pdata->vibra;
220 cell->pdata_size = sizeof(*pdata->vibra);
221 childs++;
222 }
223
224 if (childs)
225 ret = mfd_add_devices(&pdev->dev, pdev->id, audio->cells,
226 childs, NULL, 0);
227 else {
228 dev_err(&pdev->dev, "No platform data found for childs\n");
229 ret = -ENODEV;
230 }
231
232 if (!ret)
233 return 0;
234
235 platform_set_drvdata(pdev, NULL);
236 kfree(audio);
237 twl4030_audio_dev = NULL;
238 return ret;
239}
240
241static int __devexit twl4030_audio_remove(struct platform_device *pdev)
242{
243 struct twl4030_audio *audio = platform_get_drvdata(pdev);
244
245 mfd_remove_devices(&pdev->dev);
246 platform_set_drvdata(pdev, NULL);
247 kfree(audio);
248 twl4030_audio_dev = NULL;
249
250 return 0;
251}
252
253MODULE_ALIAS("platform:twl4030-audio");
254
255static struct platform_driver twl4030_audio_driver = {
256 .probe = twl4030_audio_probe,
257 .remove = __devexit_p(twl4030_audio_remove),
258 .driver = {
259 .owner = THIS_MODULE,
260 .name = "twl4030-audio",
261 },
262};
263
264static int __devinit twl4030_audio_init(void)
265{
266 return platform_driver_register(&twl4030_audio_driver);
267}
268module_init(twl4030_audio_init);
269
270static void __devexit twl4030_audio_exit(void)
271{
272 platform_driver_unregister(&twl4030_audio_driver);
273}
274module_exit(twl4030_audio_exit);
275
276MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
277MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl4030-codec.c b/drivers/mfd/twl4030-codec.c
deleted file mode 100644
index 2bf4136464c1..000000000000
--- a/drivers/mfd/twl4030-codec.c
+++ /dev/null
@@ -1,277 +0,0 @@
1/*
2 * MFD driver for twl4030 codec submodule
3 *
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 *
6 * Copyright: (C) 2009 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/slab.h>
27#include <linux/kernel.h>
28#include <linux/fs.h>
29#include <linux/platform_device.h>
30#include <linux/i2c/twl.h>
31#include <linux/mfd/core.h>
32#include <linux/mfd/twl4030-codec.h>
33
34#define TWL4030_CODEC_CELLS 2
35
36static struct platform_device *twl4030_codec_dev;
37
38struct twl4030_codec_resource {
39 int request_count;
40 u8 reg;
41 u8 mask;
42};
43
44struct twl4030_codec {
45 unsigned int audio_mclk;
46 struct mutex mutex;
47 struct twl4030_codec_resource resource[TWL4030_CODEC_RES_MAX];
48 struct mfd_cell cells[TWL4030_CODEC_CELLS];
49};
50
51/*
52 * Modify the resource, the function returns the content of the register
53 * after the modification.
54 */
55static int twl4030_codec_set_resource(enum twl4030_codec_res id, int enable)
56{
57 struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
58 u8 val;
59
60 twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
61 codec->resource[id].reg);
62
63 if (enable)
64 val |= codec->resource[id].mask;
65 else
66 val &= ~codec->resource[id].mask;
67
68 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
69 val, codec->resource[id].reg);
70
71 return val;
72}
73
74static inline int twl4030_codec_get_resource(enum twl4030_codec_res id)
75{
76 struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
77 u8 val;
78
79 twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
80 codec->resource[id].reg);
81
82 return val;
83}
84
85/*
86 * Enable the resource.
87 * The function returns with error or the content of the register
88 */
89int twl4030_codec_enable_resource(enum twl4030_codec_res id)
90{
91 struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
92 int val;
93
94 if (id >= TWL4030_CODEC_RES_MAX) {
95 dev_err(&twl4030_codec_dev->dev,
96 "Invalid resource ID (%u)\n", id);
97 return -EINVAL;
98 }
99
100 mutex_lock(&codec->mutex);
101 if (!codec->resource[id].request_count)
102 /* Resource was disabled, enable it */
103 val = twl4030_codec_set_resource(id, 1);
104 else
105 val = twl4030_codec_get_resource(id);
106
107 codec->resource[id].request_count++;
108 mutex_unlock(&codec->mutex);
109
110 return val;
111}
112EXPORT_SYMBOL_GPL(twl4030_codec_enable_resource);
113
114/*
115 * Disable the resource.
116 * The function returns with error or the content of the register
117 */
118int twl4030_codec_disable_resource(unsigned id)
119{
120 struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
121 int val;
122
123 if (id >= TWL4030_CODEC_RES_MAX) {
124 dev_err(&twl4030_codec_dev->dev,
125 "Invalid resource ID (%u)\n", id);
126 return -EINVAL;
127 }
128
129 mutex_lock(&codec->mutex);
130 if (!codec->resource[id].request_count) {
131 dev_err(&twl4030_codec_dev->dev,
132 "Resource has been disabled already (%u)\n", id);
133 mutex_unlock(&codec->mutex);
134 return -EPERM;
135 }
136 codec->resource[id].request_count--;
137
138 if (!codec->resource[id].request_count)
139 /* Resource can be disabled now */
140 val = twl4030_codec_set_resource(id, 0);
141 else
142 val = twl4030_codec_get_resource(id);
143
144 mutex_unlock(&codec->mutex);
145
146 return val;
147}
148EXPORT_SYMBOL_GPL(twl4030_codec_disable_resource);
149
150unsigned int twl4030_codec_get_mclk(void)
151{
152 struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
153
154 return codec->audio_mclk;
155}
156EXPORT_SYMBOL_GPL(twl4030_codec_get_mclk);
157
158static int __devinit twl4030_codec_probe(struct platform_device *pdev)
159{
160 struct twl4030_codec *codec;
161 struct twl4030_codec_data *pdata = pdev->dev.platform_data;
162 struct mfd_cell *cell = NULL;
163 int ret, childs = 0;
164 u8 val;
165
166 if (!pdata) {
167 dev_err(&pdev->dev, "Platform data is missing\n");
168 return -EINVAL;
169 }
170
171 /* Configure APLL_INFREQ and disable APLL if enabled */
172 val = 0;
173 switch (pdata->audio_mclk) {
174 case 19200000:
175 val |= TWL4030_APLL_INFREQ_19200KHZ;
176 break;
177 case 26000000:
178 val |= TWL4030_APLL_INFREQ_26000KHZ;
179 break;
180 case 38400000:
181 val |= TWL4030_APLL_INFREQ_38400KHZ;
182 break;
183 default:
184 dev_err(&pdev->dev, "Invalid audio_mclk\n");
185 return -EINVAL;
186 }
187 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
188 val, TWL4030_REG_APLL_CTL);
189
190 codec = kzalloc(sizeof(struct twl4030_codec), GFP_KERNEL);
191 if (!codec)
192 return -ENOMEM;
193
194 platform_set_drvdata(pdev, codec);
195
196 twl4030_codec_dev = pdev;
197 mutex_init(&codec->mutex);
198 codec->audio_mclk = pdata->audio_mclk;
199
200 /* Codec power */
201 codec->resource[TWL4030_CODEC_RES_POWER].reg = TWL4030_REG_CODEC_MODE;
202 codec->resource[TWL4030_CODEC_RES_POWER].mask = TWL4030_CODECPDZ;
203
204 /* PLL */
205 codec->resource[TWL4030_CODEC_RES_APLL].reg = TWL4030_REG_APLL_CTL;
206 codec->resource[TWL4030_CODEC_RES_APLL].mask = TWL4030_APLL_EN;
207
208 if (pdata->audio) {
209 cell = &codec->cells[childs];
210 cell->name = "twl4030-codec";
211 cell->platform_data = pdata->audio;
212 cell->pdata_size = sizeof(*pdata->audio);
213 childs++;
214 }
215 if (pdata->vibra) {
216 cell = &codec->cells[childs];
217 cell->name = "twl4030-vibra";
218 cell->platform_data = pdata->vibra;
219 cell->pdata_size = sizeof(*pdata->vibra);
220 childs++;
221 }
222
223 if (childs)
224 ret = mfd_add_devices(&pdev->dev, pdev->id, codec->cells,
225 childs, NULL, 0);
226 else {
227 dev_err(&pdev->dev, "No platform data found for childs\n");
228 ret = -ENODEV;
229 }
230
231 if (!ret)
232 return 0;
233
234 platform_set_drvdata(pdev, NULL);
235 kfree(codec);
236 twl4030_codec_dev = NULL;
237 return ret;
238}
239
240static int __devexit twl4030_codec_remove(struct platform_device *pdev)
241{
242 struct twl4030_codec *codec = platform_get_drvdata(pdev);
243
244 mfd_remove_devices(&pdev->dev);
245 platform_set_drvdata(pdev, NULL);
246 kfree(codec);
247 twl4030_codec_dev = NULL;
248
249 return 0;
250}
251
252MODULE_ALIAS("platform:twl4030-audio");
253
254static struct platform_driver twl4030_codec_driver = {
255 .probe = twl4030_codec_probe,
256 .remove = __devexit_p(twl4030_codec_remove),
257 .driver = {
258 .owner = THIS_MODULE,
259 .name = "twl4030-audio",
260 },
261};
262
263static int __devinit twl4030_codec_init(void)
264{
265 return platform_driver_register(&twl4030_codec_driver);
266}
267module_init(twl4030_codec_init);
268
269static void __devexit twl4030_codec_exit(void)
270{
271 platform_driver_unregister(&twl4030_codec_driver);
272}
273module_exit(twl4030_codec_exit);
274
275MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
276MODULE_LICENSE("GPL");
277
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
new file mode 100644
index 000000000000..24d436c2fe4a
--- /dev/null
+++ b/drivers/mfd/twl6040-core.c
@@ -0,0 +1,620 @@
1/*
2 * MFD driver for TWL6040 audio device
3 *
4 * Authors: Misael Lopez Cruz <misael.lopez@ti.com>
5 * Jorge Eduardo Candelaria <jorge.candelaria@ti.com>
6 * Peter Ujfalusi <peter.ujfalusi@ti.com>
7 *
8 * Copyright: (C) 2011 Texas Instruments, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
22 * 02110-1301 USA
23 *
24 */
25
26#include <linux/module.h>
27#include <linux/types.h>
28#include <linux/slab.h>
29#include <linux/kernel.h>
30#include <linux/platform_device.h>
31#include <linux/gpio.h>
32#include <linux/delay.h>
33#include <linux/i2c/twl.h>
34#include <linux/mfd/core.h>
35#include <linux/mfd/twl6040.h>
36
37static struct platform_device *twl6040_dev;
38
39int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
40{
41 int ret;
42 u8 val = 0;
43
44 mutex_lock(&twl6040->io_mutex);
45 ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
46 if (ret < 0) {
47 mutex_unlock(&twl6040->io_mutex);
48 return ret;
49 }
50 mutex_unlock(&twl6040->io_mutex);
51
52 return val;
53}
54EXPORT_SYMBOL(twl6040_reg_read);
55
56int twl6040_reg_write(struct twl6040 *twl6040, unsigned int reg, u8 val)
57{
58 int ret;
59
60 mutex_lock(&twl6040->io_mutex);
61 ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
62 mutex_unlock(&twl6040->io_mutex);
63
64 return ret;
65}
66EXPORT_SYMBOL(twl6040_reg_write);
67
68int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
69{
70 int ret;
71 u8 val;
72
73 mutex_lock(&twl6040->io_mutex);
74 ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
75 if (ret)
76 goto out;
77
78 val |= mask;
79 ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
80out:
81 mutex_unlock(&twl6040->io_mutex);
82 return ret;
83}
84EXPORT_SYMBOL(twl6040_set_bits);
85
86int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
87{
88 int ret;
89 u8 val;
90
91 mutex_lock(&twl6040->io_mutex);
92 ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
93 if (ret)
94 goto out;
95
96 val &= ~mask;
97 ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
98out:
99 mutex_unlock(&twl6040->io_mutex);
100 return ret;
101}
102EXPORT_SYMBOL(twl6040_clear_bits);
103
104/* twl6040 codec manual power-up sequence */
105static int twl6040_power_up(struct twl6040 *twl6040)
106{
107 u8 ldoctl, ncpctl, lppllctl;
108 int ret;
109
110 /* enable high-side LDO, reference system and internal oscillator */
111 ldoctl = TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA;
112 ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
113 if (ret)
114 return ret;
115 usleep_range(10000, 10500);
116
117 /* enable negative charge pump */
118 ncpctl = TWL6040_NCPENA;
119 ret = twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl);
120 if (ret)
121 goto ncp_err;
122 usleep_range(1000, 1500);
123
124 /* enable low-side LDO */
125 ldoctl |= TWL6040_LSLDOENA;
126 ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
127 if (ret)
128 goto lsldo_err;
129 usleep_range(1000, 1500);
130
131 /* enable low-power PLL */
132 lppllctl = TWL6040_LPLLENA;
133 ret = twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
134 if (ret)
135 goto lppll_err;
136 usleep_range(5000, 5500);
137
138 /* disable internal oscillator */
139 ldoctl &= ~TWL6040_OSCENA;
140 ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
141 if (ret)
142 goto osc_err;
143
144 return 0;
145
146osc_err:
147 lppllctl &= ~TWL6040_LPLLENA;
148 twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
149lppll_err:
150 ldoctl &= ~TWL6040_LSLDOENA;
151 twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
152lsldo_err:
153 ncpctl &= ~TWL6040_NCPENA;
154 twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl);
155ncp_err:
156 ldoctl &= ~(TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA);
157 twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
158
159 return ret;
160}
161
162/* twl6040 manual power-down sequence */
163static void twl6040_power_down(struct twl6040 *twl6040)
164{
165 u8 ncpctl, ldoctl, lppllctl;
166
167 ncpctl = twl6040_reg_read(twl6040, TWL6040_REG_NCPCTL);
168 ldoctl = twl6040_reg_read(twl6040, TWL6040_REG_LDOCTL);
169 lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL);
170
171 /* enable internal oscillator */
172 ldoctl |= TWL6040_OSCENA;
173 twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
174 usleep_range(1000, 1500);
175
176 /* disable low-power PLL */
177 lppllctl &= ~TWL6040_LPLLENA;
178 twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
179
180 /* disable low-side LDO */
181 ldoctl &= ~TWL6040_LSLDOENA;
182 twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
183
184 /* disable negative charge pump */
185 ncpctl &= ~TWL6040_NCPENA;
186 twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl);
187
188 /* disable high-side LDO, reference system and internal oscillator */
189 ldoctl &= ~(TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA);
190 twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
191}
192
193static irqreturn_t twl6040_naudint_handler(int irq, void *data)
194{
195 struct twl6040 *twl6040 = data;
196 u8 intid, status;
197
198 intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
199
200 if (intid & TWL6040_READYINT)
201 complete(&twl6040->ready);
202
203 if (intid & TWL6040_THINT) {
204 status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
205 if (status & TWL6040_TSHUTDET) {
206 dev_warn(&twl6040_dev->dev,
207 "Thermal shutdown, powering-off");
208 twl6040_power(twl6040, 0);
209 } else {
210 dev_warn(&twl6040_dev->dev,
211 "Leaving thermal shutdown, powering-on");
212 twl6040_power(twl6040, 1);
213 }
214 }
215
216 return IRQ_HANDLED;
217}
218
219static int twl6040_power_up_completion(struct twl6040 *twl6040,
220 int naudint)
221{
222 int time_left;
223 u8 intid;
224
225 time_left = wait_for_completion_timeout(&twl6040->ready,
226 msecs_to_jiffies(144));
227 if (!time_left) {
228 intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
229 if (!(intid & TWL6040_READYINT)) {
230 dev_err(&twl6040_dev->dev,
231 "timeout waiting for READYINT\n");
232 return -ETIMEDOUT;
233 }
234 }
235
236 return 0;
237}
238
239int twl6040_power(struct twl6040 *twl6040, int on)
240{
241 int audpwron = twl6040->audpwron;
242 int naudint = twl6040->irq;
243 int ret = 0;
244
245 mutex_lock(&twl6040->mutex);
246
247 if (on) {
248 /* already powered-up */
249 if (twl6040->power_count++)
250 goto out;
251
252 if (gpio_is_valid(audpwron)) {
253 /* use AUDPWRON line */
254 gpio_set_value(audpwron, 1);
255 /* wait for power-up completion */
256 ret = twl6040_power_up_completion(twl6040, naudint);
257 if (ret) {
258 dev_err(&twl6040_dev->dev,
259 "automatic power-down failed\n");
260 twl6040->power_count = 0;
261 goto out;
262 }
263 } else {
264 /* use manual power-up sequence */
265 ret = twl6040_power_up(twl6040);
266 if (ret) {
267 dev_err(&twl6040_dev->dev,
268 "manual power-up failed\n");
269 twl6040->power_count = 0;
270 goto out;
271 }
272 }
273 /* Default PLL configuration after power up */
274 twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
275 twl6040->sysclk = 19200000;
276 } else {
277 /* already powered-down */
278 if (!twl6040->power_count) {
279 dev_err(&twl6040_dev->dev,
280 "device is already powered-off\n");
281 ret = -EPERM;
282 goto out;
283 }
284
285 if (--twl6040->power_count)
286 goto out;
287
288 if (gpio_is_valid(audpwron)) {
289 /* use AUDPWRON line */
290 gpio_set_value(audpwron, 0);
291
292 /* power-down sequence latency */
293 usleep_range(500, 700);
294 } else {
295 /* use manual power-down sequence */
296 twl6040_power_down(twl6040);
297 }
298 twl6040->sysclk = 0;
299 }
300
301out:
302 mutex_unlock(&twl6040->mutex);
303 return ret;
304}
305EXPORT_SYMBOL(twl6040_power);
306
307int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
308 unsigned int freq_in, unsigned int freq_out)
309{
310 u8 hppllctl, lppllctl;
311 int ret = 0;
312
313 mutex_lock(&twl6040->mutex);
314
315 hppllctl = twl6040_reg_read(twl6040, TWL6040_REG_HPPLLCTL);
316 lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL);
317
318 switch (pll_id) {
319 case TWL6040_SYSCLK_SEL_LPPLL:
320 /* low-power PLL divider */
321 switch (freq_out) {
322 case 17640000:
323 lppllctl |= TWL6040_LPLLFIN;
324 break;
325 case 19200000:
326 lppllctl &= ~TWL6040_LPLLFIN;
327 break;
328 default:
329 dev_err(&twl6040_dev->dev,
330 "freq_out %d not supported\n", freq_out);
331 ret = -EINVAL;
332 goto pll_out;
333 }
334 twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
335
336 switch (freq_in) {
337 case 32768:
338 lppllctl |= TWL6040_LPLLENA;
339 twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
340 lppllctl);
341 mdelay(5);
342 lppllctl &= ~TWL6040_HPLLSEL;
343 twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
344 lppllctl);
345 hppllctl &= ~TWL6040_HPLLENA;
346 twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL,
347 hppllctl);
348 break;
349 default:
350 dev_err(&twl6040_dev->dev,
351 "freq_in %d not supported\n", freq_in);
352 ret = -EINVAL;
353 goto pll_out;
354 }
355 break;
356 case TWL6040_SYSCLK_SEL_HPPLL:
357 /* high-performance PLL can provide only 19.2 MHz */
358 if (freq_out != 19200000) {
359 dev_err(&twl6040_dev->dev,
360 "freq_out %d not supported\n", freq_out);
361 ret = -EINVAL;
362 goto pll_out;
363 }
364
365 hppllctl &= ~TWL6040_MCLK_MSK;
366
367 switch (freq_in) {
368 case 12000000:
369 /* PLL enabled, active mode */
370 hppllctl |= TWL6040_MCLK_12000KHZ |
371 TWL6040_HPLLENA;
372 break;
373 case 19200000:
374 /*
375 * PLL disabled
376 * (enable PLL if MCLK jitter quality
377 * doesn't meet specification)
378 */
379 hppllctl |= TWL6040_MCLK_19200KHZ;
380 break;
381 case 26000000:
382 /* PLL enabled, active mode */
383 hppllctl |= TWL6040_MCLK_26000KHZ |
384 TWL6040_HPLLENA;
385 break;
386 case 38400000:
387 /* PLL enabled, active mode */
388 hppllctl |= TWL6040_MCLK_38400KHZ |
389 TWL6040_HPLLENA;
390 break;
391 default:
392 dev_err(&twl6040_dev->dev,
393 "freq_in %d not supported\n", freq_in);
394 ret = -EINVAL;
395 goto pll_out;
396 }
397
398 /* enable clock slicer to ensure input waveform is square */
399 hppllctl |= TWL6040_HPLLSQRENA;
400
401 twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL, hppllctl);
402 usleep_range(500, 700);
403 lppllctl |= TWL6040_HPLLSEL;
404 twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
405 lppllctl &= ~TWL6040_LPLLENA;
406 twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
407 break;
408 default:
409 dev_err(&twl6040_dev->dev, "unknown pll id %d\n", pll_id);
410 ret = -EINVAL;
411 goto pll_out;
412 }
413
414 twl6040->sysclk = freq_out;
415 twl6040->pll = pll_id;
416
417pll_out:
418 mutex_unlock(&twl6040->mutex);
419 return ret;
420}
421EXPORT_SYMBOL(twl6040_set_pll);
422
423int twl6040_get_pll(struct twl6040 *twl6040)
424{
425 if (twl6040->power_count)
426 return twl6040->pll;
427 else
428 return -ENODEV;
429}
430EXPORT_SYMBOL(twl6040_get_pll);
431
432unsigned int twl6040_get_sysclk(struct twl6040 *twl6040)
433{
434 return twl6040->sysclk;
435}
436EXPORT_SYMBOL(twl6040_get_sysclk);
437
438static struct resource twl6040_vibra_rsrc[] = {
439 {
440 .flags = IORESOURCE_IRQ,
441 },
442};
443
444static struct resource twl6040_codec_rsrc[] = {
445 {
446 .flags = IORESOURCE_IRQ,
447 },
448};
449
450static int __devinit twl6040_probe(struct platform_device *pdev)
451{
452 struct twl4030_audio_data *pdata = pdev->dev.platform_data;
453 struct twl6040 *twl6040;
454 struct mfd_cell *cell = NULL;
455 int ret, children = 0;
456
457 if (!pdata) {
458 dev_err(&pdev->dev, "Platform data is missing\n");
459 return -EINVAL;
460 }
461
462 /* In order to operate correctly we need valid interrupt config */
463 if (!pdata->naudint_irq || !pdata->irq_base) {
464 dev_err(&pdev->dev, "Invalid IRQ configuration\n");
465 return -EINVAL;
466 }
467
468 twl6040 = kzalloc(sizeof(struct twl6040), GFP_KERNEL);
469 if (!twl6040)
470 return -ENOMEM;
471
472 platform_set_drvdata(pdev, twl6040);
473
474 twl6040_dev = pdev;
475 twl6040->dev = &pdev->dev;
476 twl6040->audpwron = pdata->audpwron_gpio;
477 twl6040->irq = pdata->naudint_irq;
478 twl6040->irq_base = pdata->irq_base;
479
480 mutex_init(&twl6040->mutex);
481 mutex_init(&twl6040->io_mutex);
482 init_completion(&twl6040->ready);
483
484 twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV);
485
486 if (gpio_is_valid(twl6040->audpwron)) {
487 ret = gpio_request(twl6040->audpwron, "audpwron");
488 if (ret)
489 goto gpio1_err;
490
491 ret = gpio_direction_output(twl6040->audpwron, 0);
492 if (ret)
493 goto gpio2_err;
494 }
495
496 /* ERRATA: Automatic power-up is not possible in ES1.0 */
497 if (twl6040->rev == TWL6040_REV_ES1_0)
498 twl6040->audpwron = -EINVAL;
499
500 /* codec interrupt */
501 ret = twl6040_irq_init(twl6040);
502 if (ret)
503 goto gpio2_err;
504
505 ret = request_threaded_irq(twl6040->irq_base + TWL6040_IRQ_READY,
506 NULL, twl6040_naudint_handler, 0,
507 "twl6040_irq_ready", twl6040);
508 if (ret) {
509 dev_err(twl6040->dev, "READY IRQ request failed: %d\n",
510 ret);
511 goto irq_err;
512 }
513
514 /* dual-access registers controlled by I2C only */
515 twl6040_set_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_I2CSEL);
516
517 if (pdata->codec) {
518 int irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
519
520 cell = &twl6040->cells[children];
521 cell->name = "twl6040-codec";
522 twl6040_codec_rsrc[0].start = irq;
523 twl6040_codec_rsrc[0].end = irq;
524 cell->resources = twl6040_codec_rsrc;
525 cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
526 cell->platform_data = pdata->codec;
527 cell->pdata_size = sizeof(*pdata->codec);
528 children++;
529 }
530
531 if (pdata->vibra) {
532 int irq = twl6040->irq_base + TWL6040_IRQ_VIB;
533
534 cell = &twl6040->cells[children];
535 cell->name = "twl6040-vibra";
536 twl6040_vibra_rsrc[0].start = irq;
537 twl6040_vibra_rsrc[0].end = irq;
538 cell->resources = twl6040_vibra_rsrc;
539 cell->num_resources = ARRAY_SIZE(twl6040_vibra_rsrc);
540
541 cell->platform_data = pdata->vibra;
542 cell->pdata_size = sizeof(*pdata->vibra);
543 children++;
544 }
545
546 if (children) {
547 ret = mfd_add_devices(&pdev->dev, pdev->id, twl6040->cells,
548 children, NULL, 0);
549 if (ret)
550 goto mfd_err;
551 } else {
552 dev_err(&pdev->dev, "No platform data found for children\n");
553 ret = -ENODEV;
554 goto mfd_err;
555 }
556
557 return 0;
558
559mfd_err:
560 free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
561irq_err:
562 twl6040_irq_exit(twl6040);
563gpio2_err:
564 if (gpio_is_valid(twl6040->audpwron))
565 gpio_free(twl6040->audpwron);
566gpio1_err:
567 platform_set_drvdata(pdev, NULL);
568 kfree(twl6040);
569 twl6040_dev = NULL;
570 return ret;
571}
572
573static int __devexit twl6040_remove(struct platform_device *pdev)
574{
575 struct twl6040 *twl6040 = platform_get_drvdata(pdev);
576
577 if (twl6040->power_count)
578 twl6040_power(twl6040, 0);
579
580 if (gpio_is_valid(twl6040->audpwron))
581 gpio_free(twl6040->audpwron);
582
583 free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
584 twl6040_irq_exit(twl6040);
585
586 mfd_remove_devices(&pdev->dev);
587 platform_set_drvdata(pdev, NULL);
588 kfree(twl6040);
589 twl6040_dev = NULL;
590
591 return 0;
592}
593
594static struct platform_driver twl6040_driver = {
595 .probe = twl6040_probe,
596 .remove = __devexit_p(twl6040_remove),
597 .driver = {
598 .owner = THIS_MODULE,
599 .name = "twl6040",
600 },
601};
602
603static int __devinit twl6040_init(void)
604{
605 return platform_driver_register(&twl6040_driver);
606}
607module_init(twl6040_init);
608
609static void __devexit twl6040_exit(void)
610{
611 platform_driver_unregister(&twl6040_driver);
612}
613
614module_exit(twl6040_exit);
615
616MODULE_DESCRIPTION("TWL6040 MFD");
617MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
618MODULE_AUTHOR("Jorge Eduardo Candelaria <jorge.candelaria@ti.com>");
619MODULE_LICENSE("GPL");
620MODULE_ALIAS("platform:twl6040");
diff --git a/drivers/mfd/twl6040-irq.c b/drivers/mfd/twl6040-irq.c
new file mode 100644
index 000000000000..b3f8ddaa28a8
--- /dev/null
+++ b/drivers/mfd/twl6040-irq.c
@@ -0,0 +1,191 @@
1/*
2 * Interrupt controller support for TWL6040
3 *
4 * Author: Misael Lopez Cruz <misael.lopez@ti.com>
5 *
6 * Copyright: (C) 2011 Texas Instruments, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/irq.h>
27#include <linux/interrupt.h>
28#include <linux/mfd/core.h>
29#include <linux/mfd/twl6040.h>
30
31struct twl6040_irq_data {
32 int mask;
33 int status;
34};
35
36static struct twl6040_irq_data twl6040_irqs[] = {
37 {
38 .mask = TWL6040_THMSK,
39 .status = TWL6040_THINT,
40 },
41 {
42 .mask = TWL6040_PLUGMSK,
43 .status = TWL6040_PLUGINT | TWL6040_UNPLUGINT,
44 },
45 {
46 .mask = TWL6040_HOOKMSK,
47 .status = TWL6040_HOOKINT,
48 },
49 {
50 .mask = TWL6040_HFMSK,
51 .status = TWL6040_HFINT,
52 },
53 {
54 .mask = TWL6040_VIBMSK,
55 .status = TWL6040_VIBINT,
56 },
57 {
58 .mask = TWL6040_READYMSK,
59 .status = TWL6040_READYINT,
60 },
61};
62
63static inline
64struct twl6040_irq_data *irq_to_twl6040_irq(struct twl6040 *twl6040,
65 int irq)
66{
67 return &twl6040_irqs[irq - twl6040->irq_base];
68}
69
70static void twl6040_irq_lock(struct irq_data *data)
71{
72 struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
73
74 mutex_lock(&twl6040->irq_mutex);
75}
76
77static void twl6040_irq_sync_unlock(struct irq_data *data)
78{
79 struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
80
81 /* write back to hardware any change in irq mask */
82 if (twl6040->irq_masks_cur != twl6040->irq_masks_cache) {
83 twl6040->irq_masks_cache = twl6040->irq_masks_cur;
84 twl6040_reg_write(twl6040, TWL6040_REG_INTMR,
85 twl6040->irq_masks_cur);
86 }
87
88 mutex_unlock(&twl6040->irq_mutex);
89}
90
91static void twl6040_irq_enable(struct irq_data *data)
92{
93 struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
94 struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040,
95 data->irq);
96
97 twl6040->irq_masks_cur &= ~irq_data->mask;
98}
99
100static void twl6040_irq_disable(struct irq_data *data)
101{
102 struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
103 struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040,
104 data->irq);
105
106 twl6040->irq_masks_cur |= irq_data->mask;
107}
108
109static struct irq_chip twl6040_irq_chip = {
110 .name = "twl6040",
111 .irq_bus_lock = twl6040_irq_lock,
112 .irq_bus_sync_unlock = twl6040_irq_sync_unlock,
113 .irq_enable = twl6040_irq_enable,
114 .irq_disable = twl6040_irq_disable,
115};
116
117static irqreturn_t twl6040_irq_thread(int irq, void *data)
118{
119 struct twl6040 *twl6040 = data;
120 u8 intid;
121 int i;
122
123 intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
124
125 /* apply masking and report (backwards to handle READYINT first) */
126 for (i = ARRAY_SIZE(twl6040_irqs) - 1; i >= 0; i--) {
127 if (twl6040->irq_masks_cur & twl6040_irqs[i].mask)
128 intid &= ~twl6040_irqs[i].status;
129 if (intid & twl6040_irqs[i].status)
130 handle_nested_irq(twl6040->irq_base + i);
131 }
132
133 /* ack unmasked irqs */
134 twl6040_reg_write(twl6040, TWL6040_REG_INTID, intid);
135
136 return IRQ_HANDLED;
137}
138
139int twl6040_irq_init(struct twl6040 *twl6040)
140{
141 int cur_irq, ret;
142 u8 val;
143
144 mutex_init(&twl6040->irq_mutex);
145
146 /* mask the individual interrupt sources */
147 twl6040->irq_masks_cur = TWL6040_ALLINT_MSK;
148 twl6040->irq_masks_cache = TWL6040_ALLINT_MSK;
149 twl6040_reg_write(twl6040, TWL6040_REG_INTMR, TWL6040_ALLINT_MSK);
150
151 /* Register them with genirq */
152 for (cur_irq = twl6040->irq_base;
153 cur_irq < twl6040->irq_base + ARRAY_SIZE(twl6040_irqs);
154 cur_irq++) {
155 irq_set_chip_data(cur_irq, twl6040);
156 irq_set_chip_and_handler(cur_irq, &twl6040_irq_chip,
157 handle_level_irq);
158 irq_set_nested_thread(cur_irq, 1);
159
160 /* ARM needs us to explicitly flag the IRQ as valid
161 * and will set them noprobe when we do so. */
162#ifdef CONFIG_ARM
163 set_irq_flags(cur_irq, IRQF_VALID);
164#else
165 irq_set_noprobe(cur_irq);
166#endif
167 }
168
169 ret = request_threaded_irq(twl6040->irq, NULL, twl6040_irq_thread,
170 IRQF_ONESHOT, "twl6040", twl6040);
171 if (ret) {
172 dev_err(twl6040->dev, "failed to request IRQ %d: %d\n",
173 twl6040->irq, ret);
174 return ret;
175 }
176
177 /* reset interrupts */
178 val = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
179
180 /* interrupts cleared on write */
181 twl6040_clear_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_INTCLRMODE);
182
183 return 0;
184}
185EXPORT_SYMBOL(twl6040_irq_init);
186
187void twl6040_irq_exit(struct twl6040 *twl6040)
188{
189 free_irq(twl6040->irq, twl6040);
190}
191EXPORT_SYMBOL(twl6040_irq_exit);
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index b05db55c8c8e..21b28fc6d912 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -26,7 +26,7 @@
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28 28
29#include <asm/atomic.h> 29#include <linux/atomic.h>
30#include <asm/io.h> 30#include <asm/io.h>
31 31
32#define PHANTOM_VERSION "n0.9.8" 32#define PHANTOM_VERSION "n0.9.8"
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 6df5a55da110..053d36caf955 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -45,7 +45,7 @@
45 45
46MODULE_AUTHOR("VMware, Inc."); 46MODULE_AUTHOR("VMware, Inc.");
47MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver"); 47MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
48MODULE_VERSION("1.2.1.2-k"); 48MODULE_VERSION("1.2.1.3-k");
49MODULE_ALIAS("dmi:*:svnVMware*:*"); 49MODULE_ALIAS("dmi:*:svnVMware*:*");
50MODULE_ALIAS("vmware_vmmemctl"); 50MODULE_ALIAS("vmware_vmmemctl");
51MODULE_LICENSE("GPL"); 51MODULE_LICENSE("GPL");
@@ -215,7 +215,6 @@ struct vmballoon {
215}; 215};
216 216
217static struct vmballoon balloon; 217static struct vmballoon balloon;
218static struct workqueue_struct *vmballoon_wq;
219 218
220/* 219/*
221 * Send "start" command to the host, communicating supported version 220 * Send "start" command to the host, communicating supported version
@@ -674,7 +673,12 @@ static void vmballoon_work(struct work_struct *work)
674 vmballoon_deflate(b); 673 vmballoon_deflate(b);
675 } 674 }
676 675
677 queue_delayed_work(vmballoon_wq, dwork, round_jiffies_relative(HZ)); 676 /*
677 * We are using a freezable workqueue so that balloon operations are
678 * stopped while the system transitions to/from sleep/hibernation.
679 */
680 queue_delayed_work(system_freezable_wq,
681 dwork, round_jiffies_relative(HZ));
678} 682}
679 683
680/* 684/*
@@ -785,12 +789,6 @@ static int __init vmballoon_init(void)
785 if (x86_hyper != &x86_hyper_vmware) 789 if (x86_hyper != &x86_hyper_vmware)
786 return -ENODEV; 790 return -ENODEV;
787 791
788 vmballoon_wq = create_freezable_workqueue("vmmemctl");
789 if (!vmballoon_wq) {
790 pr_err("failed to create workqueue\n");
791 return -ENOMEM;
792 }
793
794 INIT_LIST_HEAD(&balloon.pages); 792 INIT_LIST_HEAD(&balloon.pages);
795 INIT_LIST_HEAD(&balloon.refused_pages); 793 INIT_LIST_HEAD(&balloon.refused_pages);
796 794
@@ -805,34 +803,27 @@ static int __init vmballoon_init(void)
805 */ 803 */
806 if (!vmballoon_send_start(&balloon)) { 804 if (!vmballoon_send_start(&balloon)) {
807 pr_err("failed to send start command to the host\n"); 805 pr_err("failed to send start command to the host\n");
808 error = -EIO; 806 return -EIO;
809 goto fail;
810 } 807 }
811 808
812 if (!vmballoon_send_guest_id(&balloon)) { 809 if (!vmballoon_send_guest_id(&balloon)) {
813 pr_err("failed to send guest ID to the host\n"); 810 pr_err("failed to send guest ID to the host\n");
814 error = -EIO; 811 return -EIO;
815 goto fail;
816 } 812 }
817 813
818 error = vmballoon_debugfs_init(&balloon); 814 error = vmballoon_debugfs_init(&balloon);
819 if (error) 815 if (error)
820 goto fail; 816 return error;
821 817
822 queue_delayed_work(vmballoon_wq, &balloon.dwork, 0); 818 queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);
823 819
824 return 0; 820 return 0;
825
826fail:
827 destroy_workqueue(vmballoon_wq);
828 return error;
829} 821}
830module_init(vmballoon_init); 822module_init(vmballoon_init);
831 823
832static void __exit vmballoon_exit(void) 824static void __exit vmballoon_exit(void)
833{ 825{
834 cancel_delayed_work_sync(&balloon.dwork); 826 cancel_delayed_work_sync(&balloon.dwork);
835 destroy_workqueue(vmballoon_wq);
836 827
837 vmballoon_debugfs_exit(&balloon); 828 vmballoon_debugfs_exit(&balloon);
838 829
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 710b706f4fcf..9ebfb4b482f5 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -20,7 +20,9 @@
20#include <linux/mmc/host.h> 20#include <linux/mmc/host.h>
21#include <linux/mmc/mmc.h> 21#include <linux/mmc/mmc.h>
22#include <linux/mmc/sdio.h> 22#include <linux/mmc/sdio.h>
23#include <mach/hardware.h> 23#include <linux/of.h>
24#include <linux/of_device.h>
25#include <linux/of_gpio.h>
24#include <mach/esdhc.h> 26#include <mach/esdhc.h>
25#include "sdhci-pltfm.h" 27#include "sdhci-pltfm.h"
26#include "sdhci-esdhc.h" 28#include "sdhci-esdhc.h"
@@ -29,7 +31,6 @@
29#define SDHCI_VENDOR_SPEC 0xC0 31#define SDHCI_VENDOR_SPEC 0xC0
30#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 32#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
31 33
32#define ESDHC_FLAG_GPIO_FOR_CD (1 << 0)
33/* 34/*
34 * The CMDTYPE of the CMD register (offset 0xE) should be set to 35 * The CMDTYPE of the CMD register (offset 0xE) should be set to
35 * "11" when the STOP CMD12 is issued on imx53 to abort one 36 * "11" when the STOP CMD12 is issued on imx53 to abort one
@@ -43,10 +44,67 @@
43 */ 44 */
44#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1) 45#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1)
45 46
47enum imx_esdhc_type {
48 IMX25_ESDHC,
49 IMX35_ESDHC,
50 IMX51_ESDHC,
51 IMX53_ESDHC,
52};
53
46struct pltfm_imx_data { 54struct pltfm_imx_data {
47 int flags; 55 int flags;
48 u32 scratchpad; 56 u32 scratchpad;
57 enum imx_esdhc_type devtype;
58 struct esdhc_platform_data boarddata;
59};
60
61static struct platform_device_id imx_esdhc_devtype[] = {
62 {
63 .name = "sdhci-esdhc-imx25",
64 .driver_data = IMX25_ESDHC,
65 }, {
66 .name = "sdhci-esdhc-imx35",
67 .driver_data = IMX35_ESDHC,
68 }, {
69 .name = "sdhci-esdhc-imx51",
70 .driver_data = IMX51_ESDHC,
71 }, {
72 .name = "sdhci-esdhc-imx53",
73 .driver_data = IMX53_ESDHC,
74 }, {
75 /* sentinel */
76 }
49}; 77};
78MODULE_DEVICE_TABLE(platform, imx_esdhc_devtype);
79
80static const struct of_device_id imx_esdhc_dt_ids[] = {
81 { .compatible = "fsl,imx25-esdhc", .data = &imx_esdhc_devtype[IMX25_ESDHC], },
82 { .compatible = "fsl,imx35-esdhc", .data = &imx_esdhc_devtype[IMX35_ESDHC], },
83 { .compatible = "fsl,imx51-esdhc", .data = &imx_esdhc_devtype[IMX51_ESDHC], },
84 { .compatible = "fsl,imx53-esdhc", .data = &imx_esdhc_devtype[IMX53_ESDHC], },
85 { /* sentinel */ }
86};
87MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
88
89static inline int is_imx25_esdhc(struct pltfm_imx_data *data)
90{
91 return data->devtype == IMX25_ESDHC;
92}
93
94static inline int is_imx35_esdhc(struct pltfm_imx_data *data)
95{
96 return data->devtype == IMX35_ESDHC;
97}
98
99static inline int is_imx51_esdhc(struct pltfm_imx_data *data)
100{
101 return data->devtype == IMX51_ESDHC;
102}
103
104static inline int is_imx53_esdhc(struct pltfm_imx_data *data)
105{
106 return data->devtype == IMX53_ESDHC;
107}
50 108
51static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) 109static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
52{ 110{
@@ -60,17 +118,14 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
60{ 118{
61 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 119 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
62 struct pltfm_imx_data *imx_data = pltfm_host->priv; 120 struct pltfm_imx_data *imx_data = pltfm_host->priv;
121 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
63 122
64 /* fake CARD_PRESENT flag on mx25/35 */ 123 /* fake CARD_PRESENT flag */
65 u32 val = readl(host->ioaddr + reg); 124 u32 val = readl(host->ioaddr + reg);
66 125
67 if (unlikely((reg == SDHCI_PRESENT_STATE) 126 if (unlikely((reg == SDHCI_PRESENT_STATE)
68 && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD))) { 127 && gpio_is_valid(boarddata->cd_gpio))) {
69 struct esdhc_platform_data *boarddata = 128 if (gpio_get_value(boarddata->cd_gpio))
70 host->mmc->parent->platform_data;
71
72 if (boarddata && gpio_is_valid(boarddata->cd_gpio)
73 && gpio_get_value(boarddata->cd_gpio))
74 /* no card, if a valid gpio says so... */ 129 /* no card, if a valid gpio says so... */
75 val &= ~SDHCI_CARD_PRESENT; 130 val &= ~SDHCI_CARD_PRESENT;
76 else 131 else
@@ -85,12 +140,12 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
85{ 140{
86 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 141 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
87 struct pltfm_imx_data *imx_data = pltfm_host->priv; 142 struct pltfm_imx_data *imx_data = pltfm_host->priv;
143 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
88 144
89 if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) 145 if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)
90 && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD))) 146 && (boarddata->cd_type == ESDHC_CD_GPIO)))
91 /* 147 /*
92 * these interrupts won't work with a custom card_detect gpio 148 * these interrupts won't work with a custom card_detect gpio
93 * (only applied to mx25/35)
94 */ 149 */
95 val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 150 val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
96 151
@@ -173,6 +228,17 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
173 return; 228 return;
174 } 229 }
175 esdhc_clrset_le(host, 0xff, val, reg); 230 esdhc_clrset_le(host, 0xff, val, reg);
231
232 /*
233 * The esdhc has a design violation to SDHC spec which tells
234 * that software reset should not affect card detection circuit.
235 * But esdhc clears its SYSCTL register bits [0..2] during the
236 * software reset. This will stop those clocks that card detection
237 * circuit relies on. To work around it, we turn the clocks on back
238 * to keep card detection circuit functional.
239 */
240 if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1))
241 esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL);
176} 242}
177 243
178static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) 244static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
@@ -189,6 +255,26 @@ static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
189 return clk_get_rate(pltfm_host->clk) / 256 / 16; 255 return clk_get_rate(pltfm_host->clk) / 256 / 16;
190} 256}
191 257
258static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
259{
260 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
261 struct pltfm_imx_data *imx_data = pltfm_host->priv;
262 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
263
264 switch (boarddata->wp_type) {
265 case ESDHC_WP_GPIO:
266 if (gpio_is_valid(boarddata->wp_gpio))
267 return gpio_get_value(boarddata->wp_gpio);
268 case ESDHC_WP_CONTROLLER:
269 return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
270 SDHCI_WRITE_PROTECT);
271 case ESDHC_WP_NONE:
272 break;
273 }
274
275 return -ENOSYS;
276}
277
192static struct sdhci_ops sdhci_esdhc_ops = { 278static struct sdhci_ops sdhci_esdhc_ops = {
193 .read_l = esdhc_readl_le, 279 .read_l = esdhc_readl_le,
194 .read_w = esdhc_readw_le, 280 .read_w = esdhc_readw_le,
@@ -198,6 +284,7 @@ static struct sdhci_ops sdhci_esdhc_ops = {
198 .set_clock = esdhc_set_clock, 284 .set_clock = esdhc_set_clock,
199 .get_max_clock = esdhc_pltfm_get_max_clock, 285 .get_max_clock = esdhc_pltfm_get_max_clock,
200 .get_min_clock = esdhc_pltfm_get_min_clock, 286 .get_min_clock = esdhc_pltfm_get_min_clock,
287 .get_ro = esdhc_pltfm_get_ro,
201}; 288};
202 289
203static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { 290static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
@@ -207,17 +294,6 @@ static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
207 .ops = &sdhci_esdhc_ops, 294 .ops = &sdhci_esdhc_ops,
208}; 295};
209 296
210static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
211{
212 struct esdhc_platform_data *boarddata =
213 host->mmc->parent->platform_data;
214
215 if (boarddata && gpio_is_valid(boarddata->wp_gpio))
216 return gpio_get_value(boarddata->wp_gpio);
217 else
218 return -ENOSYS;
219}
220
221static irqreturn_t cd_irq(int irq, void *data) 297static irqreturn_t cd_irq(int irq, void *data)
222{ 298{
223 struct sdhci_host *sdhost = (struct sdhci_host *)data; 299 struct sdhci_host *sdhost = (struct sdhci_host *)data;
@@ -226,8 +302,48 @@ static irqreturn_t cd_irq(int irq, void *data)
226 return IRQ_HANDLED; 302 return IRQ_HANDLED;
227}; 303};
228 304
305#ifdef CONFIG_OF
306static int __devinit
307sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
308 struct esdhc_platform_data *boarddata)
309{
310 struct device_node *np = pdev->dev.of_node;
311
312 if (!np)
313 return -ENODEV;
314
315 if (of_get_property(np, "fsl,card-wired", NULL))
316 boarddata->cd_type = ESDHC_CD_PERMANENT;
317
318 if (of_get_property(np, "fsl,cd-controller", NULL))
319 boarddata->cd_type = ESDHC_CD_CONTROLLER;
320
321 if (of_get_property(np, "fsl,wp-controller", NULL))
322 boarddata->wp_type = ESDHC_WP_CONTROLLER;
323
324 boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
325 if (gpio_is_valid(boarddata->cd_gpio))
326 boarddata->cd_type = ESDHC_CD_GPIO;
327
328 boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
329 if (gpio_is_valid(boarddata->wp_gpio))
330 boarddata->wp_type = ESDHC_WP_GPIO;
331
332 return 0;
333}
334#else
335static inline int
336sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
337 struct esdhc_platform_data *boarddata)
338{
339 return -ENODEV;
340}
341#endif
342
229static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev) 343static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
230{ 344{
345 const struct of_device_id *of_id =
346 of_match_device(imx_esdhc_dt_ids, &pdev->dev);
231 struct sdhci_pltfm_host *pltfm_host; 347 struct sdhci_pltfm_host *pltfm_host;
232 struct sdhci_host *host; 348 struct sdhci_host *host;
233 struct esdhc_platform_data *boarddata; 349 struct esdhc_platform_data *boarddata;
@@ -242,8 +358,14 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
242 pltfm_host = sdhci_priv(host); 358 pltfm_host = sdhci_priv(host);
243 359
244 imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL); 360 imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL);
245 if (!imx_data) 361 if (!imx_data) {
246 return -ENOMEM; 362 err = -ENOMEM;
363 goto err_imx_data;
364 }
365
366 if (of_id)
367 pdev->id_entry = of_id->data;
368 imx_data->devtype = pdev->id_entry->driver_data;
247 pltfm_host->priv = imx_data; 369 pltfm_host->priv = imx_data;
248 370
249 clk = clk_get(mmc_dev(host->mmc), NULL); 371 clk = clk_get(mmc_dev(host->mmc), NULL);
@@ -255,50 +377,72 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
255 clk_enable(clk); 377 clk_enable(clk);
256 pltfm_host->clk = clk; 378 pltfm_host->clk = clk;
257 379
258 if (!cpu_is_mx25()) 380 if (!is_imx25_esdhc(imx_data))
259 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; 381 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
260 382
261 if (cpu_is_mx25() || cpu_is_mx35()) { 383 if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data))
262 /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ 384 /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
263 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; 385 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
264 /* write_protect can't be routed to controller, use gpio */
265 sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro;
266 }
267 386
268 if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51())) 387 if (is_imx53_esdhc(imx_data))
269 imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; 388 imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
270 389
271 boarddata = host->mmc->parent->platform_data; 390 boarddata = &imx_data->boarddata;
272 if (boarddata) { 391 if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
392 if (!host->mmc->parent->platform_data) {
393 dev_err(mmc_dev(host->mmc), "no board data!\n");
394 err = -EINVAL;
395 goto no_board_data;
396 }
397 imx_data->boarddata = *((struct esdhc_platform_data *)
398 host->mmc->parent->platform_data);
399 }
400
401 /* write_protect */
402 if (boarddata->wp_type == ESDHC_WP_GPIO) {
273 err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); 403 err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP");
274 if (err) { 404 if (err) {
275 dev_warn(mmc_dev(host->mmc), 405 dev_warn(mmc_dev(host->mmc),
276 "no write-protect pin available!\n"); 406 "no write-protect pin available!\n");
277 boarddata->wp_gpio = err; 407 boarddata->wp_gpio = -EINVAL;
278 } 408 }
409 } else {
410 boarddata->wp_gpio = -EINVAL;
411 }
412
413 /* card_detect */
414 if (boarddata->cd_type != ESDHC_CD_GPIO)
415 boarddata->cd_gpio = -EINVAL;
279 416
417 switch (boarddata->cd_type) {
418 case ESDHC_CD_GPIO:
280 err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD"); 419 err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD");
281 if (err) { 420 if (err) {
282 dev_warn(mmc_dev(host->mmc), 421 dev_err(mmc_dev(host->mmc),
283 "no card-detect pin available!\n"); 422 "no card-detect pin available!\n");
284 goto no_card_detect_pin; 423 goto no_card_detect_pin;
285 } 424 }
286 425
287 /* i.MX5x has issues to be researched */
288 if (!cpu_is_mx25() && !cpu_is_mx35())
289 goto not_supported;
290
291 err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq, 426 err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq,
292 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, 427 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
293 mmc_hostname(host->mmc), host); 428 mmc_hostname(host->mmc), host);
294 if (err) { 429 if (err) {
295 dev_warn(mmc_dev(host->mmc), "request irq error\n"); 430 dev_err(mmc_dev(host->mmc), "request irq error\n");
296 goto no_card_detect_irq; 431 goto no_card_detect_irq;
297 } 432 }
433 /* fall through */
298 434
299 imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD; 435 case ESDHC_CD_CONTROLLER:
300 /* Now we have a working card_detect again */ 436 /* we have a working card_detect back */
301 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 437 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
438 break;
439
440 case ESDHC_CD_PERMANENT:
441 host->mmc->caps = MMC_CAP_NONREMOVABLE;
442 break;
443
444 case ESDHC_CD_NONE:
445 break;
302 } 446 }
303 447
304 err = sdhci_add_host(host); 448 err = sdhci_add_host(host);
@@ -307,16 +451,21 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
307 451
308 return 0; 452 return 0;
309 453
310 no_card_detect_irq: 454err_add_host:
311 gpio_free(boarddata->cd_gpio); 455 if (gpio_is_valid(boarddata->cd_gpio))
312 no_card_detect_pin: 456 free_irq(gpio_to_irq(boarddata->cd_gpio), host);
313 boarddata->cd_gpio = err; 457no_card_detect_irq:
314 not_supported: 458 if (gpio_is_valid(boarddata->cd_gpio))
315 kfree(imx_data); 459 gpio_free(boarddata->cd_gpio);
316 err_add_host: 460 if (gpio_is_valid(boarddata->wp_gpio))
461 gpio_free(boarddata->wp_gpio);
462no_card_detect_pin:
463no_board_data:
317 clk_disable(pltfm_host->clk); 464 clk_disable(pltfm_host->clk);
318 clk_put(pltfm_host->clk); 465 clk_put(pltfm_host->clk);
319 err_clk_get: 466err_clk_get:
467 kfree(imx_data);
468err_imx_data:
320 sdhci_pltfm_free(pdev); 469 sdhci_pltfm_free(pdev);
321 return err; 470 return err;
322} 471}
@@ -325,20 +474,18 @@ static int __devexit sdhci_esdhc_imx_remove(struct platform_device *pdev)
325{ 474{
326 struct sdhci_host *host = platform_get_drvdata(pdev); 475 struct sdhci_host *host = platform_get_drvdata(pdev);
327 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 476 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
328 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
329 struct pltfm_imx_data *imx_data = pltfm_host->priv; 477 struct pltfm_imx_data *imx_data = pltfm_host->priv;
478 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
330 int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); 479 int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
331 480
332 sdhci_remove_host(host, dead); 481 sdhci_remove_host(host, dead);
333 482
334 if (boarddata && gpio_is_valid(boarddata->wp_gpio)) 483 if (gpio_is_valid(boarddata->wp_gpio))
335 gpio_free(boarddata->wp_gpio); 484 gpio_free(boarddata->wp_gpio);
336 485
337 if (boarddata && gpio_is_valid(boarddata->cd_gpio)) { 486 if (gpio_is_valid(boarddata->cd_gpio)) {
487 free_irq(gpio_to_irq(boarddata->cd_gpio), host);
338 gpio_free(boarddata->cd_gpio); 488 gpio_free(boarddata->cd_gpio);
339
340 if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION))
341 free_irq(gpio_to_irq(boarddata->cd_gpio), host);
342 } 489 }
343 490
344 clk_disable(pltfm_host->clk); 491 clk_disable(pltfm_host->clk);
@@ -354,7 +501,9 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
354 .driver = { 501 .driver = {
355 .name = "sdhci-esdhc-imx", 502 .name = "sdhci-esdhc-imx",
356 .owner = THIS_MODULE, 503 .owner = THIS_MODULE,
504 .of_match_table = imx_esdhc_dt_ids,
357 }, 505 },
506 .id_table = imx_esdhc_devtype,
358 .probe = sdhci_esdhc_imx_probe, 507 .probe = sdhci_esdhc_imx_probe,
359 .remove = __devexit_p(sdhci_esdhc_imx_remove), 508 .remove = __devexit_p(sdhci_esdhc_imx_remove),
360#ifdef CONFIG_PM 509#ifdef CONFIG_PM
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 71c0ce1f6db0..6414efeddca0 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -85,6 +85,7 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
85{ 85{
86 struct sdhci_host *host; 86 struct sdhci_host *host;
87 struct sdhci_pltfm_host *pltfm_host; 87 struct sdhci_pltfm_host *pltfm_host;
88 struct device_node *np = pdev->dev.of_node;
88 struct resource *iomem; 89 struct resource *iomem;
89 int ret; 90 int ret;
90 91
@@ -98,7 +99,7 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
98 dev_err(&pdev->dev, "Invalid iomem size!\n"); 99 dev_err(&pdev->dev, "Invalid iomem size!\n");
99 100
100 /* Some PCI-based MFD need the parent here */ 101 /* Some PCI-based MFD need the parent here */
101 if (pdev->dev.parent != &platform_bus) 102 if (pdev->dev.parent != &platform_bus && !np)
102 host = sdhci_alloc_host(pdev->dev.parent, sizeof(*pltfm_host)); 103 host = sdhci_alloc_host(pdev->dev.parent, sizeof(*pltfm_host));
103 else 104 else
104 host = sdhci_alloc_host(&pdev->dev, sizeof(*pltfm_host)); 105 host = sdhci_alloc_host(&pdev->dev, sizeof(*pltfm_host));
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 087d88023ba1..eeaf64391fbe 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -21,6 +21,7 @@
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/pagemap.h> 22#include <linux/pagemap.h>
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/scatterlist.h>
24 25
25/* Definitions for values the CTRL_SDIO_STATUS register can take. */ 26/* Definitions for values the CTRL_SDIO_STATUS register can take. */
26#define TMIO_SDIO_STAT_IOIRQ 0x0001 27#define TMIO_SDIO_STAT_IOIRQ 0x0001
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b7622c3745fa..e1eca2ab505e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -282,6 +282,7 @@ obj-$(CONFIG_USB_HSO) += usb/
282obj-$(CONFIG_USB_USBNET) += usb/ 282obj-$(CONFIG_USB_USBNET) += usb/
283obj-$(CONFIG_USB_ZD1201) += usb/ 283obj-$(CONFIG_USB_ZD1201) += usb/
284obj-$(CONFIG_USB_IPHETH) += usb/ 284obj-$(CONFIG_USB_IPHETH) += usb/
285obj-$(CONFIG_USB_CDC_PHONET) += usb/
285 286
286obj-$(CONFIG_WLAN) += wireless/ 287obj-$(CONFIG_WLAN) += wireless/
287obj-$(CONFIG_NET_TULIP) += tulip/ 288obj-$(CONFIG_NET_TULIP) += tulip/
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 536038b22710..31798f5f5d06 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -1502,13 +1502,13 @@ static int __devinit ace_init(struct net_device *dev)
1502 * firmware to wipe the ring without re-initializing it. 1502 * firmware to wipe the ring without re-initializing it.
1503 */ 1503 */
1504 if (!test_and_set_bit(0, &ap->std_refill_busy)) 1504 if (!test_and_set_bit(0, &ap->std_refill_busy))
1505 ace_load_std_rx_ring(ap, RX_RING_SIZE); 1505 ace_load_std_rx_ring(dev, RX_RING_SIZE);
1506 else 1506 else
1507 printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n", 1507 printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
1508 ap->name); 1508 ap->name);
1509 if (ap->version >= 2) { 1509 if (ap->version >= 2) {
1510 if (!test_and_set_bit(0, &ap->mini_refill_busy)) 1510 if (!test_and_set_bit(0, &ap->mini_refill_busy))
1511 ace_load_mini_rx_ring(ap, RX_MINI_SIZE); 1511 ace_load_mini_rx_ring(dev, RX_MINI_SIZE);
1512 else 1512 else
1513 printk(KERN_ERR "%s: Someone is busy refilling " 1513 printk(KERN_ERR "%s: Someone is busy refilling "
1514 "the RX mini ring\n", ap->name); 1514 "the RX mini ring\n", ap->name);
@@ -1584,9 +1584,10 @@ static void ace_watchdog(struct net_device *data)
1584} 1584}
1585 1585
1586 1586
1587static void ace_tasklet(unsigned long dev) 1587static void ace_tasklet(unsigned long arg)
1588{ 1588{
1589 struct ace_private *ap = netdev_priv((struct net_device *)dev); 1589 struct net_device *dev = (struct net_device *) arg;
1590 struct ace_private *ap = netdev_priv(dev);
1590 int cur_size; 1591 int cur_size;
1591 1592
1592 cur_size = atomic_read(&ap->cur_rx_bufs); 1593 cur_size = atomic_read(&ap->cur_rx_bufs);
@@ -1595,7 +1596,7 @@ static void ace_tasklet(unsigned long dev)
1595#ifdef DEBUG 1596#ifdef DEBUG
1596 printk("refilling buffers (current %i)\n", cur_size); 1597 printk("refilling buffers (current %i)\n", cur_size);
1597#endif 1598#endif
1598 ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size); 1599 ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size);
1599 } 1600 }
1600 1601
1601 if (ap->version >= 2) { 1602 if (ap->version >= 2) {
@@ -1606,7 +1607,7 @@ static void ace_tasklet(unsigned long dev)
1606 printk("refilling mini buffers (current %i)\n", 1607 printk("refilling mini buffers (current %i)\n",
1607 cur_size); 1608 cur_size);
1608#endif 1609#endif
1609 ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size); 1610 ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size);
1610 } 1611 }
1611 } 1612 }
1612 1613
@@ -1616,7 +1617,7 @@ static void ace_tasklet(unsigned long dev)
1616#ifdef DEBUG 1617#ifdef DEBUG
1617 printk("refilling jumbo buffers (current %i)\n", cur_size); 1618 printk("refilling jumbo buffers (current %i)\n", cur_size);
1618#endif 1619#endif
1619 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size); 1620 ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
1620 } 1621 }
1621 ap->tasklet_pending = 0; 1622 ap->tasklet_pending = 0;
1622} 1623}
@@ -1642,8 +1643,9 @@ static void ace_dump_trace(struct ace_private *ap)
1642 * done only before the device is enabled, thus no interrupts are 1643 * done only before the device is enabled, thus no interrupts are
1643 * generated and by the interrupt handler/tasklet handler. 1644 * generated and by the interrupt handler/tasklet handler.
1644 */ 1645 */
1645static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs) 1646static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
1646{ 1647{
1648 struct ace_private *ap = netdev_priv(dev);
1647 struct ace_regs __iomem *regs = ap->regs; 1649 struct ace_regs __iomem *regs = ap->regs;
1648 short i, idx; 1650 short i, idx;
1649 1651
@@ -1657,11 +1659,10 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
1657 struct rx_desc *rd; 1659 struct rx_desc *rd;
1658 dma_addr_t mapping; 1660 dma_addr_t mapping;
1659 1661
1660 skb = dev_alloc_skb(ACE_STD_BUFSIZE + NET_IP_ALIGN); 1662 skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE);
1661 if (!skb) 1663 if (!skb)
1662 break; 1664 break;
1663 1665
1664 skb_reserve(skb, NET_IP_ALIGN);
1665 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), 1666 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
1666 offset_in_page(skb->data), 1667 offset_in_page(skb->data),
1667 ACE_STD_BUFSIZE, 1668 ACE_STD_BUFSIZE,
@@ -1705,8 +1706,9 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
1705} 1706}
1706 1707
1707 1708
1708static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs) 1709static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs)
1709{ 1710{
1711 struct ace_private *ap = netdev_priv(dev);
1710 struct ace_regs __iomem *regs = ap->regs; 1712 struct ace_regs __iomem *regs = ap->regs;
1711 short i, idx; 1713 short i, idx;
1712 1714
@@ -1718,11 +1720,10 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
1718 struct rx_desc *rd; 1720 struct rx_desc *rd;
1719 dma_addr_t mapping; 1721 dma_addr_t mapping;
1720 1722
1721 skb = dev_alloc_skb(ACE_MINI_BUFSIZE + NET_IP_ALIGN); 1723 skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE);
1722 if (!skb) 1724 if (!skb)
1723 break; 1725 break;
1724 1726
1725 skb_reserve(skb, NET_IP_ALIGN);
1726 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), 1727 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
1727 offset_in_page(skb->data), 1728 offset_in_page(skb->data),
1728 ACE_MINI_BUFSIZE, 1729 ACE_MINI_BUFSIZE,
@@ -1762,8 +1763,9 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
1762 * Load the jumbo rx ring, this may happen at any time if the MTU 1763 * Load the jumbo rx ring, this may happen at any time if the MTU
1763 * is changed to a value > 1500. 1764 * is changed to a value > 1500.
1764 */ 1765 */
1765static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs) 1766static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs)
1766{ 1767{
1768 struct ace_private *ap = netdev_priv(dev);
1767 struct ace_regs __iomem *regs = ap->regs; 1769 struct ace_regs __iomem *regs = ap->regs;
1768 short i, idx; 1770 short i, idx;
1769 1771
@@ -1774,11 +1776,10 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
1774 struct rx_desc *rd; 1776 struct rx_desc *rd;
1775 dma_addr_t mapping; 1777 dma_addr_t mapping;
1776 1778
1777 skb = dev_alloc_skb(ACE_JUMBO_BUFSIZE + NET_IP_ALIGN); 1779 skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE);
1778 if (!skb) 1780 if (!skb)
1779 break; 1781 break;
1780 1782
1781 skb_reserve(skb, NET_IP_ALIGN);
1782 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), 1783 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
1783 offset_in_page(skb->data), 1784 offset_in_page(skb->data),
1784 ACE_JUMBO_BUFSIZE, 1785 ACE_JUMBO_BUFSIZE,
@@ -2196,7 +2197,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
2196#ifdef DEBUG 2197#ifdef DEBUG
2197 printk("low on std buffers %i\n", cur_size); 2198 printk("low on std buffers %i\n", cur_size);
2198#endif 2199#endif
2199 ace_load_std_rx_ring(ap, 2200 ace_load_std_rx_ring(dev,
2200 RX_RING_SIZE - cur_size); 2201 RX_RING_SIZE - cur_size);
2201 } else 2202 } else
2202 run_tasklet = 1; 2203 run_tasklet = 1;
@@ -2212,7 +2213,8 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
2212 printk("low on mini buffers %i\n", 2213 printk("low on mini buffers %i\n",
2213 cur_size); 2214 cur_size);
2214#endif 2215#endif
2215 ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size); 2216 ace_load_mini_rx_ring(dev,
2217 RX_MINI_SIZE - cur_size);
2216 } else 2218 } else
2217 run_tasklet = 1; 2219 run_tasklet = 1;
2218 } 2220 }
@@ -2228,7 +2230,8 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
2228 printk("low on jumbo buffers %i\n", 2230 printk("low on jumbo buffers %i\n",
2229 cur_size); 2231 cur_size);
2230#endif 2232#endif
2231 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size); 2233 ace_load_jumbo_rx_ring(dev,
2234 RX_JUMBO_SIZE - cur_size);
2232 } else 2235 } else
2233 run_tasklet = 1; 2236 run_tasklet = 1;
2234 } 2237 }
@@ -2267,7 +2270,7 @@ static int ace_open(struct net_device *dev)
2267 2270
2268 if (ap->jumbo && 2271 if (ap->jumbo &&
2269 !test_and_set_bit(0, &ap->jumbo_refill_busy)) 2272 !test_and_set_bit(0, &ap->jumbo_refill_busy))
2270 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE); 2273 ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
2271 2274
2272 if (dev->flags & IFF_PROMISC) { 2275 if (dev->flags & IFF_PROMISC) {
2273 cmd.evt = C_SET_PROMISC_MODE; 2276 cmd.evt = C_SET_PROMISC_MODE;
@@ -2575,7 +2578,7 @@ static int ace_change_mtu(struct net_device *dev, int new_mtu)
2575 "support\n", dev->name); 2578 "support\n", dev->name);
2576 ap->jumbo = 1; 2579 ap->jumbo = 1;
2577 if (!test_and_set_bit(0, &ap->jumbo_refill_busy)) 2580 if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
2578 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE); 2581 ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
2579 ace_set_rxtx_parms(dev, 1); 2582 ace_set_rxtx_parms(dev, 1);
2580 } 2583 }
2581 } else { 2584 } else {
diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h
index f67dc9b0eb80..51c486cfbb8c 100644
--- a/drivers/net/acenic.h
+++ b/drivers/net/acenic.h
@@ -766,9 +766,9 @@ static inline void ace_unmask_irq(struct net_device *dev)
766 * Prototypes 766 * Prototypes
767 */ 767 */
768static int ace_init(struct net_device *dev); 768static int ace_init(struct net_device *dev);
769static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs); 769static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs);
770static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs); 770static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs);
771static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs); 771static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs);
772static irqreturn_t ace_interrupt(int irq, void *dev_id); 772static irqreturn_t ace_interrupt(int irq, void *dev_id);
773static int ace_load_firmware(struct net_device *dev); 773static int ace_load_firmware(struct net_device *dev);
774static int ace_open(struct net_device *dev); 774static int ace_open(struct net_device *dev);
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 6f0e9403004b..97e6954304ea 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -44,7 +44,7 @@
44 * SMP torture testing 44 * SMP torture testing
45 */ 45 */
46 46
47#include <asm/atomic.h> 47#include <linux/atomic.h>
48#include <asm/byteorder.h> 48#include <asm/byteorder.h>
49 49
50#include <linux/compiler.h> 50#include <linux/compiler.h>
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index e0f87cf1e2ba..d4f7dda39721 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -20,7 +20,7 @@
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */ 21 */
22 22
23#include <asm/atomic.h> 23#include <linux/atomic.h>
24#include <linux/crc32.h> 24#include <linux/crc32.h>
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
diff --git a/drivers/net/atlx/atl2.h b/drivers/net/atlx/atl2.h
index 78344ddf4bf0..bf9016ebdd9b 100644
--- a/drivers/net/atlx/atl2.h
+++ b/drivers/net/atlx/atl2.h
@@ -25,7 +25,7 @@
25#ifndef _ATL2_H_ 25#ifndef _ATL2_H_
26#define _ATL2_H_ 26#define _ATL2_H_
27 27
28#include <asm/atomic.h> 28#include <linux/atomic.h>
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30 30
31#ifndef _ATL2_HW_H_ 31#ifndef _ATL2_HW_H_
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 02842d05c11f..38a83acd502e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1557,8 +1557,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1557 1557
1558 if (slave_dev->type != ARPHRD_ETHER) 1558 if (slave_dev->type != ARPHRD_ETHER)
1559 bond_setup_by_slave(bond_dev, slave_dev); 1559 bond_setup_by_slave(bond_dev, slave_dev);
1560 else 1560 else {
1561 ether_setup(bond_dev); 1561 ether_setup(bond_dev);
1562 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1563 }
1562 1564
1563 netdev_bonding_change(bond_dev, 1565 netdev_bonding_change(bond_dev,
1564 NETDEV_POST_TYPE_CHANGE); 1566 NETDEV_POST_TYPE_CHANGE);
@@ -4330,7 +4332,7 @@ static void bond_setup(struct net_device *bond_dev)
4330 bond_dev->tx_queue_len = 0; 4332 bond_dev->tx_queue_len = 0;
4331 bond_dev->flags |= IFF_MASTER|IFF_MULTICAST; 4333 bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
4332 bond_dev->priv_flags |= IFF_BONDING; 4334 bond_dev->priv_flags |= IFF_BONDING;
4333 bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 4335 bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
4334 4336
4335 /* At first, we block adding VLANs. That's the only way to 4337 /* At first, we block adding VLANs. That's the only way to
4336 * prevent problems that occur when adding VLANs over an 4338 * prevent problems that occur when adding VLANs over an
@@ -4691,7 +4693,7 @@ static int bond_check_params(struct bond_params *params)
4691 /* miimon and arp_interval not set, we need one so things 4693 /* miimon and arp_interval not set, we need one so things
4692 * work as expected, see bonding.txt for details 4694 * work as expected, see bonding.txt for details
4693 */ 4695 */
4694 pr_warning("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n"); 4696 pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
4695 } 4697 }
4696 4698
4697 if (primary && !USES_PRIMARY(bond_mode)) { 4699 if (primary && !USES_PRIMARY(bond_mode)) {
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index b60835f58650..2dfb4bf90087 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1025,6 +1025,7 @@ static ssize_t bonding_store_primary(struct device *d,
1025 int i; 1025 int i;
1026 struct slave *slave; 1026 struct slave *slave;
1027 struct bonding *bond = to_bond(d); 1027 struct bonding *bond = to_bond(d);
1028 char ifname[IFNAMSIZ];
1028 1029
1029 if (!rtnl_trylock()) 1030 if (!rtnl_trylock())
1030 return restart_syscall(); 1031 return restart_syscall();
@@ -1035,32 +1036,33 @@ static ssize_t bonding_store_primary(struct device *d,
1035 if (!USES_PRIMARY(bond->params.mode)) { 1036 if (!USES_PRIMARY(bond->params.mode)) {
1036 pr_info("%s: Unable to set primary slave; %s is in mode %d\n", 1037 pr_info("%s: Unable to set primary slave; %s is in mode %d\n",
1037 bond->dev->name, bond->dev->name, bond->params.mode); 1038 bond->dev->name, bond->dev->name, bond->params.mode);
1038 } else { 1039 goto out;
1039 bond_for_each_slave(bond, slave, i) { 1040 }
1040 if (strnicmp
1041 (slave->dev->name, buf,
1042 strlen(slave->dev->name)) == 0) {
1043 pr_info("%s: Setting %s as primary slave.\n",
1044 bond->dev->name, slave->dev->name);
1045 bond->primary_slave = slave;
1046 strcpy(bond->params.primary, slave->dev->name);
1047 bond_select_active_slave(bond);
1048 goto out;
1049 }
1050 }
1051 1041
1052 /* if we got here, then we didn't match the name of any slave */ 1042 sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
1053 1043
1054 if (strlen(buf) == 0 || buf[0] == '\n') { 1044 /* check to see if we are clearing primary */
1055 pr_info("%s: Setting primary slave to None.\n", 1045 if (!strlen(ifname) || buf[0] == '\n') {
1056 bond->dev->name); 1046 pr_info("%s: Setting primary slave to None.\n",
1057 bond->primary_slave = NULL; 1047 bond->dev->name);
1058 bond_select_active_slave(bond); 1048 bond->primary_slave = NULL;
1059 } else { 1049 bond_select_active_slave(bond);
1060 pr_info("%s: Unable to set %.*s as primary slave as it is not a slave.\n", 1050 goto out;
1061 bond->dev->name, (int)strlen(buf) - 1, buf); 1051 }
1052
1053 bond_for_each_slave(bond, slave, i) {
1054 if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
1055 pr_info("%s: Setting %s as primary slave.\n",
1056 bond->dev->name, slave->dev->name);
1057 bond->primary_slave = slave;
1058 strcpy(bond->params.primary, slave->dev->name);
1059 bond_select_active_slave(bond);
1060 goto out;
1062 } 1061 }
1063 } 1062 }
1063
1064 pr_info("%s: Unable to set %.*s as primary slave.\n",
1065 bond->dev->name, (int)strlen(buf) - 1, buf);
1064out: 1066out:
1065 write_unlock_bh(&bond->curr_slave_lock); 1067 write_unlock_bh(&bond->curr_slave_lock);
1066 read_unlock(&bond->lock); 1068 read_unlock(&bond->lock);
@@ -1195,6 +1197,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
1195 struct slave *old_active = NULL; 1197 struct slave *old_active = NULL;
1196 struct slave *new_active = NULL; 1198 struct slave *new_active = NULL;
1197 struct bonding *bond = to_bond(d); 1199 struct bonding *bond = to_bond(d);
1200 char ifname[IFNAMSIZ];
1198 1201
1199 if (!rtnl_trylock()) 1202 if (!rtnl_trylock())
1200 return restart_syscall(); 1203 return restart_syscall();
@@ -1203,56 +1206,62 @@ static ssize_t bonding_store_active_slave(struct device *d,
1203 read_lock(&bond->lock); 1206 read_lock(&bond->lock);
1204 write_lock_bh(&bond->curr_slave_lock); 1207 write_lock_bh(&bond->curr_slave_lock);
1205 1208
1206 if (!USES_PRIMARY(bond->params.mode)) 1209 if (!USES_PRIMARY(bond->params.mode)) {
1207 pr_info("%s: Unable to change active slave; %s is in mode %d\n", 1210 pr_info("%s: Unable to change active slave; %s is in mode %d\n",
1208 bond->dev->name, bond->dev->name, bond->params.mode); 1211 bond->dev->name, bond->dev->name, bond->params.mode);
1209 else { 1212 goto out;
1210 bond_for_each_slave(bond, slave, i) { 1213 }
1211 if (strnicmp 1214
1212 (slave->dev->name, buf, 1215 sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
1213 strlen(slave->dev->name)) == 0) { 1216
1214 old_active = bond->curr_active_slave; 1217 /* check to see if we are clearing active */
1215 new_active = slave; 1218 if (!strlen(ifname) || buf[0] == '\n') {
1216 if (new_active == old_active) { 1219 pr_info("%s: Clearing current active slave.\n",
1217 /* do nothing */ 1220 bond->dev->name);
1218 pr_info("%s: %s is already the current active slave.\n", 1221 bond->curr_active_slave = NULL;
1222 bond_select_active_slave(bond);
1223 goto out;
1224 }
1225
1226 bond_for_each_slave(bond, slave, i) {
1227 if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
1228 old_active = bond->curr_active_slave;
1229 new_active = slave;
1230 if (new_active == old_active) {
1231 /* do nothing */
1232 pr_info("%s: %s is already the current"
1233 " active slave.\n",
1234 bond->dev->name,
1235 slave->dev->name);
1236 goto out;
1237 }
1238 else {
1239 if ((new_active) &&
1240 (old_active) &&
1241 (new_active->link == BOND_LINK_UP) &&
1242 IS_UP(new_active->dev)) {
1243 pr_info("%s: Setting %s as active"
1244 " slave.\n",
1219 bond->dev->name, 1245 bond->dev->name,
1220 slave->dev->name); 1246 slave->dev->name);
1221 goto out; 1247 bond_change_active_slave(bond,
1248 new_active);
1222 } 1249 }
1223 else { 1250 else {
1224 if ((new_active) && 1251 pr_info("%s: Could not set %s as"
1225 (old_active) && 1252 " active slave; either %s is"
1226 (new_active->link == BOND_LINK_UP) && 1253 " down or the link is down.\n",
1227 IS_UP(new_active->dev)) { 1254 bond->dev->name,
1228 pr_info("%s: Setting %s as active slave.\n", 1255 slave->dev->name,
1229 bond->dev->name, 1256 slave->dev->name);
1230 slave->dev->name);
1231 bond_change_active_slave(bond, new_active);
1232 }
1233 else {
1234 pr_info("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
1235 bond->dev->name,
1236 slave->dev->name,
1237 slave->dev->name);
1238 }
1239 goto out;
1240 } 1257 }
1258 goto out;
1241 } 1259 }
1242 } 1260 }
1243
1244 /* if we got here, then we didn't match the name of any slave */
1245
1246 if (strlen(buf) == 0 || buf[0] == '\n') {
1247 pr_info("%s: Setting active slave to None.\n",
1248 bond->dev->name);
1249 bond->primary_slave = NULL;
1250 bond_select_active_slave(bond);
1251 } else {
1252 pr_info("%s: Unable to set %.*s as active slave as it is not a slave.\n",
1253 bond->dev->name, (int)strlen(buf) - 1, buf);
1254 }
1255 } 1261 }
1262
1263 pr_info("%s: Unable to set %.*s as active slave.\n",
1264 bond->dev->name, (int)strlen(buf) - 1, buf);
1256 out: 1265 out:
1257 write_unlock_bh(&bond->curr_slave_lock); 1266 write_unlock_bh(&bond->curr_slave_lock);
1258 read_unlock(&bond->lock); 1267 read_unlock(&bond->lock);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index b414f5ae0da5..646c86bcc545 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -98,7 +98,7 @@
98 98
99#include <net/checksum.h> 99#include <net/checksum.h>
100 100
101#include <asm/atomic.h> 101#include <linux/atomic.h>
102#include <asm/system.h> 102#include <asm/system.h>
103#include <asm/io.h> 103#include <asm/io.h>
104#include <asm/byteorder.h> 104#include <asm/byteorder.h>
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 086ce0418b29..e0638cb4b07c 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -40,7 +40,7 @@
40#include <linux/dma-mapping.h> 40#include <linux/dma-mapping.h>
41#include <linux/clk.h> 41#include <linux/clk.h>
42#include <linux/gpio.h> 42#include <linux/gpio.h>
43#include <asm/atomic.h> 43#include <linux/atomic.h>
44 44
45MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); 45MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
46MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); 46MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 32636a1d62a5..805076c54f1b 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -34,7 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <net/neighbour.h> 35#include <net/neighbour.h>
36#include <linux/notifier.h> 36#include <linux/notifier.h>
37#include <asm/atomic.h> 37#include <linux/atomic.h>
38#include <linux/proc_fs.h> 38#include <linux/proc_fs.h>
39#include <linux/if_vlan.h> 39#include <linux/if_vlan.h>
40#include <net/netevent.h> 40#include <net/netevent.h>
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
index fd3eb07e3f40..7a12d52ed4fc 100644
--- a/drivers/net/cxgb3/l2t.h
+++ b/drivers/net/cxgb3/l2t.h
@@ -34,7 +34,7 @@
34 34
35#include <linux/spinlock.h> 35#include <linux/spinlock.h>
36#include "t3cdev.h" 36#include "t3cdev.h"
37#include <asm/atomic.h> 37#include <linux/atomic.h>
38 38
39enum { 39enum {
40 L2T_STATE_VALID, /* entry is up to date */ 40 L2T_STATE_VALID, /* entry is up to date */
diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h
index be55e9ae74d1..705713b56636 100644
--- a/drivers/net/cxgb3/t3cdev.h
+++ b/drivers/net/cxgb3/t3cdev.h
@@ -33,7 +33,7 @@
33#define _T3CDEV_H_ 33#define _T3CDEV_H_
34 34
35#include <linux/list.h> 35#include <linux/list.h>
36#include <asm/atomic.h> 36#include <linux/atomic.h>
37#include <linux/netdevice.h> 37#include <linux/netdevice.h>
38#include <linux/proc_fs.h> 38#include <linux/proc_fs.h>
39#include <linux/skbuff.h> 39#include <linux/skbuff.h>
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
index 1b48c0170145..b1d39b8d141a 100644
--- a/drivers/net/cxgb4/cxgb4_uld.h
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -38,7 +38,7 @@
38#include <linux/cache.h> 38#include <linux/cache.h>
39#include <linux/spinlock.h> 39#include <linux/spinlock.h>
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <asm/atomic.h> 41#include <linux/atomic.h>
42 42
43/* CPL message priority levels */ 43/* CPL message priority levels */
44enum { 44enum {
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h
index 7bd8f42378ff..02b31d0c6410 100644
--- a/drivers/net/cxgb4/l2t.h
+++ b/drivers/net/cxgb4/l2t.h
@@ -37,7 +37,7 @@
37 37
38#include <linux/spinlock.h> 38#include <linux/spinlock.h>
39#include <linux/if_ether.h> 39#include <linux/if_ether.h>
40#include <asm/atomic.h> 40#include <linux/atomic.h>
41 41
42struct adapter; 42struct adapter;
43struct l2t_data; 43struct l2t_data;
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 5b631fe74738..e8266ccf818a 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -44,6 +44,10 @@
44#include <linux/platform_device.h> 44#include <linux/platform_device.h>
45#include <linux/phy.h> 45#include <linux/phy.h>
46#include <linux/fec.h> 46#include <linux/fec.h>
47#include <linux/of.h>
48#include <linux/of_device.h>
49#include <linux/of_gpio.h>
50#include <linux/of_net.h>
47 51
48#include <asm/cacheflush.h> 52#include <asm/cacheflush.h>
49 53
@@ -66,17 +70,42 @@
66#define FEC_QUIRK_ENET_MAC (1 << 0) 70#define FEC_QUIRK_ENET_MAC (1 << 0)
67/* Controller needs driver to swap frame */ 71/* Controller needs driver to swap frame */
68#define FEC_QUIRK_SWAP_FRAME (1 << 1) 72#define FEC_QUIRK_SWAP_FRAME (1 << 1)
73/* Controller uses gasket */
74#define FEC_QUIRK_USE_GASKET (1 << 2)
69 75
70static struct platform_device_id fec_devtype[] = { 76static struct platform_device_id fec_devtype[] = {
71 { 77 {
78 /* keep it for coldfire */
72 .name = DRIVER_NAME, 79 .name = DRIVER_NAME,
73 .driver_data = 0, 80 .driver_data = 0,
74 }, { 81 }, {
82 .name = "imx25-fec",
83 .driver_data = FEC_QUIRK_USE_GASKET,
84 }, {
85 .name = "imx27-fec",
86 .driver_data = 0,
87 }, {
75 .name = "imx28-fec", 88 .name = "imx28-fec",
76 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, 89 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
77 }, 90 }, {
78 { } 91 /* sentinel */
92 }
79}; 93};
94MODULE_DEVICE_TABLE(platform, fec_devtype);
95
96enum imx_fec_type {
97 IMX25_FEC = 1, /* runs on i.mx25/50/53 */
98 IMX27_FEC, /* runs on i.mx27/35/51 */
99 IMX28_FEC,
100};
101
102static const struct of_device_id fec_dt_ids[] = {
103 { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
104 { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
105 { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
106 { /* sentinel */ }
107};
108MODULE_DEVICE_TABLE(of, fec_dt_ids);
80 109
81static unsigned char macaddr[ETH_ALEN]; 110static unsigned char macaddr[ETH_ALEN];
82module_param_array(macaddr, byte, NULL, 0); 111module_param_array(macaddr, byte, NULL, 0);
@@ -427,7 +456,7 @@ fec_restart(struct net_device *ndev, int duplex)
427 456
428 } else { 457 } else {
429#ifdef FEC_MIIGSK_ENR 458#ifdef FEC_MIIGSK_ENR
430 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) { 459 if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
431 /* disable the gasket and wait */ 460 /* disable the gasket and wait */
432 writel(0, fep->hwp + FEC_MIIGSK_ENR); 461 writel(0, fep->hwp + FEC_MIIGSK_ENR);
433 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) 462 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
@@ -436,8 +465,11 @@ fec_restart(struct net_device *ndev, int duplex)
436 /* 465 /*
437 * configure the gasket: 466 * configure the gasket:
438 * RMII, 50 MHz, no loopback, no echo 467 * RMII, 50 MHz, no loopback, no echo
468 * MII, 25 MHz, no loopback, no echo
439 */ 469 */
440 writel(1, fep->hwp + FEC_MIIGSK_CFGR); 470 writel((fep->phy_interface == PHY_INTERFACE_MODE_RMII) ?
471 1 : 0, fep->hwp + FEC_MIIGSK_CFGR);
472
441 473
442 /* re-enable the gasket */ 474 /* re-enable the gasket */
443 writel(2, fep->hwp + FEC_MIIGSK_ENR); 475 writel(2, fep->hwp + FEC_MIIGSK_ENR);
@@ -734,8 +766,22 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
734 */ 766 */
735 iap = macaddr; 767 iap = macaddr;
736 768
769#ifdef CONFIG_OF
737 /* 770 /*
738 * 2) from flash or fuse (via platform data) 771 * 2) from device tree data
772 */
773 if (!is_valid_ether_addr(iap)) {
774 struct device_node *np = fep->pdev->dev.of_node;
775 if (np) {
776 const char *mac = of_get_mac_address(np);
777 if (mac)
778 iap = (unsigned char *) mac;
779 }
780 }
781#endif
782
783 /*
784 * 3) from flash or fuse (via platform data)
739 */ 785 */
740 if (!is_valid_ether_addr(iap)) { 786 if (!is_valid_ether_addr(iap)) {
741#ifdef CONFIG_M5272 787#ifdef CONFIG_M5272
@@ -748,7 +794,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
748 } 794 }
749 795
750 /* 796 /*
751 * 3) FEC mac registers set by bootloader 797 * 4) FEC mac registers set by bootloader
752 */ 798 */
753 if (!is_valid_ether_addr(iap)) { 799 if (!is_valid_ether_addr(iap)) {
754 *((unsigned long *) &tmpaddr[0]) = 800 *((unsigned long *) &tmpaddr[0]) =
@@ -1354,6 +1400,52 @@ static int fec_enet_init(struct net_device *ndev)
1354 return 0; 1400 return 0;
1355} 1401}
1356 1402
1403#ifdef CONFIG_OF
1404static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev)
1405{
1406 struct device_node *np = pdev->dev.of_node;
1407
1408 if (np)
1409 return of_get_phy_mode(np);
1410
1411 return -ENODEV;
1412}
1413
1414static int __devinit fec_reset_phy(struct platform_device *pdev)
1415{
1416 int err, phy_reset;
1417 struct device_node *np = pdev->dev.of_node;
1418
1419 if (!np)
1420 return -ENODEV;
1421
1422 phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
1423 err = gpio_request_one(phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset");
1424 if (err) {
1425 pr_warn("FEC: failed to get gpio phy-reset: %d\n", err);
1426 return err;
1427 }
1428 msleep(1);
1429 gpio_set_value(phy_reset, 1);
1430
1431 return 0;
1432}
1433#else /* CONFIG_OF */
1434static inline int fec_get_phy_mode_dt(struct platform_device *pdev)
1435{
1436 return -ENODEV;
1437}
1438
1439static inline int fec_reset_phy(struct platform_device *pdev)
1440{
1441 /*
1442 * In case of platform probe, the reset has been done
1443 * by machine code.
1444 */
1445 return 0;
1446}
1447#endif /* CONFIG_OF */
1448
1357static int __devinit 1449static int __devinit
1358fec_probe(struct platform_device *pdev) 1450fec_probe(struct platform_device *pdev)
1359{ 1451{
@@ -1362,6 +1454,11 @@ fec_probe(struct platform_device *pdev)
1362 struct net_device *ndev; 1454 struct net_device *ndev;
1363 int i, irq, ret = 0; 1455 int i, irq, ret = 0;
1364 struct resource *r; 1456 struct resource *r;
1457 const struct of_device_id *of_id;
1458
1459 of_id = of_match_device(fec_dt_ids, &pdev->dev);
1460 if (of_id)
1461 pdev->id_entry = of_id->data;
1365 1462
1366 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1463 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1367 if (!r) 1464 if (!r)
@@ -1393,9 +1490,18 @@ fec_probe(struct platform_device *pdev)
1393 1490
1394 platform_set_drvdata(pdev, ndev); 1491 platform_set_drvdata(pdev, ndev);
1395 1492
1396 pdata = pdev->dev.platform_data; 1493 ret = fec_get_phy_mode_dt(pdev);
1397 if (pdata) 1494 if (ret < 0) {
1398 fep->phy_interface = pdata->phy; 1495 pdata = pdev->dev.platform_data;
1496 if (pdata)
1497 fep->phy_interface = pdata->phy;
1498 else
1499 fep->phy_interface = PHY_INTERFACE_MODE_MII;
1500 } else {
1501 fep->phy_interface = ret;
1502 }
1503
1504 fec_reset_phy(pdev);
1399 1505
1400 /* This device has up to three irqs on some platforms */ 1506 /* This device has up to three irqs on some platforms */
1401 for (i = 0; i < 3; i++) { 1507 for (i = 0; i < 3; i++) {
@@ -1530,6 +1636,7 @@ static struct platform_driver fec_driver = {
1530#ifdef CONFIG_PM 1636#ifdef CONFIG_PM
1531 .pm = &fec_pm_ops, 1637 .pm = &fec_pm_ops,
1532#endif 1638#endif
1639 .of_match_table = fec_dt_ids,
1533 }, 1640 },
1534 .id_table = fec_devtype, 1641 .id_table = fec_devtype,
1535 .probe = fec_probe, 1642 .probe = fec_probe,
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index e64cd9ceac3f..e55df308a3af 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2764,7 +2764,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2764 prefetch(skb->data); 2764 prefetch(skb->data);
2765 2765
2766 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); 2766 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2767 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) { 2767
2768 /*
2769 * There's need to check for NETIF_F_HW_VLAN_RX here.
2770 * Even if vlan rx accel is disabled,
2771 * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set.
2772 */
2773 if (dev->features & NETIF_F_HW_VLAN_RX &&
2774 vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2768 u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK; 2775 u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
2769 2776
2770 __vlan_hwaccel_put_tag(skb, vid); 2777 __vlan_hwaccel_put_tag(skb, vid);
@@ -5331,15 +5338,16 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5331 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 5338 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5332 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | 5339 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
5333 NETIF_F_TSO | NETIF_F_RXCSUM; 5340 NETIF_F_TSO | NETIF_F_RXCSUM;
5334 dev->features |= dev->hw_features;
5335 } 5341 }
5336 5342
5337 np->vlanctl_bits = 0; 5343 np->vlanctl_bits = 0;
5338 if (id->driver_data & DEV_HAS_VLAN) { 5344 if (id->driver_data & DEV_HAS_VLAN) {
5339 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; 5345 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5340 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; 5346 dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
5341 } 5347 }
5342 5348
5349 dev->features |= dev->hw_features;
5350
5343 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; 5351 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5344 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) || 5352 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5345 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) || 5353 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
@@ -5607,6 +5615,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5607 goto out_error; 5615 goto out_error;
5608 } 5616 }
5609 5617
5618 nv_vlan_mode(dev, dev->features);
5619
5610 netif_carrier_off(dev); 5620 netif_carrier_off(dev);
5611 5621
5612 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", 5622 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 835cd2588148..2659daad783d 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -388,12 +388,8 @@ static void gfar_init_mac(struct net_device *ndev)
388 if (priv->hwts_rx_en) 388 if (priv->hwts_rx_en)
389 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; 389 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
390 390
391 /* keep vlan related bits if it's enabled */
392 if (ndev->features & NETIF_F_HW_VLAN_TX)
393 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
394
395 if (ndev->features & NETIF_F_HW_VLAN_RX) 391 if (ndev->features & NETIF_F_HW_VLAN_RX)
396 tctrl |= TCTRL_VLINS; 392 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
397 393
398 /* Init rctrl based on our settings */ 394 /* Init rctrl based on our settings */
399 gfar_write(&regs->rctrl, rctrl); 395 gfar_write(&regs->rctrl, rctrl);
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 0d283781bc5e..2a5a34d2d67b 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -36,7 +36,7 @@
36#include <linux/tcp.h> 36#include <linux/tcp.h>
37#include <linux/semaphore.h> 37#include <linux/semaphore.h>
38#include <linux/compat.h> 38#include <linux/compat.h>
39#include <asm/atomic.h> 39#include <linux/atomic.h>
40 40
41#define SIXPACK_VERSION "Revision: 0.3.0" 41#define SIXPACK_VERSION "Revision: 0.3.0"
42 42
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 52b14256e2c0..ce555d9ac02c 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -36,7 +36,7 @@
36#include <linux/rtnetlink.h> 36#include <linux/rtnetlink.h>
37#include <linux/sockios.h> 37#include <linux/sockios.h>
38#include <linux/workqueue.h> 38#include <linux/workqueue.h>
39#include <asm/atomic.h> 39#include <linux/atomic.h>
40#include <asm/dma.h> 40#include <asm/dma.h>
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/irq.h> 42#include <asm/irq.h>
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 725399ea0690..70cb7d8a3b53 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -39,6 +39,7 @@
39#include <linux/bitops.h> 39#include <linux/bitops.h>
40#include <linux/workqueue.h> 40#include <linux/workqueue.h>
41#include <linux/of.h> 41#include <linux/of.h>
42#include <linux/of_net.h>
42#include <linux/slab.h> 43#include <linux/slab.h>
43 44
44#include <asm/processor.h> 45#include <asm/processor.h>
@@ -2506,18 +2507,6 @@ static int __devinit emac_init_config(struct emac_instance *dev)
2506{ 2507{
2507 struct device_node *np = dev->ofdev->dev.of_node; 2508 struct device_node *np = dev->ofdev->dev.of_node;
2508 const void *p; 2509 const void *p;
2509 unsigned int plen;
2510 const char *pm, *phy_modes[] = {
2511 [PHY_MODE_NA] = "",
2512 [PHY_MODE_MII] = "mii",
2513 [PHY_MODE_RMII] = "rmii",
2514 [PHY_MODE_SMII] = "smii",
2515 [PHY_MODE_RGMII] = "rgmii",
2516 [PHY_MODE_TBI] = "tbi",
2517 [PHY_MODE_GMII] = "gmii",
2518 [PHY_MODE_RTBI] = "rtbi",
2519 [PHY_MODE_SGMII] = "sgmii",
2520 };
2521 2510
2522 /* Read config from device-tree */ 2511 /* Read config from device-tree */
2523 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1)) 2512 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
@@ -2566,23 +2555,9 @@ static int __devinit emac_init_config(struct emac_instance *dev)
2566 dev->mal_burst_size = 256; 2555 dev->mal_burst_size = 256;
2567 2556
2568 /* PHY mode needs some decoding */ 2557 /* PHY mode needs some decoding */
2569 dev->phy_mode = PHY_MODE_NA; 2558 dev->phy_mode = of_get_phy_mode(np);
2570 pm = of_get_property(np, "phy-mode", &plen); 2559 if (dev->phy_mode < 0)
2571 if (pm != NULL) { 2560 dev->phy_mode = PHY_MODE_NA;
2572 int i;
2573 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2574 if (!strcasecmp(pm, phy_modes[i])) {
2575 dev->phy_mode = i;
2576 break;
2577 }
2578 }
2579
2580 /* Backward compat with non-final DT */
2581 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2582 u32 nmode = *(const u32 *)pm;
2583 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2584 dev->phy_mode = nmode;
2585 }
2586 2561
2587 /* Check EMAC version */ 2562 /* Check EMAC version */
2588 if (of_device_is_compatible(np, "ibm,emac4sync")) { 2563 if (of_device_is_compatible(np, "ibm,emac4sync")) {
diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h
index 8a61b597a169..1568278d759a 100644
--- a/drivers/net/ibm_newemac/emac.h
+++ b/drivers/net/ibm_newemac/emac.h
@@ -26,6 +26,7 @@
26#define __IBM_NEWEMAC_H 26#define __IBM_NEWEMAC_H
27 27
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/phy.h>
29 30
30/* EMAC registers Write Access rules */ 31/* EMAC registers Write Access rules */
31struct emac_regs { 32struct emac_regs {
@@ -106,15 +107,15 @@ struct emac_regs {
106/* 107/*
107 * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY) 108 * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY)
108 */ 109 */
109#define PHY_MODE_NA 0 110#define PHY_MODE_NA PHY_INTERFACE_MODE_NA
110#define PHY_MODE_MII 1 111#define PHY_MODE_MII PHY_INTERFACE_MODE_MII
111#define PHY_MODE_RMII 2 112#define PHY_MODE_RMII PHY_INTERFACE_MODE_RMII
112#define PHY_MODE_SMII 3 113#define PHY_MODE_SMII PHY_INTERFACE_MODE_SMII
113#define PHY_MODE_RGMII 4 114#define PHY_MODE_RGMII PHY_INTERFACE_MODE_RGMII
114#define PHY_MODE_TBI 5 115#define PHY_MODE_TBI PHY_INTERFACE_MODE_TBI
115#define PHY_MODE_GMII 6 116#define PHY_MODE_GMII PHY_INTERFACE_MODE_GMII
116#define PHY_MODE_RTBI 7 117#define PHY_MODE_RTBI PHY_INTERFACE_MODE_RTBI
117#define PHY_MODE_SGMII 8 118#define PHY_MODE_SGMII PHY_INTERFACE_MODE_SGMII
118 119
119/* EMACx_MR0 */ 120/* EMACx_MR0 */
120#define EMAC_MR0_RXI 0x80000000 121#define EMAC_MR0_RXI 0x80000000
diff --git a/drivers/net/ibm_newemac/phy.c b/drivers/net/ibm_newemac/phy.c
index ac9d964e59ec..ab4e5969fe65 100644
--- a/drivers/net/ibm_newemac/phy.c
+++ b/drivers/net/ibm_newemac/phy.c
@@ -28,12 +28,15 @@
28#include "emac.h" 28#include "emac.h"
29#include "phy.h" 29#include "phy.h"
30 30
31static inline int phy_read(struct mii_phy *phy, int reg) 31#define phy_read _phy_read
32#define phy_write _phy_write
33
34static inline int _phy_read(struct mii_phy *phy, int reg)
32{ 35{
33 return phy->mdio_read(phy->dev, phy->address, reg); 36 return phy->mdio_read(phy->dev, phy->address, reg);
34} 37}
35 38
36static inline void phy_write(struct mii_phy *phy, int reg, int val) 39static inline void _phy_write(struct mii_phy *phy, int reg, int val)
37{ 40{
38 phy->mdio_write(phy->dev, phy->address, reg, val); 41 phy->mdio_write(phy->dev, phy->address, reg, val);
39} 42}
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 838c5b673767..ba99af05bf62 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -43,7 +43,7 @@
43#include <linux/ipv6.h> 43#include <linux/ipv6.h>
44#include <linux/slab.h> 44#include <linux/slab.h>
45#include <asm/hvcall.h> 45#include <asm/hvcall.h>
46#include <asm/atomic.h> 46#include <linux/atomic.h>
47#include <asm/vio.h> 47#include <asm/vio.h>
48#include <asm/iommu.h> 48#include <asm/iommu.h>
49#include <asm/firmware.h> 49#include <asm/firmware.h>
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 6e82dd32e806..46b5f5fd686b 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -183,7 +183,7 @@ static void ifb_setup(struct net_device *dev)
183 183
184 dev->flags |= IFF_NOARP; 184 dev->flags |= IFF_NOARP;
185 dev->flags &= ~IFF_MULTICAST; 185 dev->flags &= ~IFF_MULTICAST;
186 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 186 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
187 random_ether_addr(dev->dev_addr); 187 random_ether_addr(dev->dev_addr);
188} 188}
189 189
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index ba631fcece34..05172c39a0ce 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -572,7 +572,7 @@ void macvlan_common_setup(struct net_device *dev)
572{ 572{
573 ether_setup(dev); 573 ether_setup(dev);
574 574
575 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 575 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
576 dev->netdev_ops = &macvlan_netdev_ops; 576 dev->netdev_ops = &macvlan_netdev_ops;
577 dev->destructor = free_netdev; 577 dev->destructor = free_netdev;
578 dev->header_ops = &macvlan_hard_header_ops, 578 dev->header_ops = &macvlan_hard_header_ops,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a47595760751..3cbda0851f83 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -33,7 +33,7 @@
33#include <linux/timer.h> 33#include <linux/timer.h>
34#include <linux/workqueue.h> 34#include <linux/workqueue.h>
35 35
36#include <asm/atomic.h> 36#include <linux/atomic.h>
37#include <asm/io.h> 37#include <asm/io.h>
38#include <asm/irq.h> 38#include <asm/irq.h>
39#include <asm/uaccess.h> 39#include <asm/uaccess.h>
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 4609bc0e2f56..10e5d985afa3 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -48,7 +48,7 @@
48#include <linux/slab.h> 48#include <linux/slab.h>
49#include <asm/unaligned.h> 49#include <asm/unaligned.h>
50#include <net/slhc_vj.h> 50#include <net/slhc_vj.h>
51#include <asm/atomic.h> 51#include <linux/atomic.h>
52 52
53#include <linux/nsproxy.h> 53#include <linux/nsproxy.h>
54#include <net/net_namespace.h> 54#include <net/net_namespace.h>
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 803576568154..dc3fbf61910b 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -190,6 +190,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
190 190
191/* minimum number of free TX descriptors required to wake up TX process */ 191/* minimum number of free TX descriptors required to wake up TX process */
192#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) 192#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
193#define TG3_TX_BD_DMA_MAX 4096
193 194
194#define TG3_RAW_IP_ALIGN 2 195#define TG3_RAW_IP_ALIGN 2
195 196
@@ -4824,7 +4825,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
4824 txq = netdev_get_tx_queue(tp->dev, index); 4825 txq = netdev_get_tx_queue(tp->dev, index);
4825 4826
4826 while (sw_idx != hw_idx) { 4827 while (sw_idx != hw_idx) {
4827 struct ring_info *ri = &tnapi->tx_buffers[sw_idx]; 4828 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4828 struct sk_buff *skb = ri->skb; 4829 struct sk_buff *skb = ri->skb;
4829 int i, tx_bug = 0; 4830 int i, tx_bug = 0;
4830 4831
@@ -4840,6 +4841,12 @@ static void tg3_tx(struct tg3_napi *tnapi)
4840 4841
4841 ri->skb = NULL; 4842 ri->skb = NULL;
4842 4843
4844 while (ri->fragmented) {
4845 ri->fragmented = false;
4846 sw_idx = NEXT_TX(sw_idx);
4847 ri = &tnapi->tx_buffers[sw_idx];
4848 }
4849
4843 sw_idx = NEXT_TX(sw_idx); 4850 sw_idx = NEXT_TX(sw_idx);
4844 4851
4845 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 4852 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -4851,6 +4858,13 @@ static void tg3_tx(struct tg3_napi *tnapi)
4851 dma_unmap_addr(ri, mapping), 4858 dma_unmap_addr(ri, mapping),
4852 skb_shinfo(skb)->frags[i].size, 4859 skb_shinfo(skb)->frags[i].size,
4853 PCI_DMA_TODEVICE); 4860 PCI_DMA_TODEVICE);
4861
4862 while (ri->fragmented) {
4863 ri->fragmented = false;
4864 sw_idx = NEXT_TX(sw_idx);
4865 ri = &tnapi->tx_buffers[sw_idx];
4866 }
4867
4854 sw_idx = NEXT_TX(sw_idx); 4868 sw_idx = NEXT_TX(sw_idx);
4855 } 4869 }
4856 4870
@@ -5901,40 +5915,100 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5901#endif 5915#endif
5902} 5916}
5903 5917
5904static void tg3_set_txd(struct tg3_napi *tnapi, int entry, 5918static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
5905 dma_addr_t mapping, int len, u32 flags, 5919 dma_addr_t mapping, u32 len, u32 flags,
5906 u32 mss_and_is_end) 5920 u32 mss, u32 vlan)
5921{
5922 txbd->addr_hi = ((u64) mapping >> 32);
5923 txbd->addr_lo = ((u64) mapping & 0xffffffff);
5924 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
5925 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
5926}
5927
5928static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
5929 dma_addr_t map, u32 len, u32 flags,
5930 u32 mss, u32 vlan)
5907{ 5931{
5908 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry]; 5932 struct tg3 *tp = tnapi->tp;
5909 int is_end = (mss_and_is_end & 0x1); 5933 bool hwbug = false;
5910 u32 mss = (mss_and_is_end >> 1); 5934
5911 u32 vlan_tag = 0; 5935 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
5936 hwbug = 1;
5937
5938 if (tg3_4g_overflow_test(map, len))
5939 hwbug = 1;
5940
5941 if (tg3_40bit_overflow_test(tp, map, len))
5942 hwbug = 1;
5943
5944 if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
5945 u32 tmp_flag = flags & ~TXD_FLAG_END;
5946 while (len > TG3_TX_BD_DMA_MAX) {
5947 u32 frag_len = TG3_TX_BD_DMA_MAX;
5948 len -= TG3_TX_BD_DMA_MAX;
5949
5950 if (len) {
5951 tnapi->tx_buffers[*entry].fragmented = true;
5952 /* Avoid the 8byte DMA problem */
5953 if (len <= 8) {
5954 len += TG3_TX_BD_DMA_MAX / 2;
5955 frag_len = TG3_TX_BD_DMA_MAX / 2;
5956 }
5957 } else
5958 tmp_flag = flags;
5959
5960 if (*budget) {
5961 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
5962 frag_len, tmp_flag, mss, vlan);
5963 (*budget)--;
5964 *entry = NEXT_TX(*entry);
5965 } else {
5966 hwbug = 1;
5967 break;
5968 }
5969
5970 map += frag_len;
5971 }
5912 5972
5913 if (is_end) 5973 if (len) {
5914 flags |= TXD_FLAG_END; 5974 if (*budget) {
5915 if (flags & TXD_FLAG_VLAN) { 5975 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
5916 vlan_tag = flags >> 16; 5976 len, flags, mss, vlan);
5917 flags &= 0xffff; 5977 (*budget)--;
5978 *entry = NEXT_TX(*entry);
5979 } else {
5980 hwbug = 1;
5981 }
5982 }
5983 } else {
5984 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
5985 len, flags, mss, vlan);
5986 *entry = NEXT_TX(*entry);
5918 } 5987 }
5919 vlan_tag |= (mss << TXD_MSS_SHIFT);
5920 5988
5921 txd->addr_hi = ((u64) mapping >> 32); 5989 return hwbug;
5922 txd->addr_lo = ((u64) mapping & 0xffffffff);
5923 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5924 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5925} 5990}
5926 5991
5927static void tg3_skb_error_unmap(struct tg3_napi *tnapi, 5992static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
5928 struct sk_buff *skb, int last)
5929{ 5993{
5930 int i; 5994 int i;
5931 u32 entry = tnapi->tx_prod; 5995 struct sk_buff *skb;
5932 struct ring_info *txb = &tnapi->tx_buffers[entry]; 5996 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
5997
5998 skb = txb->skb;
5999 txb->skb = NULL;
5933 6000
5934 pci_unmap_single(tnapi->tp->pdev, 6001 pci_unmap_single(tnapi->tp->pdev,
5935 dma_unmap_addr(txb, mapping), 6002 dma_unmap_addr(txb, mapping),
5936 skb_headlen(skb), 6003 skb_headlen(skb),
5937 PCI_DMA_TODEVICE); 6004 PCI_DMA_TODEVICE);
6005
6006 while (txb->fragmented) {
6007 txb->fragmented = false;
6008 entry = NEXT_TX(entry);
6009 txb = &tnapi->tx_buffers[entry];
6010 }
6011
5938 for (i = 0; i < last; i++) { 6012 for (i = 0; i < last; i++) {
5939 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6013 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5940 6014
@@ -5944,18 +6018,24 @@ static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5944 pci_unmap_page(tnapi->tp->pdev, 6018 pci_unmap_page(tnapi->tp->pdev,
5945 dma_unmap_addr(txb, mapping), 6019 dma_unmap_addr(txb, mapping),
5946 frag->size, PCI_DMA_TODEVICE); 6020 frag->size, PCI_DMA_TODEVICE);
6021
6022 while (txb->fragmented) {
6023 txb->fragmented = false;
6024 entry = NEXT_TX(entry);
6025 txb = &tnapi->tx_buffers[entry];
6026 }
5947 } 6027 }
5948} 6028}
5949 6029
5950/* Workaround 4GB and 40-bit hardware DMA bugs. */ 6030/* Workaround 4GB and 40-bit hardware DMA bugs. */
5951static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, 6031static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5952 struct sk_buff *skb, 6032 struct sk_buff *skb,
5953 u32 base_flags, u32 mss) 6033 u32 *entry, u32 *budget,
6034 u32 base_flags, u32 mss, u32 vlan)
5954{ 6035{
5955 struct tg3 *tp = tnapi->tp; 6036 struct tg3 *tp = tnapi->tp;
5956 struct sk_buff *new_skb; 6037 struct sk_buff *new_skb;
5957 dma_addr_t new_addr = 0; 6038 dma_addr_t new_addr = 0;
5958 u32 entry = tnapi->tx_prod;
5959 int ret = 0; 6039 int ret = 0;
5960 6040
5961 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) 6041 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
@@ -5976,24 +6056,22 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5976 PCI_DMA_TODEVICE); 6056 PCI_DMA_TODEVICE);
5977 /* Make sure the mapping succeeded */ 6057 /* Make sure the mapping succeeded */
5978 if (pci_dma_mapping_error(tp->pdev, new_addr)) { 6058 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5979 ret = -1;
5980 dev_kfree_skb(new_skb); 6059 dev_kfree_skb(new_skb);
5981
5982 /* Make sure new skb does not cross any 4G boundaries.
5983 * Drop the packet if it does.
5984 */
5985 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
5986 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5987 PCI_DMA_TODEVICE);
5988 ret = -1; 6060 ret = -1;
5989 dev_kfree_skb(new_skb);
5990 } else { 6061 } else {
5991 tnapi->tx_buffers[entry].skb = new_skb; 6062 base_flags |= TXD_FLAG_END;
5992 dma_unmap_addr_set(&tnapi->tx_buffers[entry], 6063
6064 tnapi->tx_buffers[*entry].skb = new_skb;
6065 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
5993 mapping, new_addr); 6066 mapping, new_addr);
5994 6067
5995 tg3_set_txd(tnapi, entry, new_addr, new_skb->len, 6068 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
5996 base_flags, 1 | (mss << 1)); 6069 new_skb->len, base_flags,
6070 mss, vlan)) {
6071 tg3_tx_skb_unmap(tnapi, *entry, 0);
6072 dev_kfree_skb(new_skb);
6073 ret = -1;
6074 }
5997 } 6075 }
5998 } 6076 }
5999 6077
@@ -6051,7 +6129,8 @@ tg3_tso_bug_end:
6051static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 6129static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6052{ 6130{
6053 struct tg3 *tp = netdev_priv(dev); 6131 struct tg3 *tp = netdev_priv(dev);
6054 u32 len, entry, base_flags, mss; 6132 u32 len, entry, base_flags, mss, vlan = 0;
6133 u32 budget;
6055 int i = -1, would_hit_hwbug; 6134 int i = -1, would_hit_hwbug;
6056 dma_addr_t mapping; 6135 dma_addr_t mapping;
6057 struct tg3_napi *tnapi; 6136 struct tg3_napi *tnapi;
@@ -6063,12 +6142,14 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6063 if (tg3_flag(tp, ENABLE_TSS)) 6142 if (tg3_flag(tp, ENABLE_TSS))
6064 tnapi++; 6143 tnapi++;
6065 6144
6145 budget = tg3_tx_avail(tnapi);
6146
6066 /* We are running in BH disabled context with netif_tx_lock 6147 /* We are running in BH disabled context with netif_tx_lock
6067 * and TX reclaim runs via tp->napi.poll inside of a software 6148 * and TX reclaim runs via tp->napi.poll inside of a software
6068 * interrupt. Furthermore, IRQ processing runs lockless so we have 6149 * interrupt. Furthermore, IRQ processing runs lockless so we have
6069 * no IRQ context deadlocks to worry about either. Rejoice! 6150 * no IRQ context deadlocks to worry about either. Rejoice!
6070 */ 6151 */
6071 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { 6152 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6072 if (!netif_tx_queue_stopped(txq)) { 6153 if (!netif_tx_queue_stopped(txq)) {
6073 netif_tx_stop_queue(txq); 6154 netif_tx_stop_queue(txq);
6074 6155
@@ -6153,9 +6234,12 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6153 } 6234 }
6154 } 6235 }
6155 6236
6156 if (vlan_tx_tag_present(skb)) 6237#ifdef BCM_KERNEL_SUPPORTS_8021Q
6157 base_flags |= (TXD_FLAG_VLAN | 6238 if (vlan_tx_tag_present(skb)) {
6158 (vlan_tx_tag_get(skb) << 16)); 6239 base_flags |= TXD_FLAG_VLAN;
6240 vlan = vlan_tx_tag_get(skb);
6241 }
6242#endif
6159 6243
6160 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 6244 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6161 !mss && skb->len > VLAN_ETH_FRAME_LEN) 6245 !mss && skb->len > VLAN_ETH_FRAME_LEN)
@@ -6174,25 +6258,23 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6174 6258
6175 would_hit_hwbug = 0; 6259 would_hit_hwbug = 0;
6176 6260
6177 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6178 would_hit_hwbug = 1;
6179
6180 if (tg3_4g_overflow_test(mapping, len))
6181 would_hit_hwbug = 1;
6182
6183 if (tg3_40bit_overflow_test(tp, mapping, len))
6184 would_hit_hwbug = 1;
6185
6186 if (tg3_flag(tp, 5701_DMA_BUG)) 6261 if (tg3_flag(tp, 5701_DMA_BUG))
6187 would_hit_hwbug = 1; 6262 would_hit_hwbug = 1;
6188 6263
6189 tg3_set_txd(tnapi, entry, mapping, len, base_flags, 6264 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6190 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); 6265 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6191 6266 mss, vlan))
6192 entry = NEXT_TX(entry); 6267 would_hit_hwbug = 1;
6193 6268
6194 /* Now loop through additional data fragments, and queue them. */ 6269 /* Now loop through additional data fragments, and queue them. */
6195 if (skb_shinfo(skb)->nr_frags > 0) { 6270 if (skb_shinfo(skb)->nr_frags > 0) {
6271 u32 tmp_mss = mss;
6272
6273 if (!tg3_flag(tp, HW_TSO_1) &&
6274 !tg3_flag(tp, HW_TSO_2) &&
6275 !tg3_flag(tp, HW_TSO_3))
6276 tmp_mss = 0;
6277
6196 last = skb_shinfo(skb)->nr_frags - 1; 6278 last = skb_shinfo(skb)->nr_frags - 1;
6197 for (i = 0; i <= last; i++) { 6279 for (i = 0; i <= last; i++) {
6198 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6280 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -6209,39 +6291,25 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6209 if (pci_dma_mapping_error(tp->pdev, mapping)) 6291 if (pci_dma_mapping_error(tp->pdev, mapping))
6210 goto dma_error; 6292 goto dma_error;
6211 6293
6212 if (tg3_flag(tp, SHORT_DMA_BUG) && 6294 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6213 len <= 8) 6295 len, base_flags |
6296 ((i == last) ? TXD_FLAG_END : 0),
6297 tmp_mss, vlan))
6214 would_hit_hwbug = 1; 6298 would_hit_hwbug = 1;
6215
6216 if (tg3_4g_overflow_test(mapping, len))
6217 would_hit_hwbug = 1;
6218
6219 if (tg3_40bit_overflow_test(tp, mapping, len))
6220 would_hit_hwbug = 1;
6221
6222 if (tg3_flag(tp, HW_TSO_1) ||
6223 tg3_flag(tp, HW_TSO_2) ||
6224 tg3_flag(tp, HW_TSO_3))
6225 tg3_set_txd(tnapi, entry, mapping, len,
6226 base_flags, (i == last)|(mss << 1));
6227 else
6228 tg3_set_txd(tnapi, entry, mapping, len,
6229 base_flags, (i == last));
6230
6231 entry = NEXT_TX(entry);
6232 } 6299 }
6233 } 6300 }
6234 6301
6235 if (would_hit_hwbug) { 6302 if (would_hit_hwbug) {
6236 tg3_skb_error_unmap(tnapi, skb, i); 6303 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6237 6304
6238 /* If the workaround fails due to memory/mapping 6305 /* If the workaround fails due to memory/mapping
6239 * failure, silently drop this packet. 6306 * failure, silently drop this packet.
6240 */ 6307 */
6241 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss)) 6308 entry = tnapi->tx_prod;
6309 budget = tg3_tx_avail(tnapi);
6310 if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
6311 base_flags, mss, vlan))
6242 goto out_unlock; 6312 goto out_unlock;
6243
6244 entry = NEXT_TX(tnapi->tx_prod);
6245 } 6313 }
6246 6314
6247 skb_tx_timestamp(skb); 6315 skb_tx_timestamp(skb);
@@ -6269,7 +6337,7 @@ out_unlock:
6269 return NETDEV_TX_OK; 6337 return NETDEV_TX_OK;
6270 6338
6271dma_error: 6339dma_error:
6272 tg3_skb_error_unmap(tnapi, skb, i); 6340 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6273 dev_kfree_skb(skb); 6341 dev_kfree_skb(skb);
6274 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; 6342 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6275 return NETDEV_TX_OK; 6343 return NETDEV_TX_OK;
@@ -6602,35 +6670,13 @@ static void tg3_free_rings(struct tg3 *tp)
6602 if (!tnapi->tx_buffers) 6670 if (!tnapi->tx_buffers)
6603 continue; 6671 continue;
6604 6672
6605 for (i = 0; i < TG3_TX_RING_SIZE; ) { 6673 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
6606 struct ring_info *txp; 6674 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
6607 struct sk_buff *skb;
6608 unsigned int k;
6609
6610 txp = &tnapi->tx_buffers[i];
6611 skb = txp->skb;
6612 6675
6613 if (skb == NULL) { 6676 if (!skb)
6614 i++;
6615 continue; 6677 continue;
6616 }
6617
6618 pci_unmap_single(tp->pdev,
6619 dma_unmap_addr(txp, mapping),
6620 skb_headlen(skb),
6621 PCI_DMA_TODEVICE);
6622 txp->skb = NULL;
6623 6678
6624 i++; 6679 tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
6625
6626 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6627 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6628 pci_unmap_page(tp->pdev,
6629 dma_unmap_addr(txp, mapping),
6630 skb_shinfo(skb)->frags[k].size,
6631 PCI_DMA_TODEVICE);
6632 i++;
6633 }
6634 6680
6635 dev_kfree_skb_any(skb); 6681 dev_kfree_skb_any(skb);
6636 } 6682 }
@@ -6762,9 +6808,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6762 */ 6808 */
6763 if ((!i && !tg3_flag(tp, ENABLE_TSS)) || 6809 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6764 (i && tg3_flag(tp, ENABLE_TSS))) { 6810 (i && tg3_flag(tp, ENABLE_TSS))) {
6765 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) * 6811 tnapi->tx_buffers = kzalloc(
6766 TG3_TX_RING_SIZE, 6812 sizeof(struct tg3_tx_ring_info) *
6767 GFP_KERNEL); 6813 TG3_TX_RING_SIZE, GFP_KERNEL);
6768 if (!tnapi->tx_buffers) 6814 if (!tnapi->tx_buffers)
6769 goto err_out; 6815 goto err_out;
6770 6816
@@ -8360,7 +8406,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8360 /* Program the jumbo buffer descriptor ring control 8406 /* Program the jumbo buffer descriptor ring control
8361 * blocks on those devices that have them. 8407 * blocks on those devices that have them.
8362 */ 8408 */
8363 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || 8409 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8364 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { 8410 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8365 8411
8366 if (tg3_flag(tp, JUMBO_RING_ENABLE)) { 8412 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
@@ -11204,6 +11250,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11204{ 11250{
11205 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key; 11251 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11206 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val; 11252 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11253 u32 budget;
11207 struct sk_buff *skb, *rx_skb; 11254 struct sk_buff *skb, *rx_skb;
11208 u8 *tx_data; 11255 u8 *tx_data;
11209 dma_addr_t map; 11256 dma_addr_t map;
@@ -11363,6 +11410,10 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11363 return -EIO; 11410 return -EIO;
11364 } 11411 }
11365 11412
11413 val = tnapi->tx_prod;
11414 tnapi->tx_buffers[val].skb = skb;
11415 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11416
11366 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 11417 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11367 rnapi->coal_now); 11418 rnapi->coal_now);
11368 11419
@@ -11370,8 +11421,13 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11370 11421
11371 rx_start_idx = rnapi->hw_status->idx[0].rx_producer; 11422 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11372 11423
11373 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 11424 budget = tg3_tx_avail(tnapi);
11374 base_flags, (mss << 1) | 1); 11425 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11426 base_flags | TXD_FLAG_END, mss, 0)) {
11427 tnapi->tx_buffers[val].skb = NULL;
11428 dev_kfree_skb(skb);
11429 return -EIO;
11430 }
11375 11431
11376 tnapi->tx_prod++; 11432 tnapi->tx_prod++;
11377 11433
@@ -11394,7 +11450,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11394 break; 11450 break;
11395 } 11451 }
11396 11452
11397 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); 11453 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
11398 dev_kfree_skb(skb); 11454 dev_kfree_skb(skb);
11399 11455
11400 if (tx_idx != tnapi->tx_prod) 11456 if (tx_idx != tnapi->tx_prod)
@@ -13817,7 +13873,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13817 tg3_flag_set(tp, 5705_PLUS); 13873 tg3_flag_set(tp, 5705_PLUS);
13818 13874
13819 /* Determine TSO capabilities */ 13875 /* Determine TSO capabilities */
13820 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) 13876 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13821 ; /* Do nothing. HW bug. */ 13877 ; /* Do nothing. HW bug. */
13822 else if (tg3_flag(tp, 57765_PLUS)) 13878 else if (tg3_flag(tp, 57765_PLUS))
13823 tg3_flag_set(tp, HW_TSO_3); 13879 tg3_flag_set(tp, HW_TSO_3);
@@ -13880,11 +13936,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13880 if (tg3_flag(tp, 5755_PLUS)) 13936 if (tg3_flag(tp, 5755_PLUS))
13881 tg3_flag_set(tp, SHORT_DMA_BUG); 13937 tg3_flag_set(tp, SHORT_DMA_BUG);
13882 13938
13939 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13940 tg3_flag_set(tp, 4K_FIFO_LIMIT);
13941
13883 if (tg3_flag(tp, 5717_PLUS)) 13942 if (tg3_flag(tp, 5717_PLUS))
13884 tg3_flag_set(tp, LRG_PROD_RING_CAP); 13943 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13885 13944
13886 if (tg3_flag(tp, 57765_PLUS) && 13945 if (tg3_flag(tp, 57765_PLUS) &&
13887 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719) 13946 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
13888 tg3_flag_set(tp, USE_JUMBO_BDFLAG); 13947 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13889 13948
13890 if (!tg3_flag(tp, 5705_PLUS) || 13949 if (!tg3_flag(tp, 5705_PLUS) ||
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 691539ba17b3..2ea456dd5880 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2652,6 +2652,12 @@ struct ring_info {
2652 DEFINE_DMA_UNMAP_ADDR(mapping); 2652 DEFINE_DMA_UNMAP_ADDR(mapping);
2653}; 2653};
2654 2654
2655struct tg3_tx_ring_info {
2656 struct sk_buff *skb;
2657 DEFINE_DMA_UNMAP_ADDR(mapping);
2658 bool fragmented;
2659};
2660
2655struct tg3_link_config { 2661struct tg3_link_config {
2656 /* Describes what we're trying to get. */ 2662 /* Describes what we're trying to get. */
2657 u32 advertising; 2663 u32 advertising;
@@ -2816,7 +2822,7 @@ struct tg3_napi {
2816 u32 last_tx_cons; 2822 u32 last_tx_cons;
2817 u32 prodmbox; 2823 u32 prodmbox;
2818 struct tg3_tx_buffer_desc *tx_ring; 2824 struct tg3_tx_buffer_desc *tx_ring;
2819 struct ring_info *tx_buffers; 2825 struct tg3_tx_ring_info *tx_buffers;
2820 2826
2821 dma_addr_t status_mapping; 2827 dma_addr_t status_mapping;
2822 dma_addr_t rx_rcb_mapping; 2828 dma_addr_t rx_rcb_mapping;
@@ -2899,6 +2905,7 @@ enum TG3_FLAGS {
2899 TG3_FLAG_57765_PLUS, 2905 TG3_FLAG_57765_PLUS,
2900 TG3_FLAG_APE_HAS_NCSI, 2906 TG3_FLAG_APE_HAS_NCSI,
2901 TG3_FLAG_5717_PLUS, 2907 TG3_FLAG_5717_PLUS,
2908 TG3_FLAG_4K_FIFO_LIMIT,
2902 2909
2903 /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */ 2910 /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
2904 TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ 2911 TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 9a6b3824da14..71f3d1a35b74 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -528,6 +528,7 @@ static void tun_net_init(struct net_device *dev)
528 dev->netdev_ops = &tap_netdev_ops; 528 dev->netdev_ops = &tap_netdev_ops;
529 /* Ethernet TAP Device */ 529 /* Ethernet TAP Device */
530 ether_setup(dev); 530 ether_setup(dev);
531 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
531 532
532 random_ether_addr(dev->dev_addr); 533 random_ether_addr(dev->dev_addr);
533 534
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 52502883523e..c5c4b4def7fb 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -314,12 +314,11 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
314 skb_pull(skb, 4); 314 skb_pull(skb, 4);
315 315
316 while (skb->len > 0) { 316 while (skb->len > 0) {
317 if ((short)(header & 0x0000ffff) != 317 if ((header & 0x07ff) != ((~header >> 16) & 0x07ff))
318 ~((short)((header & 0xffff0000) >> 16))) {
319 netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n"); 318 netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
320 } 319
321 /* get the packet length */ 320 /* get the packet length */
322 size = (u16) (header & 0x0000ffff); 321 size = (u16) (header & 0x000007ff);
323 322
324 if ((skb->len) - ((size + 1) & 0xfffe) == 0) { 323 if ((skb->len) - ((size + 1) & 0xfffe) == 0) {
325 u8 alignment = (unsigned long)skb->data & 0x3; 324 u8 alignment = (unsigned long)skb->data & 0x3;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 7f78db7bd68d..5b23767ea817 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -263,6 +263,8 @@ static void veth_setup(struct net_device *dev)
263{ 263{
264 ether_setup(dev); 264 ether_setup(dev);
265 265
266 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
267
266 dev->netdev_ops = &veth_netdev_ops; 268 dev->netdev_ops = &veth_netdev_ops;
267 dev->ethtool_ops = &veth_ethtool_ops; 269 dev->ethtool_ops = &veth_ethtool_ops;
268 dev->features |= NETIF_F_LLTX; 270 dev->features |= NETIF_F_LLTX;
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index b25c9229a6a9..eb2028187fbe 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1074,9 +1074,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1074 1074
1075 used = pvc_is_used(pvc); 1075 used = pvc_is_used(pvc);
1076 1076
1077 if (type == ARPHRD_ETHER) 1077 if (type == ARPHRD_ETHER) {
1078 dev = alloc_netdev(0, "pvceth%d", ether_setup); 1078 dev = alloc_netdev(0, "pvceth%d", ether_setup);
1079 else 1079 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1080 } else
1080 dev = alloc_netdev(0, "pvc%d", pvc_setup); 1081 dev = alloc_netdev(0, "pvc%d", pvc_setup);
1081 1082
1082 if (!dev) { 1083 if (!dev) {
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 5eacc653a94d..c421a6141854 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -155,7 +155,7 @@
155#include <linux/netdevice.h> 155#include <linux/netdevice.h>
156#include <linux/completion.h> 156#include <linux/completion.h>
157#include <linux/rwsem.h> 157#include <linux/rwsem.h>
158#include <asm/atomic.h> 158#include <linux/atomic.h>
159#include <net/wimax.h> 159#include <net/wimax.h>
160#include <linux/wimax/i2400m.h> 160#include <linux/wimax/i2400m.h>
161#include <asm/byteorder.h> 161#include <asm/byteorder.h>
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 55cf71fbffe3..e1b3e3c134fd 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2823,6 +2823,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2823 dev->wireless_data = &ai->wireless_data; 2823 dev->wireless_data = &ai->wireless_data;
2824 dev->irq = irq; 2824 dev->irq = irq;
2825 dev->base_addr = port; 2825 dev->base_addr = port;
2826 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2826 2827
2827 SET_NETDEV_DEV(dev, dmdev); 2828 SET_NETDEV_DEV(dev, dmdev);
2828 2829
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index d2293dcc117f..3cab843afb05 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -28,7 +28,7 @@ config B43
28 28
29config B43_BCMA 29config B43_BCMA
30 bool "Support for BCMA bus" 30 bool "Support for BCMA bus"
31 depends on B43 && BCMA && BROKEN 31 depends on B43 && BCMA
32 default y 32 default y
33 33
34config B43_SSB 34config B43_SSB
diff --git a/drivers/net/wireless/b43/bus.c b/drivers/net/wireless/b43/bus.c
index 64c3f65ff8c0..05f6c7bff6ab 100644
--- a/drivers/net/wireless/b43/bus.c
+++ b/drivers/net/wireless/b43/bus.c
@@ -244,10 +244,12 @@ void b43_bus_set_wldev(struct b43_bus_dev *dev, void *wldev)
244#ifdef CONFIG_B43_BCMA 244#ifdef CONFIG_B43_BCMA
245 case B43_BUS_BCMA: 245 case B43_BUS_BCMA:
246 bcma_set_drvdata(dev->bdev, wldev); 246 bcma_set_drvdata(dev->bdev, wldev);
247 break;
247#endif 248#endif
248#ifdef CONFIG_B43_SSB 249#ifdef CONFIG_B43_SSB
249 case B43_BUS_SSB: 250 case B43_BUS_SSB:
250 ssb_set_drvdata(dev->sdev, wldev); 251 ssb_set_drvdata(dev->sdev, wldev);
252 break;
251#endif 253#endif
252 } 254 }
253} 255}
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 032d46674f6b..26f1ab840cc7 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5350,6 +5350,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
5350{ 5350{
5351 struct b43_wl *wl = ssb_get_devtypedata(sdev); 5351 struct b43_wl *wl = ssb_get_devtypedata(sdev);
5352 struct b43_wldev *wldev = ssb_get_drvdata(sdev); 5352 struct b43_wldev *wldev = ssb_get_drvdata(sdev);
5353 struct b43_bus_dev *dev = wldev->dev;
5353 5354
5354 /* We must cancel any work here before unregistering from ieee80211, 5355 /* We must cancel any work here before unregistering from ieee80211,
5355 * as the ieee80211 unreg will destroy the workqueue. */ 5356 * as the ieee80211 unreg will destroy the workqueue. */
@@ -5365,14 +5366,14 @@ static void b43_ssb_remove(struct ssb_device *sdev)
5365 ieee80211_unregister_hw(wl->hw); 5366 ieee80211_unregister_hw(wl->hw);
5366 } 5367 }
5367 5368
5368 b43_one_core_detach(wldev->dev); 5369 b43_one_core_detach(dev);
5369 5370
5370 if (list_empty(&wl->devlist)) { 5371 if (list_empty(&wl->devlist)) {
5371 b43_leds_unregister(wl); 5372 b43_leds_unregister(wl);
5372 /* Last core on the chip unregistered. 5373 /* Last core on the chip unregistered.
5373 * We can destroy common struct b43_wl. 5374 * We can destroy common struct b43_wl.
5374 */ 5375 */
5375 b43_wireless_exit(wldev->dev, wl); 5376 b43_wireless_exit(dev, wl);
5376 } 5377 }
5377} 5378}
5378 5379
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 17a130d18dc9..a610a352102a 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -8,7 +8,7 @@
8#include <linux/stringify.h> 8#include <linux/stringify.h>
9#include <linux/netdevice.h> 9#include <linux/netdevice.h>
10#include <linux/pci.h> 10#include <linux/pci.h>
11#include <asm/atomic.h> 11#include <linux/atomic.h>
12#include <linux/io.h> 12#include <linux/io.h>
13 13
14#include <linux/ssb/ssb.h> 14#include <linux/ssb/ssb.h>
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index f89c34226288..686941c242fc 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -5,7 +5,7 @@
5#include <linux/spinlock.h> 5#include <linux/spinlock.h>
6#include <linux/workqueue.h> 6#include <linux/workqueue.h>
7#include <linux/linkage.h> 7#include <linux/linkage.h>
8#include <asm/atomic.h> 8#include <linux/atomic.h>
9 9
10#include "b43legacy.h" 10#include "b43legacy.h"
11 11
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index d5084829c9e5..89a116fba1de 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -855,6 +855,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
855 855
856 iface = netdev_priv(dev); 856 iface = netdev_priv(dev);
857 ether_setup(dev); 857 ether_setup(dev);
858 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
858 859
859 /* kernel callbacks */ 860 /* kernel callbacks */
860 if (iface) { 861 if (iface) {
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 037231540719..c77e0543e502 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -1596,7 +1596,7 @@ static void pn533_disconnect(struct usb_interface *interface)
1596 usb_free_urb(dev->out_urb); 1596 usb_free_urb(dev->out_urb);
1597 kfree(dev); 1597 kfree(dev);
1598 1598
1599 nfc_dev_info(&dev->interface->dev, "NXP PN533 NFC device disconnected"); 1599 nfc_dev_info(&interface->dev, "NXP PN533 NFC device disconnected");
1600} 1600}
1601 1601
1602static struct usb_driver pn533_driver = { 1602static struct usb_driver pn533_driver = {
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index 86f334a2769c..bb184717588f 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -8,6 +8,51 @@
8#include <linux/etherdevice.h> 8#include <linux/etherdevice.h>
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/of_net.h> 10#include <linux/of_net.h>
11#include <linux/phy.h>
12
13/**
14 * It maps 'enum phy_interface_t' found in include/linux/phy.h
15 * into the device tree binding of 'phy-mode', so that Ethernet
16 * device driver can get phy interface from device tree.
17 */
18static const char *phy_modes[] = {
19 [PHY_INTERFACE_MODE_NA] = "",
20 [PHY_INTERFACE_MODE_MII] = "mii",
21 [PHY_INTERFACE_MODE_GMII] = "gmii",
22 [PHY_INTERFACE_MODE_SGMII] = "sgmii",
23 [PHY_INTERFACE_MODE_TBI] = "tbi",
24 [PHY_INTERFACE_MODE_RMII] = "rmii",
25 [PHY_INTERFACE_MODE_RGMII] = "rgmii",
26 [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
27 [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
28 [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
29 [PHY_INTERFACE_MODE_RTBI] = "rtbi",
30 [PHY_INTERFACE_MODE_SMII] = "smii",
31};
32
33/**
34 * of_get_phy_mode - Get phy mode for given device_node
35 * @np: Pointer to the given device_node
36 *
37 * The function gets phy interface string from property 'phy-mode',
38 * and return its index in phy_modes table, or errno in error case.
39 */
40const int of_get_phy_mode(struct device_node *np)
41{
42 const char *pm;
43 int err, i;
44
45 err = of_property_read_string(np, "phy-mode", &pm);
46 if (err < 0)
47 return err;
48
49 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
50 if (!strcasecmp(pm, phy_modes[i]))
51 return i;
52
53 return -ENODEV;
54}
55EXPORT_SYMBOL_GPL(of_get_phy_mode);
11 56
12/** 57/**
13 * Search the device tree for the best MAC address to use. 'mac-address' is 58 * Search the device tree for the best MAC address to use. 'mac-address' is
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
index 0b54e46c3c14..38b6fc028984 100644
--- a/drivers/oprofile/oprofile_stats.h
+++ b/drivers/oprofile/oprofile_stats.h
@@ -10,7 +10,7 @@
10#ifndef OPROFILE_STATS_H 10#ifndef OPROFILE_STATS_H
11#define OPROFILE_STATS_H 11#define OPROFILE_STATS_H
12 12
13#include <asm/atomic.h> 13#include <linux/atomic.h>
14 14
15struct oprofile_stat_struct { 15struct oprofile_stat_struct {
16 atomic_t sample_lost_no_mm; 16 atomic_t sample_lost_no_mm;
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 8f3faf343f75..095f29e13734 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -408,7 +408,7 @@ got_one:
408} 408}
409EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); 409EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
410 410
411static int is_ejectable(acpi_handle handle) 411static int pcihp_is_ejectable(acpi_handle handle)
412{ 412{
413 acpi_status status; 413 acpi_status status;
414 acpi_handle tmp; 414 acpi_handle tmp;
@@ -442,7 +442,7 @@ int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle)
442 return 0; 442 return 0;
443 if (bridge_handle != parent_handle) 443 if (bridge_handle != parent_handle)
444 return 0; 444 return 0;
445 return is_ejectable(handle); 445 return pcihp_is_ejectable(handle);
446} 446}
447EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable); 447EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable);
448 448
@@ -450,7 +450,7 @@ static acpi_status
450check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv) 450check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv)
451{ 451{
452 int *found = (int *)context; 452 int *found = (int *)context;
453 if (is_ejectable(handle)) { 453 if (pcihp_is_ejectable(handle)) {
454 *found = 1; 454 *found = 1;
455 return AE_CTRL_TERMINATE; 455 return AE_CTRL_TERMINATE;
456 } 456 }
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index d703e73fffa7..3fadf2f135e8 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -32,7 +32,7 @@
32#include <linux/pci_hotplug.h> 32#include <linux/pci_hotplug.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <asm/atomic.h> 35#include <linux/atomic.h>
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/kthread.h> 37#include <linux/kthread.h>
38#include "cpci_hotplug.h" 38#include "cpci_hotplug.h"
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 4952c3b9379d..f1ce99cceac6 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -840,8 +840,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
840 /* Need to read VID early b/c it's used to differentiate CPQ and INTC 840 /* Need to read VID early b/c it's used to differentiate CPQ and INTC
841 * discovery 841 * discovery
842 */ 842 */
843 rc = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id); 843 vendor_id = pdev->vendor;
844 if (rc || ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL))) { 844 if ((vendor_id != PCI_VENDOR_ID_COMPAQ) &&
845 (vendor_id != PCI_VENDOR_ID_INTEL)) {
845 err(msg_HPC_non_compaq_or_intel); 846 err(msg_HPC_non_compaq_or_intel);
846 rc = -ENODEV; 847 rc = -ENODEV;
847 goto err_disable_device; 848 goto err_disable_device;
@@ -868,11 +869,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
868 /* TODO: This code can be made to support non-Compaq or Intel 869 /* TODO: This code can be made to support non-Compaq or Intel
869 * subsystem IDs 870 * subsystem IDs
870 */ 871 */
871 rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid); 872 subsystem_vid = pdev->subsystem_vendor;
872 if (rc) {
873 err("%s : pci_read_config_word failed\n", __func__);
874 goto err_disable_device;
875 }
876 dbg("Subsystem Vendor ID: %x\n", subsystem_vid); 873 dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
877 if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) { 874 if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
878 err(msg_HPC_non_compaq_or_intel); 875 err(msg_HPC_non_compaq_or_intel);
@@ -887,11 +884,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
887 goto err_disable_device; 884 goto err_disable_device;
888 } 885 }
889 886
890 rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid); 887 subsystem_deviceid = pdev->subsystem_device;
891 if (rc) {
892 err("%s : pci_read_config_word failed\n", __func__);
893 goto err_free_ctrl;
894 }
895 888
896 info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid); 889 info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
897 890
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 085dbb5fc168..1e9c9aacc3a6 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -213,6 +213,9 @@ static int board_added(struct slot *p_slot)
213 goto err_exit; 213 goto err_exit;
214 } 214 }
215 215
216 /* Wait for 1 second after checking link training status */
217 msleep(1000);
218
216 /* Check for a power fault */ 219 /* Check for a power fault */
217 if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) { 220 if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
218 ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot)); 221 ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 50a23da5d24d..96dc4734e4af 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -275,16 +275,9 @@ int pciehp_check_link_status(struct controller *ctrl)
275 * hot-plug capable downstream port. But old controller might 275 * hot-plug capable downstream port. But old controller might
276 * not implement it. In this case, we wait for 1000 ms. 276 * not implement it. In this case, we wait for 1000 ms.
277 */ 277 */
278 if (ctrl->link_active_reporting){ 278 if (ctrl->link_active_reporting)
279 /* Wait for Data Link Layer Link Active bit to be set */
280 pcie_wait_link_active(ctrl); 279 pcie_wait_link_active(ctrl);
281 /* 280 else
282 * We must wait for 100 ms after the Data Link Layer
283 * Link Active bit reads 1b before initiating a
284 * configuration access to the hot added device.
285 */
286 msleep(100);
287 } else
288 msleep(1000); 281 msleep(1000);
289 282
290 retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); 283 retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 77cb2a14c896..81525ae5d869 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -55,7 +55,7 @@ enum smbios_attr_enum {
55 SMBIOS_ATTR_INSTANCE_SHOW, 55 SMBIOS_ATTR_INSTANCE_SHOW,
56}; 56};
57 57
58static mode_t 58static size_t
59find_smbios_instance_string(struct pci_dev *pdev, char *buf, 59find_smbios_instance_string(struct pci_dev *pdev, char *buf,
60 enum smbios_attr_enum attribute) 60 enum smbios_attr_enum attribute)
61{ 61{
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 692671b11667..08a95b369d85 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1905,7 +1905,7 @@ void pci_enable_ari(struct pci_dev *dev)
1905{ 1905{
1906 int pos; 1906 int pos;
1907 u32 cap; 1907 u32 cap;
1908 u16 ctrl; 1908 u16 flags, ctrl;
1909 struct pci_dev *bridge; 1909 struct pci_dev *bridge;
1910 1910
1911 if (!pci_is_pcie(dev) || dev->devfn) 1911 if (!pci_is_pcie(dev) || dev->devfn)
@@ -1923,6 +1923,11 @@ void pci_enable_ari(struct pci_dev *dev)
1923 if (!pos) 1923 if (!pos)
1924 return; 1924 return;
1925 1925
1926 /* ARI is a PCIe v2 feature */
1927 pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
1928 if ((flags & PCI_EXP_FLAGS_VERS) < 2)
1929 return;
1930
1926 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap); 1931 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
1927 if (!(cap & PCI_EXP_DEVCAP2_ARI)) 1932 if (!(cap & PCI_EXP_DEVCAP2_ARI))
1928 return; 1933 return;
@@ -3186,7 +3191,7 @@ EXPORT_SYMBOL(pcie_get_readrq);
3186 * @rq: maximum memory read count in bytes 3191 * @rq: maximum memory read count in bytes
3187 * valid values are 128, 256, 512, 1024, 2048, 4096 3192 * valid values are 128, 256, 512, 1024, 2048, 4096
3188 * 3193 *
3189 * If possible sets maximum read byte count 3194 * If possible sets maximum memory read request in bytes
3190 */ 3195 */
3191int pcie_set_readrq(struct pci_dev *dev, int rq) 3196int pcie_set_readrq(struct pci_dev *dev, int rq)
3192{ 3197{
@@ -3209,7 +3214,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
3209 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) { 3214 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3210 ctl &= ~PCI_EXP_DEVCTL_READRQ; 3215 ctl &= ~PCI_EXP_DEVCTL_READRQ;
3211 ctl |= v; 3216 ctl |= v;
3212 err = pci_write_config_dword(dev, cap + PCI_EXP_DEVCTL, ctl); 3217 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3213 } 3218 }
3214 3219
3215out: 3220out:
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 43421fbe080a..9674e9f30d49 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -24,6 +24,7 @@
24#include <linux/suspend.h> 24#include <linux/suspend.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/kfifo.h>
27#include "aerdrv.h" 28#include "aerdrv.h"
28 29
29static int forceload; 30static int forceload;
@@ -445,8 +446,7 @@ static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
445 return drv; 446 return drv;
446} 447}
447 448
448static pci_ers_result_t reset_link(struct pcie_device *aerdev, 449static pci_ers_result_t reset_link(struct pci_dev *dev)
449 struct pci_dev *dev)
450{ 450{
451 struct pci_dev *udev; 451 struct pci_dev *udev;
452 pci_ers_result_t status; 452 pci_ers_result_t status;
@@ -486,7 +486,6 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
486 486
487/** 487/**
488 * do_recovery - handle nonfatal/fatal error recovery process 488 * do_recovery - handle nonfatal/fatal error recovery process
489 * @aerdev: pointer to a pcie_device data structure of root port
490 * @dev: pointer to a pci_dev data structure of agent detecting an error 489 * @dev: pointer to a pci_dev data structure of agent detecting an error
491 * @severity: error severity type 490 * @severity: error severity type
492 * 491 *
@@ -494,8 +493,7 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
494 * error detected message to all downstream drivers within a hierarchy in 493 * error detected message to all downstream drivers within a hierarchy in
495 * question and return the returned code. 494 * question and return the returned code.
496 */ 495 */
497static void do_recovery(struct pcie_device *aerdev, struct pci_dev *dev, 496static void do_recovery(struct pci_dev *dev, int severity)
498 int severity)
499{ 497{
500 pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED; 498 pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
501 enum pci_channel_state state; 499 enum pci_channel_state state;
@@ -511,7 +509,7 @@ static void do_recovery(struct pcie_device *aerdev, struct pci_dev *dev,
511 report_error_detected); 509 report_error_detected);
512 510
513 if (severity == AER_FATAL) { 511 if (severity == AER_FATAL) {
514 result = reset_link(aerdev, dev); 512 result = reset_link(dev);
515 if (result != PCI_ERS_RESULT_RECOVERED) 513 if (result != PCI_ERS_RESULT_RECOVERED)
516 goto failed; 514 goto failed;
517 } 515 }
@@ -576,9 +574,73 @@ static void handle_error_source(struct pcie_device *aerdev,
576 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, 574 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
577 info->status); 575 info->status);
578 } else 576 } else
579 do_recovery(aerdev, dev, info->severity); 577 do_recovery(dev, info->severity);
580} 578}
581 579
580#ifdef CONFIG_ACPI_APEI_PCIEAER
581static void aer_recover_work_func(struct work_struct *work);
582
583#define AER_RECOVER_RING_ORDER 4
584#define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER)
585
586struct aer_recover_entry
587{
588 u8 bus;
589 u8 devfn;
590 u16 domain;
591 int severity;
592};
593
594static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
595 AER_RECOVER_RING_SIZE);
596/*
597 * Mutual exclusion for writers of aer_recover_ring, reader side don't
598 * need lock, because there is only one reader and lock is not needed
599 * between reader and writer.
600 */
601static DEFINE_SPINLOCK(aer_recover_ring_lock);
602static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
603
604void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
605 int severity)
606{
607 unsigned long flags;
608 struct aer_recover_entry entry = {
609 .bus = bus,
610 .devfn = devfn,
611 .domain = domain,
612 .severity = severity,
613 };
614
615 spin_lock_irqsave(&aer_recover_ring_lock, flags);
616 if (kfifo_put(&aer_recover_ring, &entry))
617 schedule_work(&aer_recover_work);
618 else
619 pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
620 domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
621 spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
622}
623EXPORT_SYMBOL_GPL(aer_recover_queue);
624
625static void aer_recover_work_func(struct work_struct *work)
626{
627 struct aer_recover_entry entry;
628 struct pci_dev *pdev;
629
630 while (kfifo_get(&aer_recover_ring, &entry)) {
631 pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
632 entry.devfn);
633 if (!pdev) {
634 pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
635 entry.domain, entry.bus,
636 PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
637 continue;
638 }
639 do_recovery(pdev, entry.severity);
640 }
641}
642#endif
643
582/** 644/**
583 * get_device_error_info - read error status from dev and store it to info 645 * get_device_error_info - read error status from dev and store it to info
584 * @dev: pointer to the device expected to have a error record 646 * @dev: pointer to the device expected to have a error record
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index b07a42e0b350..3ea51736f18d 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -204,7 +204,7 @@ void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
204} 204}
205 205
206#ifdef CONFIG_ACPI_APEI_PCIEAER 206#ifdef CONFIG_ACPI_APEI_PCIEAER
207static int cper_severity_to_aer(int cper_severity) 207int cper_severity_to_aer(int cper_severity)
208{ 208{
209 switch (cper_severity) { 209 switch (cper_severity) {
210 case CPER_SEV_RECOVERABLE: 210 case CPER_SEV_RECOVERABLE:
@@ -215,6 +215,7 @@ static int cper_severity_to_aer(int cper_severity)
215 return AER_CORRECTABLE; 215 return AER_CORRECTABLE;
216 } 216 }
217} 217}
218EXPORT_SYMBOL_GPL(cper_severity_to_aer);
218 219
219void cper_print_aer(const char *prefix, int cper_severity, 220void cper_print_aer(const char *prefix, int cper_severity,
220 struct aer_capability_regs *aer) 221 struct aer_capability_regs *aer)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 9ab492f21f86..795c9026d55f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -68,21 +68,6 @@ static int __init pcibus_class_init(void)
68} 68}
69postcore_initcall(pcibus_class_init); 69postcore_initcall(pcibus_class_init);
70 70
71/*
72 * Translate the low bits of the PCI base
73 * to the resource type
74 */
75static inline unsigned int pci_calc_resource_flags(unsigned int flags)
76{
77 if (flags & PCI_BASE_ADDRESS_SPACE_IO)
78 return IORESOURCE_IO;
79
80 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
81 return IORESOURCE_MEM | IORESOURCE_PREFETCH;
82
83 return IORESOURCE_MEM;
84}
85
86static u64 pci_size(u64 base, u64 maxbase, u64 mask) 71static u64 pci_size(u64 base, u64 maxbase, u64 mask)
87{ 72{
88 u64 size = mask & maxbase; /* Find the significant bits */ 73 u64 size = mask & maxbase; /* Find the significant bits */
@@ -101,18 +86,39 @@ static u64 pci_size(u64 base, u64 maxbase, u64 mask)
101 return size; 86 return size;
102} 87}
103 88
104static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar) 89static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
105{ 90{
91 u32 mem_type;
92 unsigned long flags;
93
106 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { 94 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
107 res->flags = bar & ~PCI_BASE_ADDRESS_IO_MASK; 95 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
108 return pci_bar_io; 96 flags |= IORESOURCE_IO;
97 return flags;
109 } 98 }
110 99
111 res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK; 100 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
101 flags |= IORESOURCE_MEM;
102 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
103 flags |= IORESOURCE_PREFETCH;
112 104
113 if (res->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) 105 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
114 return pci_bar_mem64; 106 switch (mem_type) {
115 return pci_bar_mem32; 107 case PCI_BASE_ADDRESS_MEM_TYPE_32:
108 break;
109 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
110 dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n");
111 break;
112 case PCI_BASE_ADDRESS_MEM_TYPE_64:
113 flags |= IORESOURCE_MEM_64;
114 break;
115 default:
116 dev_warn(&dev->dev,
117 "mem unknown type %x treated as 32-bit BAR\n",
118 mem_type);
119 break;
120 }
121 return flags;
116} 122}
117 123
118/** 124/**
@@ -165,9 +171,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
165 l = 0; 171 l = 0;
166 172
167 if (type == pci_bar_unknown) { 173 if (type == pci_bar_unknown) {
168 type = decode_bar(res, l); 174 res->flags = decode_bar(dev, l);
169 res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN; 175 res->flags |= IORESOURCE_SIZEALIGN;
170 if (type == pci_bar_io) { 176 if (res->flags & IORESOURCE_IO) {
171 l &= PCI_BASE_ADDRESS_IO_MASK; 177 l &= PCI_BASE_ADDRESS_IO_MASK;
172 mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT; 178 mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
173 } else { 179 } else {
@@ -180,7 +186,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
180 mask = (u32)PCI_ROM_ADDRESS_MASK; 186 mask = (u32)PCI_ROM_ADDRESS_MASK;
181 } 187 }
182 188
183 if (type == pci_bar_mem64) { 189 if (res->flags & IORESOURCE_MEM_64) {
184 u64 l64 = l; 190 u64 l64 = l;
185 u64 sz64 = sz; 191 u64 sz64 = sz;
186 u64 mask64 = mask | (u64)~0 << 32; 192 u64 mask64 = mask | (u64)~0 << 32;
@@ -204,7 +210,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
204 goto fail; 210 goto fail;
205 } 211 }
206 212
207 res->flags |= IORESOURCE_MEM_64;
208 if ((sizeof(resource_size_t) < 8) && l) { 213 if ((sizeof(resource_size_t) < 8) && l) {
209 /* Address above 32-bit boundary; disable the BAR */ 214 /* Address above 32-bit boundary; disable the BAR */
210 pci_write_config_dword(dev, pos, 0); 215 pci_write_config_dword(dev, pos, 0);
@@ -230,7 +235,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
230 } 235 }
231 236
232 out: 237 out:
233 return (type == pci_bar_mem64) ? 1 : 0; 238 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
234 fail: 239 fail:
235 res->flags = 0; 240 res->flags = 0;
236 goto out; 241 goto out;
@@ -284,10 +289,6 @@ static void __devinit pci_read_bridge_io(struct pci_bus *child)
284 if (!res->end) 289 if (!res->end)
285 res->end = limit + 0xfff; 290 res->end = limit + 0xfff;
286 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); 291 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
287 } else {
288 dev_printk(KERN_DEBUG, &dev->dev,
289 " bridge window [io %#06lx-%#06lx] (disabled)\n",
290 base, limit);
291 } 292 }
292} 293}
293 294
@@ -308,10 +309,6 @@ static void __devinit pci_read_bridge_mmio(struct pci_bus *child)
308 res->start = base; 309 res->start = base;
309 res->end = limit + 0xfffff; 310 res->end = limit + 0xfffff;
310 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); 311 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
311 } else {
312 dev_printk(KERN_DEBUG, &dev->dev,
313 " bridge window [mem %#010lx-%#010lx] (disabled)\n",
314 base, limit + 0xfffff);
315 } 312 }
316} 313}
317 314
@@ -359,10 +356,6 @@ static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
359 res->start = base; 356 res->start = base;
360 res->end = limit + 0xfffff; 357 res->end = limit + 0xfffff;
361 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); 358 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
362 } else {
363 dev_printk(KERN_DEBUG, &dev->dev,
364 " bridge window [mem %#010lx-%#010lx pref] (disabled)\n",
365 base, limit + 0xfffff);
366 } 359 }
367} 360}
368 361
@@ -725,12 +718,14 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
725 pci_write_config_word(dev, PCI_STATUS, 0xffff); 718 pci_write_config_word(dev, PCI_STATUS, 0xffff);
726 719
727 /* Prevent assigning a bus number that already exists. 720 /* Prevent assigning a bus number that already exists.
728 * This can happen when a bridge is hot-plugged */ 721 * This can happen when a bridge is hot-plugged, so in
729 if (pci_find_bus(pci_domain_nr(bus), max+1)) 722 * this case we only re-scan this bus. */
730 goto out; 723 child = pci_find_bus(pci_domain_nr(bus), max+1);
731 child = pci_add_new_bus(bus, dev, ++max); 724 if (!child) {
732 if (!child) 725 child = pci_add_new_bus(bus, dev, ++max);
733 goto out; 726 if (!child)
727 goto out;
728 }
734 buses = (buses & 0xff000000) 729 buses = (buses & 0xff000000)
735 | ((unsigned int)(child->primary) << 0) 730 | ((unsigned int)(child->primary) << 0)
736 | ((unsigned int)(child->secondary) << 8) 731 | ((unsigned int)(child->secondary) << 8)
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 9995842e45b5..8a1d3c7863a8 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -336,7 +336,6 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
336 /* Clear upper 16 bits of I/O base/limit. */ 336 /* Clear upper 16 bits of I/O base/limit. */
337 io_upper16 = 0; 337 io_upper16 = 0;
338 l = 0x00f0; 338 l = 0x00f0;
339 dev_info(&bridge->dev, " bridge window [io disabled]\n");
340 } 339 }
341 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ 340 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
342 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); 341 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
@@ -362,7 +361,6 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus)
362 dev_info(&bridge->dev, " bridge window %pR\n", res); 361 dev_info(&bridge->dev, " bridge window %pR\n", res);
363 } else { 362 } else {
364 l = 0x0000fff0; 363 l = 0x0000fff0;
365 dev_info(&bridge->dev, " bridge window [mem disabled]\n");
366 } 364 }
367 pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); 365 pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
368} 366}
@@ -393,7 +391,6 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
393 dev_info(&bridge->dev, " bridge window %pR\n", res); 391 dev_info(&bridge->dev, " bridge window %pR\n", res);
394 } else { 392 } else {
395 l = 0x0000fff0; 393 l = 0x0000fff0;
396 dev_info(&bridge->dev, " bridge window [mem pref disabled]\n");
397 } 394 }
398 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); 395 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
399 396
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index eec9738f3492..eb219a1d16f7 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -21,7 +21,7 @@
21static void __init 21static void __init
22pdev_fixup_irq(struct pci_dev *dev, 22pdev_fixup_irq(struct pci_dev *dev,
23 u8 (*swizzle)(struct pci_dev *, u8 *), 23 u8 (*swizzle)(struct pci_dev *, u8 *),
24 int (*map_irq)(struct pci_dev *, u8, u8)) 24 int (*map_irq)(const struct pci_dev *, u8, u8))
25{ 25{
26 u8 pin, slot; 26 u8 pin, slot;
27 int irq = 0; 27 int irq = 0;
@@ -56,7 +56,7 @@ pdev_fixup_irq(struct pci_dev *dev,
56 56
57void __init 57void __init
58pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *), 58pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *),
59 int (*map_irq)(struct pci_dev *, u8, u8)) 59 int (*map_irq)(const struct pci_dev *, u8, u8))
60{ 60{
61 struct pci_dev *dev = NULL; 61 struct pci_dev *dev = NULL;
62 for_each_pci_dev(dev) 62 for_each_pci_dev(dev)
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index bc0e6eea0fff..319f359906e8 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -74,8 +74,7 @@ void pci_update_resource(struct pci_dev *dev, int resno)
74 resno, new, check); 74 resno, new, check);
75 } 75 }
76 76
77 if ((new & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == 77 if (res->flags & IORESOURCE_MEM_64) {
78 (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) {
79 new = region.start >> 16 >> 16; 78 new = region.start >> 16 >> 16;
80 pci_write_config_dword(dev, reg + 4, new); 79 pci_write_config_dword(dev, reg + 4, new);
81 pci_read_config_dword(dev, reg + 4, &check); 80 pci_read_config_dword(dev, reg + 4, &check);
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 492b7d807fe8..6fa215a38615 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -16,7 +16,7 @@
16#include <xen/interface/io/pciif.h> 16#include <xen/interface/io/pciif.h>
17#include <asm/xen/pci.h> 17#include <asm/xen/pci.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <asm/atomic.h> 19#include <linux/atomic.h>
20#include <linux/workqueue.h> 20#include <linux/workqueue.h>
21#include <linux/bitops.h> 21#include <linux/bitops.h>
22#include <linux/time.h> 22#include <linux/time.h>
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index df68618f6dbb..3195dbd3ec34 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -636,6 +636,29 @@ void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task)
636} 636}
637EXPORT_SYMBOL_GPL(rtc_irq_unregister); 637EXPORT_SYMBOL_GPL(rtc_irq_unregister);
638 638
639static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
640{
641 /*
642 * We unconditionally cancel the timer here, because otherwise
643 * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
644 * when we manage to start the timer before the callback
645 * returns HRTIMER_RESTART.
646 *
647 * We cannot use hrtimer_cancel() here as a running callback
648 * could be blocked on rtc->irq_task_lock and hrtimer_cancel()
649 * would spin forever.
650 */
651 if (hrtimer_try_to_cancel(&rtc->pie_timer) < 0)
652 return -1;
653
654 if (enabled) {
655 ktime_t period = ktime_set(0, NSEC_PER_SEC / rtc->irq_freq);
656
657 hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
658 }
659 return 0;
660}
661
639/** 662/**
640 * rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs 663 * rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs
641 * @rtc: the rtc device 664 * @rtc: the rtc device
@@ -651,21 +674,21 @@ int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled
651 int err = 0; 674 int err = 0;
652 unsigned long flags; 675 unsigned long flags;
653 676
677retry:
654 spin_lock_irqsave(&rtc->irq_task_lock, flags); 678 spin_lock_irqsave(&rtc->irq_task_lock, flags);
655 if (rtc->irq_task != NULL && task == NULL) 679 if (rtc->irq_task != NULL && task == NULL)
656 err = -EBUSY; 680 err = -EBUSY;
657 if (rtc->irq_task != task) 681 if (rtc->irq_task != task)
658 err = -EACCES; 682 err = -EACCES;
659 683 if (!err) {
660 if (enabled) { 684 if (rtc_update_hrtimer(rtc, enabled) < 0) {
661 ktime_t period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq); 685 spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
662 hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL); 686 cpu_relax();
663 } else { 687 goto retry;
664 hrtimer_cancel(&rtc->pie_timer); 688 }
689 rtc->pie_enabled = enabled;
665 } 690 }
666 rtc->pie_enabled = enabled;
667 spin_unlock_irqrestore(&rtc->irq_task_lock, flags); 691 spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
668
669 return err; 692 return err;
670} 693}
671EXPORT_SYMBOL_GPL(rtc_irq_set_state); 694EXPORT_SYMBOL_GPL(rtc_irq_set_state);
@@ -685,22 +708,20 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
685 int err = 0; 708 int err = 0;
686 unsigned long flags; 709 unsigned long flags;
687 710
688 if (freq <= 0) 711 if (freq <= 0 || freq > 5000)
689 return -EINVAL; 712 return -EINVAL;
690 713retry:
691 spin_lock_irqsave(&rtc->irq_task_lock, flags); 714 spin_lock_irqsave(&rtc->irq_task_lock, flags);
692 if (rtc->irq_task != NULL && task == NULL) 715 if (rtc->irq_task != NULL && task == NULL)
693 err = -EBUSY; 716 err = -EBUSY;
694 if (rtc->irq_task != task) 717 if (rtc->irq_task != task)
695 err = -EACCES; 718 err = -EACCES;
696 if (err == 0) { 719 if (!err) {
697 rtc->irq_freq = freq; 720 rtc->irq_freq = freq;
698 if (rtc->pie_enabled) { 721 if (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0) {
699 ktime_t period; 722 spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
700 hrtimer_cancel(&rtc->pie_timer); 723 cpu_relax();
701 period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq); 724 goto retry;
702 hrtimer_start(&rtc->pie_timer, period,
703 HRTIMER_MODE_REL);
704 } 725 }
705 } 726 }
706 spin_unlock_irqrestore(&rtc->irq_task_lock, flags); 727 spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 77f778b7b070..16c5208c3dc7 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -21,7 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22 22
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24#include <asm/atomic.h> 24#include <linux/atomic.h>
25#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
26 26
27#include "dasd_int.h" 27#include "dasd_int.h"
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 05909a7df8b3..a90a02c28d6a 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -13,7 +13,7 @@
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/reboot.h> 15#include <linux/reboot.h>
16#include <asm/atomic.h> 16#include <linux/atomic.h>
17#include <asm/ptrace.h> 17#include <asm/ptrace.h>
18#include <asm/sigp.h> 18#include <asm/sigp.h>
19#include <asm/smp.h> 19#include <asm/smp.h>
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index c837d7419a6a..524d988d89dd 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -21,7 +21,7 @@
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <asm/atomic.h> 24#include <linux/atomic.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/cpcmd.h> 26#include <asm/cpcmd.h>
27#include <asm/debug.h> 27#include <asm/debug.h>
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 7e297c7bb5ff..0b7245c72d5e 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -2,7 +2,7 @@
2#define S390_DEVICE_H 2#define S390_DEVICE_H
3 3
4#include <asm/ccwdev.h> 4#include <asm/ccwdev.h>
5#include <asm/atomic.h> 5#include <linux/atomic.h>
6#include <linux/wait.h> 6#include <linux/wait.h>
7#include <linux/notifier.h> 7#include <linux/notifier.h>
8#include "io_sch.h" 8#include "io_sch.h"
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 570d4da10696..e58169c32474 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -15,7 +15,7 @@
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/gfp.h> 16#include <linux/gfp.h>
17#include <linux/kernel_stat.h> 17#include <linux/kernel_stat.h>
18#include <asm/atomic.h> 18#include <linux/atomic.h>
19#include <asm/debug.h> 19#include <asm/debug.h>
20#include <asm/qdio.h> 20#include <asm/qdio.h>
21 21
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 68be6e157126..2a1d4dfaf859 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -9,7 +9,7 @@
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/kernel_stat.h> 11#include <linux/kernel_stat.h>
12#include <asm/atomic.h> 12#include <linux/atomic.h>
13#include <asm/debug.h> 13#include <asm/debug.h>
14#include <asm/qdio.h> 14#include <asm/qdio.h>
15#include <asm/airq.h> 15#include <asm/airq.h>
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f8134a44cefa..b77ae519d79c 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -41,7 +41,7 @@
41#include <linux/mutex.h> 41#include <linux/mutex.h>
42#include <asm/reset.h> 42#include <asm/reset.h>
43#include <asm/airq.h> 43#include <asm/airq.h>
44#include <asm/atomic.h> 44#include <linux/atomic.h>
45#include <asm/system.h> 45#include <asm/system.h>
46#include <asm/isc.h> 46#include <asm/isc.h>
47#include <linux/hrtimer.h> 47#include <linux/hrtimer.h>
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 8e65447f76b7..88ad33ed5d38 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -36,7 +36,7 @@
36#include <linux/seq_file.h> 36#include <linux/seq_file.h>
37#include <linux/compat.h> 37#include <linux/compat.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <asm/atomic.h> 39#include <linux/atomic.h>
40#include <asm/uaccess.h> 40#include <asm/uaccess.h>
41#include <linux/hw_random.h> 41#include <linux/hw_random.h>
42 42
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 2176d00b395e..da171b5f3996 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -30,7 +30,7 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/err.h> 32#include <linux/err.h>
33#include <asm/atomic.h> 33#include <linux/atomic.h>
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35 35
36#include "ap_bus.h" 36#include "ap_bus.h"
diff --git a/drivers/s390/crypto/zcrypt_mono.c b/drivers/s390/crypto/zcrypt_mono.c
index 44253fdd4136..eb313c3fb2d1 100644
--- a/drivers/s390/crypto/zcrypt_mono.c
+++ b/drivers/s390/crypto/zcrypt_mono.c
@@ -32,7 +32,7 @@
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/proc_fs.h> 33#include <linux/proc_fs.h>
34#include <linux/compat.h> 34#include <linux/compat.h>
35#include <asm/atomic.h> 35#include <linux/atomic.h>
36#include <asm/uaccess.h> 36#include <asm/uaccess.h>
37 37
38#include "ap_bus.h" 38#include "ap_bus.h"
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 1afb69c75fea..d84816f144df 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -30,7 +30,7 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/err.h> 32#include <linux/err.h>
33#include <asm/atomic.h> 33#include <linux/atomic.h>
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35 35
36#include "ap_bus.h" 36#include "ap_bus.h"
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index aa4c050a5694..bdbdbe192993 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -30,7 +30,7 @@
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/gfp.h> 31#include <linux/gfp.h>
32#include <linux/err.h> 32#include <linux/err.h>
33#include <asm/atomic.h> 33#include <linux/atomic.h>
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35 35
36#include "ap_bus.h" 36#include "ap_bus.h"
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 4f85eb725f4f..dd4737808e06 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -31,7 +31,7 @@
31#include <linux/err.h> 31#include <linux/err.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <asm/atomic.h> 34#include <linux/atomic.h>
35#include <asm/uaccess.h> 35#include <asm/uaccess.h>
36 36
37#include "ap_bus.h" 37#include "ap_bus.h"
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
index 1e8b235d95b5..a4510cf59034 100644
--- a/drivers/s390/net/fsm.h
+++ b/drivers/s390/net/fsm.h
@@ -8,7 +8,7 @@
8#include <linux/slab.h> 8#include <linux/slab.h>
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/string.h> 10#include <linux/string.h>
11#include <asm/atomic.h> 11#include <linux/atomic.h>
12 12
13/** 13/**
14 * Define this to get debugging messages. 14 * Define this to get debugging messages.
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 2a4991d6d4d5..7cac873c7383 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -13,7 +13,7 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <scsi/fc/fc_fcp.h> 14#include <scsi/fc/fc_fcp.h>
15#include <scsi/scsi_eh.h> 15#include <scsi/scsi_eh.h>
16#include <asm/atomic.h> 16#include <linux/atomic.h>
17#include "zfcp_ext.h" 17#include "zfcp_ext.h"
18#include "zfcp_dbf.h" 18#include "zfcp_dbf.h"
19#include "zfcp_fc.h" 19#include "zfcp_fc.h"
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 740da4465447..965a1fccd66a 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -16,7 +16,7 @@
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/of.h> 17#include <linux/of.h>
18#include <linux/of_device.h> 18#include <linux/of_device.h>
19#include <asm/atomic.h> 19#include <linux/atomic.h>
20#include <asm/uaccess.h> /* put_/get_user */ 20#include <asm/uaccess.h> /* put_/get_user */
21#include <asm/io.h> 21#include <asm/io.h>
22 22
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 081c171a1ed6..5ce5170254ca 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -397,7 +397,7 @@ struct amap_pdu_data_out {
397}; 397};
398 398
399struct be_cmd_bhs { 399struct be_cmd_bhs {
400 struct iscsi_cmd iscsi_hdr; 400 struct iscsi_scsi_req iscsi_hdr;
401 unsigned char pad1[16]; 401 unsigned char pad1[16];
402 struct pdu_data_out iscsi_data_pdu; 402 struct pdu_data_out iscsi_data_pdu;
403 unsigned char pad2[BE_SENSE_INFO_SIZE - 403 unsigned char pad2[BE_SENSE_INFO_SIZE -
@@ -428,7 +428,7 @@ struct be_nonio_bhs {
428}; 428};
429 429
430struct be_status_bhs { 430struct be_status_bhs {
431 struct iscsi_cmd iscsi_hdr; 431 struct iscsi_scsi_req iscsi_hdr;
432 unsigned char pad1[16]; 432 unsigned char pad1[16];
433 /** 433 /**
434 * The plus 2 below is to hold the sense info length that gets 434 * The plus 2 below is to hold the sense info length that gets
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 3b0af1102bf4..a796de935054 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -27,6 +27,7 @@
27struct bfa_s; 27struct bfa_s;
28 28
29typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m); 29typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
30typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
30 31
31/* 32/*
32 * Interrupt message handlers 33 * Interrupt message handlers
@@ -121,6 +122,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
121#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \ 122#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
122 (__hcb_qe)->cbfn = (__cbfn); \ 123 (__hcb_qe)->cbfn = (__cbfn); \
123 (__hcb_qe)->cbarg = (__cbarg); \ 124 (__hcb_qe)->cbarg = (__cbarg); \
125 (__hcb_qe)->pre_rmv = BFA_FALSE; \
124 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \ 126 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
125 } while (0) 127 } while (0)
126 128
@@ -135,6 +137,11 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
135 } \ 137 } \
136 } while (0) 138 } while (0)
137 139
140#define bfa_cb_queue_status(__bfa, __hcb_qe, __status) do { \
141 (__hcb_qe)->fw_status = (__status); \
142 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
143} while (0)
144
138#define bfa_cb_queue_done(__hcb_qe) do { \ 145#define bfa_cb_queue_done(__hcb_qe) do { \
139 (__hcb_qe)->once = BFA_FALSE; \ 146 (__hcb_qe)->once = BFA_FALSE; \
140 } while (0) 147 } while (0)
@@ -177,7 +184,7 @@ struct bfa_msix_s {
177struct bfa_hwif_s { 184struct bfa_hwif_s {
178 void (*hw_reginit)(struct bfa_s *bfa); 185 void (*hw_reginit)(struct bfa_s *bfa);
179 void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq); 186 void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
180 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq); 187 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci);
181 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs); 188 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
182 void (*hw_msix_ctrl_install)(struct bfa_s *bfa); 189 void (*hw_msix_ctrl_install)(struct bfa_s *bfa);
183 void (*hw_msix_queue_install)(struct bfa_s *bfa); 190 void (*hw_msix_queue_install)(struct bfa_s *bfa);
@@ -268,10 +275,8 @@ struct bfa_iocfc_s {
268 ((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa)) 275 ((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa))
269#define bfa_msix_uninstall(__bfa) \ 276#define bfa_msix_uninstall(__bfa) \
270 ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa)) 277 ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
271#define bfa_isr_rspq_ack(__bfa, __queue) do { \ 278#define bfa_isr_rspq_ack(__bfa, __queue, __ci) \
272 if ((__bfa)->iocfc.hwif.hw_rspq_ack) \ 279 ((__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue, __ci))
273 (__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue); \
274} while (0)
275#define bfa_isr_reqq_ack(__bfa, __queue) do { \ 280#define bfa_isr_reqq_ack(__bfa, __queue) do { \
276 if ((__bfa)->iocfc.hwif.hw_reqq_ack) \ 281 if ((__bfa)->iocfc.hwif.hw_reqq_ack) \
277 (__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue); \ 282 (__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue); \
@@ -311,7 +316,7 @@ void bfa_msix_rspq(struct bfa_s *bfa, int vec);
311void bfa_msix_lpu_err(struct bfa_s *bfa, int vec); 316void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
312 317
313void bfa_hwcb_reginit(struct bfa_s *bfa); 318void bfa_hwcb_reginit(struct bfa_s *bfa);
314void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq); 319void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
315void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs); 320void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
316void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa); 321void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa);
317void bfa_hwcb_msix_queue_install(struct bfa_s *bfa); 322void bfa_hwcb_msix_queue_install(struct bfa_s *bfa);
@@ -324,7 +329,8 @@ void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
324void bfa_hwct_reginit(struct bfa_s *bfa); 329void bfa_hwct_reginit(struct bfa_s *bfa);
325void bfa_hwct2_reginit(struct bfa_s *bfa); 330void bfa_hwct2_reginit(struct bfa_s *bfa);
326void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq); 331void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
327void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq); 332void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
333void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
328void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs); 334void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
329void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa); 335void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa);
330void bfa_hwct_msix_queue_install(struct bfa_s *bfa); 336void bfa_hwct_msix_queue_install(struct bfa_s *bfa);
@@ -376,6 +382,22 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
376#define bfa_get_fw_clock_res(__bfa) \ 382#define bfa_get_fw_clock_res(__bfa) \
377 ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res) 383 ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
378 384
385/*
386 * lun mask macros return NULL when min cfg is enabled and there is
387 * no memory allocated for lunmask.
388 */
389#define bfa_get_lun_mask(__bfa) \
390 ((&(__bfa)->modules.dconf_mod)->min_cfg) ? NULL : \
391 (&(BFA_DCONF_MOD(__bfa)->dconf->lun_mask))
392
393#define bfa_get_lun_mask_list(_bfa) \
394 ((&(_bfa)->modules.dconf_mod)->min_cfg) ? NULL : \
395 (bfa_get_lun_mask(_bfa)->lun_list)
396
397#define bfa_get_lun_mask_status(_bfa) \
398 (((&(_bfa)->modules.dconf_mod)->min_cfg) \
399 ? BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status))
400
379void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids); 401void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
380void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg); 402void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
381void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg); 403void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
@@ -406,7 +428,22 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
406 428
407void bfa_iocfc_enable(struct bfa_s *bfa); 429void bfa_iocfc_enable(struct bfa_s *bfa);
408void bfa_iocfc_disable(struct bfa_s *bfa); 430void bfa_iocfc_disable(struct bfa_s *bfa);
431void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status);
409#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ 432#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
410 bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout) 433 bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
411 434
435struct bfa_cb_pending_q_s {
436 struct bfa_cb_qe_s hcb_qe;
437 void *data; /* Driver buffer */
438};
439
440/* Common macros to operate on pending stats/attr apis */
441#define bfa_pending_q_init(__qe, __cbfn, __cbarg, __data) do { \
442 bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \
443 (__qe)->hcb_qe.cbfn = (__cbfn); \
444 (__qe)->hcb_qe.cbarg = (__cbarg); \
445 (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \
446 (__qe)->data = (__data); \
447} while (0)
448
412#endif /* __BFA_H__ */ 449#endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index c38e589105a5..4bd546bcc240 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -33,6 +33,7 @@ static struct bfa_module_s *hal_mods[] = {
33 &hal_mod_uf, 33 &hal_mod_uf,
34 &hal_mod_rport, 34 &hal_mod_rport,
35 &hal_mod_fcp, 35 &hal_mod_fcp,
36 &hal_mod_dconf,
36 NULL 37 NULL
37}; 38};
38 39
@@ -237,8 +238,6 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
237 u32 pi, ci; 238 u32 pi, ci;
238 struct list_head *waitq; 239 struct list_head *waitq;
239 240
240 bfa_isr_rspq_ack(bfa, qid);
241
242 ci = bfa_rspq_ci(bfa, qid); 241 ci = bfa_rspq_ci(bfa, qid);
243 pi = bfa_rspq_pi(bfa, qid); 242 pi = bfa_rspq_pi(bfa, qid);
244 243
@@ -251,11 +250,9 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
251 } 250 }
252 251
253 /* 252 /*
254 * update CI 253 * acknowledge RME completions and update CI
255 */ 254 */
256 bfa_rspq_ci(bfa, qid) = pi; 255 bfa_isr_rspq_ack(bfa, qid, ci);
257 writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
258 mmiowb();
259 256
260 /* 257 /*
261 * Resume any pending requests in the corresponding reqq. 258 * Resume any pending requests in the corresponding reqq.
@@ -325,23 +322,19 @@ bfa_intx(struct bfa_s *bfa)
325 int queue; 322 int queue;
326 323
327 intr = readl(bfa->iocfc.bfa_regs.intr_status); 324 intr = readl(bfa->iocfc.bfa_regs.intr_status);
328 if (!intr)
329 return BFA_FALSE;
330 325
331 qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK); 326 qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
332 if (qintr) 327 if (qintr)
333 writel(qintr, bfa->iocfc.bfa_regs.intr_status); 328 writel(qintr, bfa->iocfc.bfa_regs.intr_status);
334 329
335 /* 330 /*
336 * RME completion queue interrupt 331 * Unconditional RME completion queue interrupt
337 */ 332 */
338 qintr = intr & __HFN_INT_RME_MASK; 333 if (bfa->queue_process) {
339 if (qintr && bfa->queue_process) {
340 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) 334 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
341 bfa_isr_rspq(bfa, queue); 335 bfa_isr_rspq(bfa, queue);
342 } 336 }
343 337
344 intr &= ~qintr;
345 if (!intr) 338 if (!intr)
346 return BFA_TRUE; 339 return BFA_TRUE;
347 340
@@ -432,7 +425,8 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
432 __HFN_INT_MBOX_LPU1_CT2); 425 __HFN_INT_MBOX_LPU1_CT2);
433 intr &= __HFN_INT_ERR_MASK_CT2; 426 intr &= __HFN_INT_ERR_MASK_CT2;
434 } else { 427 } else {
435 halt_isr = intr & __HFN_INT_LL_HALT; 428 halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
429 (intr & __HFN_INT_LL_HALT) : 0;
436 pss_isr = intr & __HFN_INT_ERR_PSS; 430 pss_isr = intr & __HFN_INT_ERR_PSS;
437 lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1); 431 lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
438 intr &= __HFN_INT_ERR_MASK; 432 intr &= __HFN_INT_ERR_MASK;
@@ -578,7 +572,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
578 } else { 572 } else {
579 iocfc->hwif.hw_reginit = bfa_hwcb_reginit; 573 iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
580 iocfc->hwif.hw_reqq_ack = NULL; 574 iocfc->hwif.hw_reqq_ack = NULL;
581 iocfc->hwif.hw_rspq_ack = NULL; 575 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
582 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; 576 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
583 iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install; 577 iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
584 iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install; 578 iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
@@ -595,7 +589,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
595 if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) { 589 if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
596 iocfc->hwif.hw_reginit = bfa_hwct2_reginit; 590 iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
597 iocfc->hwif.hw_isr_mode_set = NULL; 591 iocfc->hwif.hw_isr_mode_set = NULL;
598 iocfc->hwif.hw_rspq_ack = NULL; 592 iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
599 } 593 }
600 594
601 iocfc->hwif.hw_reginit(bfa); 595 iocfc->hwif.hw_reginit(bfa);
@@ -685,7 +679,7 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
685 679
686 bfa->queue_process = BFA_TRUE; 680 bfa->queue_process = BFA_TRUE;
687 for (i = 0; i < BFI_IOC_MAX_CQS; i++) 681 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
688 bfa_isr_rspq_ack(bfa, i); 682 bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));
689 683
690 for (i = 0; hal_mods[i]; i++) 684 for (i = 0; hal_mods[i]; i++)
691 hal_mods[i]->start(bfa); 685 hal_mods[i]->start(bfa);
@@ -709,7 +703,7 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
709 struct bfa_s *bfa = bfa_arg; 703 struct bfa_s *bfa = bfa_arg;
710 704
711 if (complete) { 705 if (complete) {
712 if (bfa->iocfc.cfgdone) 706 if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone)
713 bfa_cb_init(bfa->bfad, BFA_STATUS_OK); 707 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
714 else 708 else
715 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); 709 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
@@ -822,9 +816,11 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
822 */ 816 */
823 bfa_fcport_init(bfa); 817 bfa_fcport_init(bfa);
824 818
825 if (iocfc->action == BFA_IOCFC_ACT_INIT) 819 if (iocfc->action == BFA_IOCFC_ACT_INIT) {
826 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa); 820 if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
827 else { 821 bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
822 bfa_iocfc_init_cb, bfa);
823 } else {
828 if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE) 824 if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
829 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe, 825 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
830 bfa_iocfc_enable_cb, bfa); 826 bfa_iocfc_enable_cb, bfa);
@@ -1045,6 +1041,7 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
1045 } 1041 }
1046 1042
1047 bfa_iocfc_send_cfg(bfa); 1043 bfa_iocfc_send_cfg(bfa);
1044 bfa_dconf_modinit(bfa);
1048} 1045}
1049 1046
1050/* 1047/*
@@ -1207,7 +1204,9 @@ bfa_iocfc_stop(struct bfa_s *bfa)
1207 bfa->iocfc.action = BFA_IOCFC_ACT_STOP; 1204 bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
1208 1205
1209 bfa->queue_process = BFA_FALSE; 1206 bfa->queue_process = BFA_FALSE;
1210 bfa_ioc_disable(&bfa->ioc); 1207 bfa_dconf_modexit(bfa);
1208 if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
1209 bfa_ioc_disable(&bfa->ioc);
1211} 1210}
1212 1211
1213void 1212void
@@ -1540,10 +1539,17 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
1540 struct list_head *qe; 1539 struct list_head *qe;
1541 struct list_head *qen; 1540 struct list_head *qen;
1542 struct bfa_cb_qe_s *hcb_qe; 1541 struct bfa_cb_qe_s *hcb_qe;
1542 bfa_cb_cbfn_status_t cbfn;
1543 1543
1544 list_for_each_safe(qe, qen, comp_q) { 1544 list_for_each_safe(qe, qen, comp_q) {
1545 hcb_qe = (struct bfa_cb_qe_s *) qe; 1545 hcb_qe = (struct bfa_cb_qe_s *) qe;
1546 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE); 1546 if (hcb_qe->pre_rmv) {
1547 /* qe is invalid after return, dequeue before cbfn() */
1548 list_del(qe);
1549 cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
1550 cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
1551 } else
1552 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
1547 } 1553 }
1548} 1554}
1549 1555
@@ -1556,10 +1562,20 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
1556 while (!list_empty(comp_q)) { 1562 while (!list_empty(comp_q)) {
1557 bfa_q_deq(comp_q, &qe); 1563 bfa_q_deq(comp_q, &qe);
1558 hcb_qe = (struct bfa_cb_qe_s *) qe; 1564 hcb_qe = (struct bfa_cb_qe_s *) qe;
1565 WARN_ON(hcb_qe->pre_rmv);
1559 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE); 1566 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
1560 } 1567 }
1561} 1568}
1562 1569
1570void
1571bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
1572{
1573 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
1574 if (bfa->iocfc.cfgdone == BFA_TRUE)
1575 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
1576 bfa_iocfc_init_cb, bfa);
1577 }
1578}
1563 1579
1564/* 1580/*
1565 * Return the list of PCI vendor/device id lists supported by this 1581 * Return the list of PCI vendor/device id lists supported by this
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index ed8d31b0188b..7b3d235d20b4 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -144,6 +144,7 @@ enum bfa_status {
144 BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */ 144 BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */
145 BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */ 145 BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */
146 BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */ 146 BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */
147 BFA_STATUS_UNKNOWN_VWWN = 30, /* VPORT PWWN not found */
147 BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */ 148 BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */
148 BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */ 149 BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */
149 BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port */ 150 BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port */
@@ -164,6 +165,8 @@ enum bfa_status {
164 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */ 165 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */
165 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot 166 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot
166 * configuration */ 167 * configuration */
168 BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */
169 BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */
167 BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */ 170 BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */
168 BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on 171 BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on
169 * this adapter */ 172 * this adapter */
@@ -172,11 +175,15 @@ enum bfa_status {
172 BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */ 175 BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */
173 BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */ 176 BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */
174 BFA_STATUS_FEATURE_NOT_SUPPORTED = 192, /* Feature not supported */ 177 BFA_STATUS_FEATURE_NOT_SUPPORTED = 192, /* Feature not supported */
178 BFA_STATUS_ENTRY_EXISTS = 193, /* Entry already exists */
179 BFA_STATUS_ENTRY_NOT_EXISTS = 194, /* Entry does not exist */
180 BFA_STATUS_NO_CHANGE = 195, /* Feature already in that state */
175 BFA_STATUS_FAA_ENABLED = 197, /* FAA is already enabled */ 181 BFA_STATUS_FAA_ENABLED = 197, /* FAA is already enabled */
176 BFA_STATUS_FAA_DISABLED = 198, /* FAA is already disabled */ 182 BFA_STATUS_FAA_DISABLED = 198, /* FAA is already disabled */
177 BFA_STATUS_FAA_ACQUIRED = 199, /* FAA is already acquired */ 183 BFA_STATUS_FAA_ACQUIRED = 199, /* FAA is already acquired */
178 BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */ 184 BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */
179 BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */ 185 BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */
186 BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */
180 BFA_STATUS_MAX_VAL /* Unknown error code */ 187 BFA_STATUS_MAX_VAL /* Unknown error code */
181}; 188};
182#define bfa_status_t enum bfa_status 189#define bfa_status_t enum bfa_status
@@ -359,6 +366,139 @@ struct bfa_ioc_attr_s {
359}; 366};
360 367
361/* 368/*
369 * AEN related definitions
370 */
371enum bfa_aen_category {
372 BFA_AEN_CAT_ADAPTER = 1,
373 BFA_AEN_CAT_PORT = 2,
374 BFA_AEN_CAT_LPORT = 3,
375 BFA_AEN_CAT_RPORT = 4,
376 BFA_AEN_CAT_ITNIM = 5,
377 BFA_AEN_CAT_AUDIT = 8,
378 BFA_AEN_CAT_IOC = 9,
379};
380
381/* BFA adapter level events */
382enum bfa_adapter_aen_event {
383 BFA_ADAPTER_AEN_ADD = 1, /* New Adapter found event */
384 BFA_ADAPTER_AEN_REMOVE = 2, /* Adapter removed event */
385};
386
387struct bfa_adapter_aen_data_s {
388 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
389 u32 nports; /* Number of NPorts */
390 wwn_t pwwn; /* WWN of one of its physical port */
391};
392
393/* BFA physical port Level events */
394enum bfa_port_aen_event {
395 BFA_PORT_AEN_ONLINE = 1, /* Physical Port online event */
396 BFA_PORT_AEN_OFFLINE = 2, /* Physical Port offline event */
397 BFA_PORT_AEN_RLIR = 3, /* RLIR event, not supported */
398 BFA_PORT_AEN_SFP_INSERT = 4, /* SFP inserted event */
399 BFA_PORT_AEN_SFP_REMOVE = 5, /* SFP removed event */
400 BFA_PORT_AEN_SFP_POM = 6, /* SFP POM event */
401 BFA_PORT_AEN_ENABLE = 7, /* Physical Port enable event */
402 BFA_PORT_AEN_DISABLE = 8, /* Physical Port disable event */
403 BFA_PORT_AEN_AUTH_ON = 9, /* Physical Port auth success event */
404 BFA_PORT_AEN_AUTH_OFF = 10, /* Physical Port auth fail event */
405 BFA_PORT_AEN_DISCONNECT = 11, /* Physical Port disconnect event */
406 BFA_PORT_AEN_QOS_NEG = 12, /* Base Port QOS negotiation event */
407 BFA_PORT_AEN_FABRIC_NAME_CHANGE = 13, /* Fabric Name/WWN change */
408 BFA_PORT_AEN_SFP_ACCESS_ERROR = 14, /* SFP read error event */
409 BFA_PORT_AEN_SFP_UNSUPPORT = 15, /* Unsupported SFP event */
410};
411
412enum bfa_port_aen_sfp_pom {
413 BFA_PORT_AEN_SFP_POM_GREEN = 1, /* Normal */
414 BFA_PORT_AEN_SFP_POM_AMBER = 2, /* Warning */
415 BFA_PORT_AEN_SFP_POM_RED = 3, /* Critical */
416 BFA_PORT_AEN_SFP_POM_MAX = BFA_PORT_AEN_SFP_POM_RED
417};
418
419struct bfa_port_aen_data_s {
420 wwn_t pwwn; /* WWN of the physical port */
421 wwn_t fwwn; /* WWN of the fabric port */
422 u32 phy_port_num; /* For SFP related events */
423 u16 ioc_type;
424 u16 level; /* Only transitions will be informed */
425 mac_t mac; /* MAC address of the ethernet port */
426 u16 rsvd;
427};
428
429/* BFA AEN logical port events */
430enum bfa_lport_aen_event {
431 BFA_LPORT_AEN_NEW = 1, /* LPort created event */
432 BFA_LPORT_AEN_DELETE = 2, /* LPort deleted event */
433 BFA_LPORT_AEN_ONLINE = 3, /* LPort online event */
434 BFA_LPORT_AEN_OFFLINE = 4, /* LPort offline event */
435 BFA_LPORT_AEN_DISCONNECT = 5, /* LPort disconnect event */
436 BFA_LPORT_AEN_NEW_PROP = 6, /* VPort created event */
437 BFA_LPORT_AEN_DELETE_PROP = 7, /* VPort deleted event */
438 BFA_LPORT_AEN_NEW_STANDARD = 8, /* VPort created event */
439 BFA_LPORT_AEN_DELETE_STANDARD = 9, /* VPort deleted event */
440 BFA_LPORT_AEN_NPIV_DUP_WWN = 10, /* VPort with duplicate WWN */
441 BFA_LPORT_AEN_NPIV_FABRIC_MAX = 11, /* Max NPIV in fabric/fport */
442 BFA_LPORT_AEN_NPIV_UNKNOWN = 12, /* Unknown NPIV Error code */
443};
444
445struct bfa_lport_aen_data_s {
446 u16 vf_id; /* vf_id of this logical port */
447 u16 roles; /* Logical port mode,IM/TM/IP etc */
448 u32 rsvd;
449 wwn_t ppwwn; /* WWN of its physical port */
450 wwn_t lpwwn; /* WWN of this logical port */
451};
452
453/* BFA ITNIM events */
454enum bfa_itnim_aen_event {
455 BFA_ITNIM_AEN_ONLINE = 1, /* Target online */
456 BFA_ITNIM_AEN_OFFLINE = 2, /* Target offline */
457 BFA_ITNIM_AEN_DISCONNECT = 3, /* Target disconnected */
458};
459
460struct bfa_itnim_aen_data_s {
461 u16 vf_id; /* vf_id of the IT nexus */
462 u16 rsvd[3];
463 wwn_t ppwwn; /* WWN of its physical port */
464 wwn_t lpwwn; /* WWN of logical port */
465 wwn_t rpwwn; /* WWN of remote(target) port */
466};
467
468/* BFA audit events */
469enum bfa_audit_aen_event {
470 BFA_AUDIT_AEN_AUTH_ENABLE = 1,
471 BFA_AUDIT_AEN_AUTH_DISABLE = 2,
472 BFA_AUDIT_AEN_FLASH_ERASE = 3,
473 BFA_AUDIT_AEN_FLASH_UPDATE = 4,
474};
475
476struct bfa_audit_aen_data_s {
477 wwn_t pwwn;
478 int partition_inst;
479 int partition_type;
480};
481
482/* BFA IOC level events */
483enum bfa_ioc_aen_event {
484 BFA_IOC_AEN_HBGOOD = 1, /* Heart Beat restore event */
485 BFA_IOC_AEN_HBFAIL = 2, /* Heart Beat failure event */
486 BFA_IOC_AEN_ENABLE = 3, /* IOC enabled event */
487 BFA_IOC_AEN_DISABLE = 4, /* IOC disabled event */
488 BFA_IOC_AEN_FWMISMATCH = 5, /* IOC firmware mismatch */
489 BFA_IOC_AEN_FWCFG_ERROR = 6, /* IOC firmware config error */
490 BFA_IOC_AEN_INVALID_VENDOR = 7,
491 BFA_IOC_AEN_INVALID_NWWN = 8, /* Zero NWWN */
492 BFA_IOC_AEN_INVALID_PWWN = 9 /* Zero PWWN */
493};
494
495struct bfa_ioc_aen_data_s {
496 wwn_t pwwn;
497 u16 ioc_type;
498 mac_t mac;
499};
500
501/*
362 * ---------------------- mfg definitions ------------ 502 * ---------------------- mfg definitions ------------
363 */ 503 */
364 504
@@ -520,6 +660,20 @@ struct bfa_boot_bootlun_s {
520/* 660/*
521 * BOOT boot configuraton 661 * BOOT boot configuraton
522 */ 662 */
663struct bfa_boot_cfg_s {
664 u8 version;
665 u8 rsvd1;
666 u16 chksum;
667 u8 enable; /* enable/disable SAN boot */
668 u8 speed; /* boot speed settings */
669 u8 topology; /* boot topology setting */
670 u8 bootopt; /* bfa_boot_bootopt_t */
671 u32 nbluns; /* number of boot luns */
672 u32 rsvd2;
673 struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX];
674 struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX];
675};
676
523struct bfa_boot_pbc_s { 677struct bfa_boot_pbc_s {
524 u8 enable; /* enable/disable SAN boot */ 678 u8 enable; /* enable/disable SAN boot */
525 u8 speed; /* boot speed settings */ 679 u8 speed; /* boot speed settings */
@@ -529,6 +683,15 @@ struct bfa_boot_pbc_s {
529 struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX]; 683 struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
530}; 684};
531 685
686struct bfa_ethboot_cfg_s {
687 u8 version;
688 u8 rsvd1;
689 u16 chksum;
690 u8 enable; /* enable/disable Eth/PXE boot */
691 u8 rsvd2;
692 u16 vlan;
693};
694
532/* 695/*
533 * ASIC block configuration related structures 696 * ASIC block configuration related structures
534 */ 697 */
@@ -587,6 +750,14 @@ struct bfa_ablk_cfg_s {
587 */ 750 */
588#define SFP_DIAGMON_SIZE 10 /* num bytes of diag monitor data */ 751#define SFP_DIAGMON_SIZE 10 /* num bytes of diag monitor data */
589 752
753/* SFP state change notification event */
754#define BFA_SFP_SCN_REMOVED 0
755#define BFA_SFP_SCN_INSERTED 1
756#define BFA_SFP_SCN_POM 2
757#define BFA_SFP_SCN_FAILED 3
758#define BFA_SFP_SCN_UNSUPPORT 4
759#define BFA_SFP_SCN_VALID 5
760
590enum bfa_defs_sfp_media_e { 761enum bfa_defs_sfp_media_e {
591 BFA_SFP_MEDIA_UNKNOWN = 0x00, 762 BFA_SFP_MEDIA_UNKNOWN = 0x00,
592 BFA_SFP_MEDIA_CU = 0x01, 763 BFA_SFP_MEDIA_CU = 0x01,
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 0b97525803fb..863c6ba7d5eb 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -268,6 +268,7 @@ struct bfa_fw_port_snsm_stats_s {
268 u32 error_resets; /* error resets initiated by upsm */ 268 u32 error_resets; /* error resets initiated by upsm */
269 u32 sync_lost; /* Sync loss count */ 269 u32 sync_lost; /* Sync loss count */
270 u32 sig_lost; /* Signal loss count */ 270 u32 sig_lost; /* Signal loss count */
271 u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
271}; 272};
272 273
273struct bfa_fw_port_physm_stats_s { 274struct bfa_fw_port_physm_stats_s {
@@ -468,6 +469,7 @@ struct bfa_fw_stats_s {
468 * QoS states 469 * QoS states
469 */ 470 */
470enum bfa_qos_state { 471enum bfa_qos_state {
472 BFA_QOS_DISABLED = 0, /* QoS is disabled */
471 BFA_QOS_ONLINE = 1, /* QoS is online */ 473 BFA_QOS_ONLINE = 1, /* QoS is online */
472 BFA_QOS_OFFLINE = 2, /* QoS is offline */ 474 BFA_QOS_OFFLINE = 2, /* QoS is offline */
473}; 475};
@@ -670,6 +672,12 @@ struct bfa_itnim_iostats_s {
670 u32 tm_iocdowns; /* TM cleaned-up due to IOC down */ 672 u32 tm_iocdowns; /* TM cleaned-up due to IOC down */
671 u32 tm_cleanups; /* TM cleanup requests */ 673 u32 tm_cleanups; /* TM cleanup requests */
672 u32 tm_cleanup_comps; /* TM cleanup completions */ 674 u32 tm_cleanup_comps; /* TM cleanup completions */
675 u32 lm_lun_across_sg; /* LM lun is across sg data buf */
676 u32 lm_lun_not_sup; /* LM lun not supported */
677 u32 lm_rpl_data_changed; /* LM report-lun data changed */
678 u32 lm_wire_residue_changed; /* LM report-lun rsp residue changed */
679 u32 lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
680 u32 lm_lun_not_rdy; /* LM lun not ready */
673}; 681};
674 682
675/* Modify char* port_stt[] in bfal_port.c if a new state was added */ 683/* Modify char* port_stt[] in bfal_port.c if a new state was added */
@@ -785,8 +793,51 @@ enum bfa_port_linkstate_rsn {
785 CEE_ISCSI_PRI_PFC_OFF = 42, 793 CEE_ISCSI_PRI_PFC_OFF = 42,
786 CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43 794 CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43
787}; 795};
796
797#define MAX_LUN_MASK_CFG 16
798
799/*
800 * Initially flash content may be fff. On making LUN mask enable and disable
801 * state chnage. when report lun command is being processed it goes from
802 * BFA_LUN_MASK_ACTIVE to BFA_LUN_MASK_FETCH and comes back to
803 * BFA_LUN_MASK_ACTIVE.
804 */
805enum bfa_ioim_lun_mask_state_s {
806 BFA_IOIM_LUN_MASK_INACTIVE = 0,
807 BFA_IOIM_LUN_MASK_ACTIVE = 1,
808 BFA_IOIM_LUN_MASK_FETCHED = 2,
809};
810
811enum bfa_lunmask_state_s {
812 BFA_LUNMASK_DISABLED = 0x00,
813 BFA_LUNMASK_ENABLED = 0x01,
814 BFA_LUNMASK_MINCFG = 0x02,
815 BFA_LUNMASK_UNINITIALIZED = 0xff,
816};
817
788#pragma pack(1) 818#pragma pack(1)
789/* 819/*
820 * LUN mask configuration
821 */
822struct bfa_lun_mask_s {
823 wwn_t lp_wwn;
824 wwn_t rp_wwn;
825 struct scsi_lun lun;
826 u8 ua;
827 u8 rsvd[3];
828 u16 rp_tag;
829 u8 lp_tag;
830 u8 state;
831};
832
833#define MAX_LUN_MASK_CFG 16
834struct bfa_lunmask_cfg_s {
835 u32 status;
836 u32 rsvd;
837 struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG];
838};
839
840/*
790 * Physical port configuration 841 * Physical port configuration
791 */ 842 */
792struct bfa_port_cfg_s { 843struct bfa_port_cfg_s {
@@ -1228,4 +1279,52 @@ struct bfa_cee_stats_s {
1228 1279
1229#pragma pack() 1280#pragma pack()
1230 1281
1282/*
1283 * AEN related definitions
1284 */
1285#define BFAD_NL_VENDOR_ID (((u64)0x01 << SCSI_NL_VID_TYPE_SHIFT) \
1286 | BFA_PCI_VENDOR_ID_BROCADE)
1287
1288/* BFA remote port events */
1289enum bfa_rport_aen_event {
1290 BFA_RPORT_AEN_ONLINE = 1, /* RPort online event */
1291 BFA_RPORT_AEN_OFFLINE = 2, /* RPort offline event */
1292 BFA_RPORT_AEN_DISCONNECT = 3, /* RPort disconnect event */
1293 BFA_RPORT_AEN_QOS_PRIO = 4, /* QOS priority change event */
1294 BFA_RPORT_AEN_QOS_FLOWID = 5, /* QOS flow Id change event */
1295};
1296
1297struct bfa_rport_aen_data_s {
1298 u16 vf_id; /* vf_id of this logical port */
1299 u16 rsvd[3];
1300 wwn_t ppwwn; /* WWN of its physical port */
1301 wwn_t lpwwn; /* WWN of this logical port */
1302 wwn_t rpwwn; /* WWN of this remote port */
1303 union {
1304 struct bfa_rport_qos_attr_s qos;
1305 } priv;
1306};
1307
1308union bfa_aen_data_u {
1309 struct bfa_adapter_aen_data_s adapter;
1310 struct bfa_port_aen_data_s port;
1311 struct bfa_lport_aen_data_s lport;
1312 struct bfa_rport_aen_data_s rport;
1313 struct bfa_itnim_aen_data_s itnim;
1314 struct bfa_audit_aen_data_s audit;
1315 struct bfa_ioc_aen_data_s ioc;
1316};
1317
1318#define BFA_AEN_MAX_ENTRY 512
1319
1320struct bfa_aen_entry_s {
1321 struct list_head qe;
1322 enum bfa_aen_category aen_category;
1323 u32 aen_type;
1324 union bfa_aen_data_u aen_data;
1325 struct timeval aen_tv;
1326 u32 seq_num;
1327 u32 bfad_num;
1328};
1329
1231#endif /* __BFA_DEFS_SVC_H__ */ 1330#endif /* __BFA_DEFS_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 8d0b88f67a38..50b6a1c86195 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -56,6 +56,161 @@ struct scsi_cdb_s {
56 56
57#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */ 57#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */
58 58
59#define SCSI_SENSE_CUR_ERR 0x70
60#define SCSI_SENSE_DEF_ERR 0x71
61
62/*
63 * SCSI additional sense codes
64 */
65#define SCSI_ASC_LUN_NOT_READY 0x04
66#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25
67#define SCSI_ASC_TOCC 0x3F
68
69/*
70 * SCSI additional sense code qualifiers
71 */
72#define SCSI_ASCQ_MAN_INTR_REQ 0x03 /* manual intervention req */
73#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */
74
75/*
76 * Methods of reporting informational exceptions
77 */
78#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attention */
79
80struct scsi_report_luns_data_s {
81 u32 lun_list_length; /* length of LUN list length */
82 u32 reserved;
83 struct scsi_lun lun[1]; /* first LUN in lun list */
84};
85
86struct scsi_inquiry_vendor_s {
87 u8 vendor_id[8];
88};
89
90struct scsi_inquiry_prodid_s {
91 u8 product_id[16];
92};
93
94struct scsi_inquiry_prodrev_s {
95 u8 product_rev[4];
96};
97
98struct scsi_inquiry_data_s {
99#ifdef __BIG_ENDIAN
100 u8 peripheral_qual:3; /* peripheral qualifier */
101 u8 device_type:5; /* peripheral device type */
102 u8 rmb:1; /* removable medium bit */
103 u8 device_type_mod:7; /* device type modifier */
104 u8 version;
105 u8 aenc:1; /* async evt notification capability */
106 u8 trm_iop:1; /* terminate I/O process */
107 u8 norm_aca:1; /* normal ACA supported */
108 u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
109 u8 rsp_data_format:4;
110 u8 additional_len;
111 u8 sccs:1;
112 u8 reserved1:7;
113 u8 reserved2:1;
114 u8 enc_serv:1; /* enclosure service component */
115 u8 reserved3:1;
116 u8 multi_port:1; /* multi-port device */
117 u8 m_chngr:1; /* device in medium transport element */
118 u8 ack_req_q:1; /* SIP specific bit */
119 u8 addr32:1; /* SIP specific bit */
120 u8 addr16:1; /* SIP specific bit */
121 u8 rel_adr:1; /* relative address */
122 u8 w_bus32:1;
123 u8 w_bus16:1;
124 u8 synchronous:1;
125 u8 linked_commands:1;
126 u8 trans_dis:1;
127 u8 cmd_queue:1; /* command queueing supported */
128 u8 soft_reset:1; /* soft reset alternative (VS) */
129#else
130 u8 device_type:5; /* peripheral device type */
131 u8 peripheral_qual:3; /* peripheral qualifier */
132 u8 device_type_mod:7; /* device type modifier */
133 u8 rmb:1; /* removable medium bit */
134 u8 version;
135 u8 rsp_data_format:4;
136 u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
137 u8 norm_aca:1; /* normal ACA supported */
138 u8 terminate_iop:1;/* terminate I/O process */
139 u8 aenc:1; /* async evt notification capability */
140 u8 additional_len;
141 u8 reserved1:7;
142 u8 sccs:1;
143 u8 addr16:1; /* SIP specific bit */
144 u8 addr32:1; /* SIP specific bit */
145 u8 ack_req_q:1; /* SIP specific bit */
146 u8 m_chngr:1; /* device in medium transport element */
147 u8 multi_port:1; /* multi-port device */
148 u8 reserved3:1; /* TBD - Vendor Specific */
149 u8 enc_serv:1; /* enclosure service component */
150 u8 reserved2:1;
151 u8 soft_seset:1; /* soft reset alternative (VS) */
152 u8 cmd_queue:1; /* command queueing supported */
153 u8 trans_dis:1;
154 u8 linked_commands:1;
155 u8 synchronous:1;
156 u8 w_bus16:1;
157 u8 w_bus32:1;
158 u8 rel_adr:1; /* relative address */
159#endif
160 struct scsi_inquiry_vendor_s vendor_id;
161 struct scsi_inquiry_prodid_s product_id;
162 struct scsi_inquiry_prodrev_s product_rev;
163 u8 vendor_specific[20];
164 u8 reserved4[40];
165};
166
167/*
168 * SCSI sense data format
169 */
170struct scsi_sense_s {
171#ifdef __BIG_ENDIAN
172 u8 valid:1;
173 u8 rsp_code:7;
174#else
175 u8 rsp_code:7;
176 u8 valid:1;
177#endif
178 u8 seg_num;
179#ifdef __BIG_ENDIAN
180 u8 file_mark:1;
181 u8 eom:1; /* end of media */
182 u8 ili:1; /* incorrect length indicator */
183 u8 reserved:1;
184 u8 sense_key:4;
185#else
186 u8 sense_key:4;
187 u8 reserved:1;
188 u8 ili:1; /* incorrect length indicator */
189 u8 eom:1; /* end of media */
190 u8 file_mark:1;
191#endif
192 u8 information[4]; /* device-type or cmd specific info */
193 u8 add_sense_length; /* additional sense length */
194 u8 command_info[4];/* command specific information */
195 u8 asc; /* additional sense code */
196 u8 ascq; /* additional sense code qualifier */
197 u8 fru_code; /* field replaceable unit code */
198#ifdef __BIG_ENDIAN
199 u8 sksv:1; /* sense key specific valid */
200 u8 c_d:1; /* command/data bit */
201 u8 res1:2;
202 u8 bpv:1; /* bit pointer valid */
203 u8 bpointer:3; /* bit pointer */
204#else
205 u8 bpointer:3; /* bit pointer */
206 u8 bpv:1; /* bit pointer valid */
207 u8 res1:2;
208 u8 c_d:1; /* command/data bit */
209 u8 sksv:1; /* sense key specific valid */
210#endif
211 u8 fpointer[2]; /* field pointer */
212};
213
59/* 214/*
60 * Fibre Channel Header Structure (FCHS) definition 215 * Fibre Channel Header Structure (FCHS) definition
61 */ 216 */
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index a4e7951c6063..e07bd4745d8b 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -24,6 +24,9 @@ BFA_TRC_FILE(HAL, FCPIM);
24 * BFA ITNIM Related definitions 24 * BFA ITNIM Related definitions
25 */ 25 */
26static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); 26static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
27static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
28static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
29static void bfa_ioim_lm_init(struct bfa_s *bfa);
27 30
28#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \ 31#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
29 (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1)))) 32 (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
@@ -57,6 +60,14 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
57 } \ 60 } \
58} while (0) 61} while (0)
59 62
63#define bfa_ioim_rp_wwn(__ioim) \
64 (((struct bfa_fcs_rport_s *) \
65 (__ioim)->itnim->rport->rport_drv)->pwwn)
66
67#define bfa_ioim_lp_wwn(__ioim) \
68 ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \
69 (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn) \
70
60#define bfa_itnim_sler_cb(__itnim) do { \ 71#define bfa_itnim_sler_cb(__itnim) do { \
61 if ((__itnim)->bfa->fcs) \ 72 if ((__itnim)->bfa->fcs) \
62 bfa_cb_itnim_sler((__itnim)->ditn); \ 73 bfa_cb_itnim_sler((__itnim)->ditn); \
@@ -66,6 +77,18 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
66 } \ 77 } \
67} while (0) 78} while (0)
68 79
80enum bfa_ioim_lm_status {
81 BFA_IOIM_LM_PRESENT = 1,
82 BFA_IOIM_LM_LUN_NOT_SUP = 2,
83 BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
84 BFA_IOIM_LM_LUN_NOT_RDY = 4,
85};
86
87enum bfa_ioim_lm_ua_status {
88 BFA_IOIM_LM_UA_RESET = 0,
89 BFA_IOIM_LM_UA_SET = 1,
90};
91
69/* 92/*
70 * itnim state machine event 93 * itnim state machine event
71 */ 94 */
@@ -122,6 +145,9 @@ enum bfa_ioim_event {
122 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */ 145 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
123 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */ 146 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
124 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */ 147 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
148 BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
149 BFA_IOIM_SM_LM_RPL_DC = 20, /* lunmask report-lun data changed */
150 BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
125}; 151};
126 152
127 153
@@ -219,6 +245,9 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
219static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete); 245static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
220static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); 246static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
221static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); 247static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
248static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
249static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
250static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
222 251
223/* 252/*
224 * forward declaration of BFA IO state machine 253 * forward declaration of BFA IO state machine
@@ -416,6 +445,12 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
416 bfa_fcpim_add_iostats(lstats, rstats, output_reqs); 445 bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
417 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput); 446 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
418 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput); 447 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
448 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
449 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
450 bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
451 bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
452 bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
453 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
419} 454}
420 455
421bfa_status_t 456bfa_status_t
@@ -437,6 +472,59 @@ bfa_fcpim_port_iostats(struct bfa_s *bfa,
437 return BFA_STATUS_OK; 472 return BFA_STATUS_OK;
438} 473}
439 474
475void
476bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
477{
478 struct bfa_itnim_latency_s *io_lat =
479 &(ioim->itnim->ioprofile.io_latency);
480 u32 val, idx;
481
482 val = (u32)(jiffies - ioim->start_time);
483 idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
484 bfa_itnim_ioprofile_update(ioim->itnim, idx);
485
486 io_lat->count[idx]++;
487 io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
488 io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
489 io_lat->avg[idx] += val;
490}
491
492void
493bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
494{
495 ioim->start_time = jiffies;
496}
497
498bfa_status_t
499bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
500{
501 struct bfa_itnim_s *itnim;
502 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
503 struct list_head *qe, *qen;
504
505 /* accumulate IO stats from itnim */
506 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
507 itnim = (struct bfa_itnim_s *) qe;
508 bfa_itnim_clear_stats(itnim);
509 }
510 fcpim->io_profile = BFA_TRUE;
511 fcpim->io_profile_start_time = time;
512 fcpim->profile_comp = bfa_ioim_profile_comp;
513 fcpim->profile_start = bfa_ioim_profile_start;
514 return BFA_STATUS_OK;
515}
516
517bfa_status_t
518bfa_fcpim_profile_off(struct bfa_s *bfa)
519{
520 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
521 fcpim->io_profile = BFA_FALSE;
522 fcpim->io_profile_start_time = 0;
523 fcpim->profile_comp = NULL;
524 fcpim->profile_start = NULL;
525 return BFA_STATUS_OK;
526}
527
440u16 528u16
441bfa_fcpim_qdepth_get(struct bfa_s *bfa) 529bfa_fcpim_qdepth_get(struct bfa_s *bfa)
442{ 530{
@@ -1401,6 +1489,26 @@ bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
1401 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)); 1489 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
1402} 1490}
1403 1491
1492#define bfa_io_lat_clock_res_div HZ
1493#define bfa_io_lat_clock_res_mul 1000
1494bfa_status_t
1495bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
1496 struct bfa_itnim_ioprofile_s *ioprofile)
1497{
1498 struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
1499 if (!fcpim->io_profile)
1500 return BFA_STATUS_IOPROFILE_OFF;
1501
1502 itnim->ioprofile.index = BFA_IOBUCKET_MAX;
1503 itnim->ioprofile.io_profile_start_time =
1504 bfa_io_profile_start_time(itnim->bfa);
1505 itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
1506 itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
1507 *ioprofile = itnim->ioprofile;
1508
1509 return BFA_STATUS_OK;
1510}
1511
1404void 1512void
1405bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) 1513bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1406{ 1514{
@@ -1469,7 +1577,28 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1469 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 1577 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1470 WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)); 1578 WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
1471 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, 1579 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1472 __bfa_cb_ioim_abort, ioim); 1580 __bfa_cb_ioim_abort, ioim);
1581 break;
1582
1583 case BFA_IOIM_SM_LM_LUN_NOT_SUP:
1584 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1585 bfa_ioim_move_to_comp_q(ioim);
1586 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1587 __bfa_cb_ioim_lm_lun_not_sup, ioim);
1588 break;
1589
1590 case BFA_IOIM_SM_LM_RPL_DC:
1591 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1592 bfa_ioim_move_to_comp_q(ioim);
1593 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1594 __bfa_cb_ioim_lm_rpl_dc, ioim);
1595 break;
1596
1597 case BFA_IOIM_SM_LM_LUN_NOT_RDY:
1598 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1599 bfa_ioim_move_to_comp_q(ioim);
1600 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1601 __bfa_cb_ioim_lm_lun_not_rdy, ioim);
1473 break; 1602 break;
1474 1603
1475 default: 1604 default:
@@ -2009,6 +2138,264 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2009 } 2138 }
2010} 2139}
2011 2140
2141/*
2142 * This is called from bfa_fcpim_start after the bfa_init() with flash read
2143 * is complete by driver. now invalidate the stale content of lun mask
2144 * like unit attention, rp tag and lp tag.
2145 */
2146static void
2147bfa_ioim_lm_init(struct bfa_s *bfa)
2148{
2149 struct bfa_lun_mask_s *lunm_list;
2150 int i;
2151
2152 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2153 return;
2154
2155 lunm_list = bfa_get_lun_mask_list(bfa);
2156 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2157 lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
2158 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2159 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2160 }
2161}
2162
2163/*
2164 * Validate LUN for LUN masking
2165 */
2166static enum bfa_ioim_lm_status
2167bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
2168 struct bfa_rport_s *rp, struct scsi_lun lun)
2169{
2170 u8 i;
2171 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2172 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2173 struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
2174
2175 if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
2176 (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
2177 ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
2178 return BFA_IOIM_LM_PRESENT;
2179 }
2180
2181 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2182
2183 if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2184 continue;
2185
2186 if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
2187 scsilun_to_int((struct scsi_lun *)&lun))
2188 && (rp->rport_tag == lun_list[i].rp_tag)
2189 && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
2190 lun_list[i].lp_tag)) {
2191 bfa_trc(ioim->bfa, lun_list[i].rp_tag);
2192 bfa_trc(ioim->bfa, lun_list[i].lp_tag);
2193 bfa_trc(ioim->bfa, scsilun_to_int(
2194 (struct scsi_lun *)&lun_list[i].lun));
2195
2196 if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
2197 ((cdb->scsi_cdb[0] != INQUIRY) ||
2198 (cdb->scsi_cdb[0] != REPORT_LUNS))) {
2199 lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
2200 return BFA_IOIM_LM_RPL_DATA_CHANGED;
2201 }
2202
2203 if (cdb->scsi_cdb[0] == REPORT_LUNS)
2204 ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
2205
2206 return BFA_IOIM_LM_PRESENT;
2207 }
2208 }
2209
2210 if ((cdb->scsi_cdb[0] == INQUIRY) &&
2211 (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
2212 ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
2213 return BFA_IOIM_LM_PRESENT;
2214 }
2215
2216 if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
2217 return BFA_IOIM_LM_LUN_NOT_RDY;
2218
2219 return BFA_IOIM_LM_LUN_NOT_SUP;
2220}
2221
2222static bfa_boolean_t
2223bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
2224{
2225 return BFA_TRUE;
2226}
2227
2228static void
2229bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
2230 int buf_lun_cnt)
2231{
2232 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2233 struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
2234 struct scsi_lun lun;
2235 int i, j;
2236
2237 bfa_trc(ioim->bfa, buf_lun_cnt);
2238 for (j = 0; j < buf_lun_cnt; j++) {
2239 lun = *((struct scsi_lun *)(lun_data + j));
2240 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2241 if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2242 continue;
2243 if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
2244 (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
2245 (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
2246 == scsilun_to_int((struct scsi_lun *)&lun))) {
2247 lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
2248 break;
2249 }
2250 } /* next lun in mask DB */
2251 } /* next lun in buf */
2252}
2253
2254static int
2255bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
2256 struct scsi_report_luns_data_s *rl)
2257{
2258 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2259 struct scatterlist *sg = scsi_sglist(cmnd);
2260 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2261 struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
2262 int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
2263 int lun_across_sg_bytes, bytes_from_next_buf;
2264 u64 last_lun, temp_last_lun;
2265
2266 /* fetch luns from the first sg element */
2267 bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
2268 (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
2269
2270 /* fetch luns from multiple sg elements */
2271 scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
2272 if (sgeid == 0) {
2273 prev_sg_len = sg_dma_len(sg);
2274 prev_rl_data = (struct scsi_lun *)
2275 phys_to_virt(sg_dma_address(sg));
2276 continue;
2277 }
2278
2279 /* if the buf is having more data */
2280 lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
2281 if (lun_across_sg_bytes) {
2282 bfa_trc(ioim->bfa, lun_across_sg_bytes);
2283 bfa_stats(ioim->itnim, lm_lun_across_sg);
2284 bytes_from_next_buf = sizeof(struct scsi_lun) -
2285 lun_across_sg_bytes;
2286
2287 /* from next buf take higher bytes */
2288 temp_last_lun = *((u64 *)
2289 phys_to_virt(sg_dma_address(sg)));
2290 last_lun |= temp_last_lun >>
2291 (lun_across_sg_bytes * BITS_PER_BYTE);
2292
2293 /* from prev buf take higher bytes */
2294 temp_last_lun = *((u64 *)(prev_rl_data +
2295 (prev_sg_len - lun_across_sg_bytes)));
2296 temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
2297 last_lun = last_lun | (temp_last_lun <<
2298 (bytes_from_next_buf * BITS_PER_BYTE));
2299
2300 bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
2301 } else
2302 bytes_from_next_buf = 0;
2303
2304 *pgdlen += sg_dma_len(sg);
2305 prev_sg_len = sg_dma_len(sg);
2306 prev_rl_data = (struct scsi_lun *)
2307 phys_to_virt(sg_dma_address(sg));
2308 bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
2309 bytes_from_next_buf,
2310 sg_dma_len(sg) / sizeof(struct scsi_lun));
2311 }
2312
2313 /* update the report luns data - based on fetched luns */
2314 sg = scsi_sglist(cmnd);
2315 base_rl_data = (struct scsi_lun *)rl->lun;
2316 base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
2317 for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
2318 if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
2319 base_rl_data[j] = lun_list[i].lun;
2320 lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
2321 j++;
2322 lun_fetched_cnt++;
2323 }
2324
2325 if (j > base_count) {
2326 j = 0;
2327 sg = sg_next(sg);
2328 base_rl_data = (struct scsi_lun *)
2329 phys_to_virt(sg_dma_address(sg));
2330 base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
2331 }
2332 }
2333
2334 bfa_trc(ioim->bfa, lun_fetched_cnt);
2335 return lun_fetched_cnt;
2336}
2337
2338static bfa_boolean_t
2339bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
2340{
2341 struct scsi_inquiry_data_s *inq;
2342 struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
2343
2344 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2345 inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
2346
2347 bfa_trc(ioim->bfa, inq->device_type);
2348 inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
2349 return 0;
2350}
2351
2352static bfa_boolean_t
2353bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
2354{
2355 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2356 struct scatterlist *sg = scsi_sglist(cmnd);
2357 struct bfi_ioim_rsp_s *m;
2358 struct scsi_report_luns_data_s *rl = NULL;
2359 int lun_count = 0, lun_fetched_cnt = 0;
2360 u32 residue, pgdlen = 0;
2361
2362 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2363 if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
2364 return BFA_TRUE;
2365
2366 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2367 if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
2368 return BFA_TRUE;
2369
2370 pgdlen = sg_dma_len(sg);
2371 bfa_trc(ioim->bfa, pgdlen);
2372 rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
2373 lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
2374 lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
2375
2376 if (lun_count == lun_fetched_cnt)
2377 return BFA_TRUE;
2378
2379 bfa_trc(ioim->bfa, lun_count);
2380 bfa_trc(ioim->bfa, lun_fetched_cnt);
2381 bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
2382
2383 if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
2384 rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
2385 sizeof(struct scsi_lun);
2386 else
2387 bfa_stats(ioim->itnim, lm_small_buf_addresidue);
2388
2389 bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
2390 bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
2391
2392 residue = be32_to_cpu(m->residue);
2393 residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
2394 bfa_stats(ioim->itnim, lm_wire_residue_changed);
2395 m->residue = be32_to_cpu(residue);
2396 bfa_trc(ioim->bfa, ioim->nsges);
2397 return BFA_FALSE;
2398}
2012 2399
2013static void 2400static void
2014__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) 2401__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
@@ -2068,6 +2455,299 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2068} 2455}
2069 2456
2070static void 2457static void
2458__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
2459{
2460 struct bfa_ioim_s *ioim = cbarg;
2461 int sns_len = 0xD;
2462 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2463 struct scsi_sense_s *snsinfo;
2464
2465 if (!complete) {
2466 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2467 return;
2468 }
2469
2470 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
2471 ioim->fcpim->fcp, ioim->iotag);
2472 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2473 snsinfo->add_sense_length = 0xa;
2474 snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
2475 snsinfo->sense_key = ILLEGAL_REQUEST;
2476 bfa_trc(ioim->bfa, residue);
2477 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2478 SCSI_STATUS_CHECK_CONDITION, sns_len,
2479 (u8 *)snsinfo, residue);
2480}
2481
2482static void
2483__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
2484{
2485 struct bfa_ioim_s *ioim = cbarg;
2486 int sns_len = 0xD;
2487 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2488 struct scsi_sense_s *snsinfo;
2489
2490 if (!complete) {
2491 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2492 return;
2493 }
2494
2495 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2496 ioim->iotag);
2497 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2498 snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
2499 snsinfo->asc = SCSI_ASC_TOCC;
2500 snsinfo->add_sense_length = 0x6;
2501 snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
2502 bfa_trc(ioim->bfa, residue);
2503 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2504 SCSI_STATUS_CHECK_CONDITION, sns_len,
2505 (u8 *)snsinfo, residue);
2506}
2507
2508static void
2509__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
2510{
2511 struct bfa_ioim_s *ioim = cbarg;
2512 int sns_len = 0xD;
2513 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2514 struct scsi_sense_s *snsinfo;
2515
2516 if (!complete) {
2517 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2518 return;
2519 }
2520
2521 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
2522 ioim->fcpim->fcp, ioim->iotag);
2523 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2524 snsinfo->add_sense_length = 0xa;
2525 snsinfo->sense_key = NOT_READY;
2526 snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
2527 snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
2528 bfa_trc(ioim->bfa, residue);
2529 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2530 SCSI_STATUS_CHECK_CONDITION, sns_len,
2531 (u8 *)snsinfo, residue);
2532}
2533
2534void
2535bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
2536 u16 rp_tag, u8 lp_tag)
2537{
2538 struct bfa_lun_mask_s *lun_list;
2539 u8 i;
2540
2541 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2542 return;
2543
2544 lun_list = bfa_get_lun_mask_list(bfa);
2545 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2546 if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2547 if ((lun_list[i].lp_wwn == lp_wwn) &&
2548 (lun_list[i].rp_wwn == rp_wwn)) {
2549 lun_list[i].rp_tag = rp_tag;
2550 lun_list[i].lp_tag = lp_tag;
2551 }
2552 }
2553 }
2554}
2555
2556/*
2557 * set UA for all active luns in LM DB
2558 */
2559static void
2560bfa_ioim_lm_set_ua(struct bfa_s *bfa)
2561{
2562 struct bfa_lun_mask_s *lunm_list;
2563 int i;
2564
2565 lunm_list = bfa_get_lun_mask_list(bfa);
2566 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2567 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2568 continue;
2569 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2570 }
2571}
2572
2573bfa_status_t
2574bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
2575{
2576 struct bfa_lunmask_cfg_s *lun_mask;
2577
2578 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2579 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2580 return BFA_STATUS_FAILED;
2581
2582 if (bfa_get_lun_mask_status(bfa) == update)
2583 return BFA_STATUS_NO_CHANGE;
2584
2585 lun_mask = bfa_get_lun_mask(bfa);
2586 lun_mask->status = update;
2587
2588 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
2589 bfa_ioim_lm_set_ua(bfa);
2590
2591 return bfa_dconf_update(bfa);
2592}
2593
2594bfa_status_t
2595bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
2596{
2597 int i;
2598 struct bfa_lun_mask_s *lunm_list;
2599
2600 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2601 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2602 return BFA_STATUS_FAILED;
2603
2604 lunm_list = bfa_get_lun_mask_list(bfa);
2605 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2606 if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2607 if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
2608 bfa_rport_unset_lunmask(bfa,
2609 BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
2610 }
2611 }
2612
2613 memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
2614 return bfa_dconf_update(bfa);
2615}
2616
2617bfa_status_t
2618bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
2619{
2620 struct bfa_lunmask_cfg_s *lun_mask;
2621
2622 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2623 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2624 return BFA_STATUS_FAILED;
2625
2626 lun_mask = bfa_get_lun_mask(bfa);
2627 memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
2628 return BFA_STATUS_OK;
2629}
2630
2631bfa_status_t
2632bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2633 wwn_t rpwwn, struct scsi_lun lun)
2634{
2635 struct bfa_lun_mask_s *lunm_list;
2636 struct bfa_rport_s *rp = NULL;
2637 int i, free_index = MAX_LUN_MASK_CFG + 1;
2638 struct bfa_fcs_lport_s *port = NULL;
2639 struct bfa_fcs_rport_s *rp_fcs;
2640
2641 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2642 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2643 return BFA_STATUS_FAILED;
2644
2645 port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
2646 vf_id, *pwwn);
2647 if (port) {
2648 *pwwn = port->port_cfg.pwwn;
2649 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2650 rp = rp_fcs->bfa_rport;
2651 }
2652
2653 lunm_list = bfa_get_lun_mask_list(bfa);
2654 /* if entry exists */
2655 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2656 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2657 free_index = i;
2658 if ((lunm_list[i].lp_wwn == *pwwn) &&
2659 (lunm_list[i].rp_wwn == rpwwn) &&
2660 (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2661 scsilun_to_int((struct scsi_lun *)&lun)))
2662 return BFA_STATUS_ENTRY_EXISTS;
2663 }
2664
2665 if (free_index > MAX_LUN_MASK_CFG)
2666 return BFA_STATUS_MAX_ENTRY_REACHED;
2667
2668 if (rp) {
2669 lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
2670 rp->rport_info.local_pid);
2671 lunm_list[free_index].rp_tag = rp->rport_tag;
2672 } else {
2673 lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
2674 lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
2675 }
2676
2677 lunm_list[free_index].lp_wwn = *pwwn;
2678 lunm_list[free_index].rp_wwn = rpwwn;
2679 lunm_list[free_index].lun = lun;
2680 lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
2681
2682 /* set for all luns in this rp */
2683 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2684 if ((lunm_list[i].lp_wwn == *pwwn) &&
2685 (lunm_list[i].rp_wwn == rpwwn))
2686 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2687 }
2688
2689 return bfa_dconf_update(bfa);
2690}
2691
2692bfa_status_t
2693bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2694 wwn_t rpwwn, struct scsi_lun lun)
2695{
2696 struct bfa_lun_mask_s *lunm_list;
2697 struct bfa_rport_s *rp = NULL;
2698 struct bfa_fcs_lport_s *port = NULL;
2699 struct bfa_fcs_rport_s *rp_fcs;
2700 int i;
2701
2702 /* in min cfg lunm_list could be NULL but no commands should run. */
2703 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2704 return BFA_STATUS_FAILED;
2705
2706 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2707 bfa_trc(bfa, *pwwn);
2708 bfa_trc(bfa, rpwwn);
2709 bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
2710
2711 if (*pwwn == 0) {
2712 port = bfa_fcs_lookup_port(
2713 &((struct bfad_s *)bfa->bfad)->bfa_fcs,
2714 vf_id, *pwwn);
2715 if (port) {
2716 *pwwn = port->port_cfg.pwwn;
2717 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2718 rp = rp_fcs->bfa_rport;
2719 }
2720 }
2721
2722 lunm_list = bfa_get_lun_mask_list(bfa);
2723 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2724 if ((lunm_list[i].lp_wwn == *pwwn) &&
2725 (lunm_list[i].rp_wwn == rpwwn) &&
2726 (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2727 scsilun_to_int((struct scsi_lun *)&lun))) {
2728 lunm_list[i].lp_wwn = 0;
2729 lunm_list[i].rp_wwn = 0;
2730 int_to_scsilun(0, &lunm_list[i].lun);
2731 lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
2732 if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
2733 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2734 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2735 }
2736 return bfa_dconf_update(bfa);
2737 }
2738 }
2739
2740 /* set for all luns in this rp */
2741 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2742 if ((lunm_list[i].lp_wwn == *pwwn) &&
2743 (lunm_list[i].rp_wwn == rpwwn))
2744 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2745 }
2746
2747 return BFA_STATUS_ENTRY_NOT_EXISTS;
2748}
2749
2750static void
2071__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete) 2751__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2072{ 2752{
2073 struct bfa_ioim_s *ioim = cbarg; 2753 struct bfa_ioim_s *ioim = cbarg;
@@ -2077,6 +2757,7 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2077 return; 2757 return;
2078 } 2758 }
2079 2759
2760 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2080 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED, 2761 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2081 0, 0, NULL, 0); 2762 0, 0, NULL, 0);
2082} 2763}
@@ -2092,6 +2773,7 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2092 return; 2773 return;
2093 } 2774 }
2094 2775
2776 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2095 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV, 2777 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2096 0, 0, NULL, 0); 2778 0, 0, NULL, 0);
2097} 2779}
@@ -2106,6 +2788,7 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2106 return; 2788 return;
2107 } 2789 }
2108 2790
2791 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2109 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio); 2792 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2110} 2793}
2111 2794
@@ -2449,6 +3132,7 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
2449 ioim->bfa = fcpim->bfa; 3132 ioim->bfa = fcpim->bfa;
2450 ioim->fcpim = fcpim; 3133 ioim->fcpim = fcpim;
2451 ioim->iosp = iosp; 3134 ioim->iosp = iosp;
3135 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2452 INIT_LIST_HEAD(&ioim->sgpg_q); 3136 INIT_LIST_HEAD(&ioim->sgpg_q);
2453 bfa_reqq_winit(&ioim->iosp->reqq_wait, 3137 bfa_reqq_winit(&ioim->iosp->reqq_wait,
2454 bfa_ioim_qresume, ioim); 3138 bfa_ioim_qresume, ioim);
@@ -2486,6 +3170,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2486 evt = BFA_IOIM_SM_DONE; 3170 evt = BFA_IOIM_SM_DONE;
2487 else 3171 else
2488 evt = BFA_IOIM_SM_COMP; 3172 evt = BFA_IOIM_SM_COMP;
3173 ioim->proc_rsp_data(ioim);
2489 break; 3174 break;
2490 3175
2491 case BFI_IOIM_STS_TIMEDOUT: 3176 case BFI_IOIM_STS_TIMEDOUT:
@@ -2521,6 +3206,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2521 if (rsp->abort_tag != ioim->abort_tag) { 3206 if (rsp->abort_tag != ioim->abort_tag) {
2522 bfa_trc(ioim->bfa, rsp->abort_tag); 3207 bfa_trc(ioim->bfa, rsp->abort_tag);
2523 bfa_trc(ioim->bfa, ioim->abort_tag); 3208 bfa_trc(ioim->bfa, ioim->abort_tag);
3209 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2524 return; 3210 return;
2525 } 3211 }
2526 3212
@@ -2539,6 +3225,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2539 WARN_ON(1); 3225 WARN_ON(1);
2540 } 3226 }
2541 3227
3228 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2542 bfa_sm_send_event(ioim, evt); 3229 bfa_sm_send_event(ioim, evt);
2543} 3230}
2544 3231
@@ -2556,7 +3243,16 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2556 WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag); 3243 WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
2557 3244
2558 bfa_ioim_cb_profile_comp(fcpim, ioim); 3245 bfa_ioim_cb_profile_comp(fcpim, ioim);
2559 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); 3246
3247 if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
3248 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
3249 return;
3250 }
3251
3252 if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
3253 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
3254 else
3255 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
2560} 3256}
2561 3257
2562/* 3258/*
@@ -2668,6 +3364,35 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
2668void 3364void
2669bfa_ioim_start(struct bfa_ioim_s *ioim) 3365bfa_ioim_start(struct bfa_ioim_s *ioim)
2670{ 3366{
3367 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
3368 struct bfa_lps_s *lps;
3369 enum bfa_ioim_lm_status status;
3370 struct scsi_lun scsilun;
3371
3372 if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
3373 lps = BFA_IOIM_TO_LPS(ioim);
3374 int_to_scsilun(cmnd->device->lun, &scsilun);
3375 status = bfa_ioim_lm_check(ioim, lps,
3376 ioim->itnim->rport, scsilun);
3377 if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
3378 bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
3379 bfa_stats(ioim->itnim, lm_lun_not_rdy);
3380 return;
3381 }
3382
3383 if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
3384 bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
3385 bfa_stats(ioim->itnim, lm_lun_not_sup);
3386 return;
3387 }
3388
3389 if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
3390 bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
3391 bfa_stats(ioim->itnim, lm_rpl_data_changed);
3392 return;
3393 }
3394 }
3395
2671 bfa_ioim_cb_profile_start(ioim->fcpim, ioim); 3396 bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
2672 3397
2673 /* 3398 /*
@@ -3411,6 +4136,13 @@ bfa_fcp_detach(struct bfa_s *bfa)
3411static void 4136static void
3412bfa_fcp_start(struct bfa_s *bfa) 4137bfa_fcp_start(struct bfa_s *bfa)
3413{ 4138{
4139 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
4140
4141 /*
4142 * bfa_init() with flash read is complete. now invalidate the stale
4143 * content of lun mask like unit attention, rp tag and lp tag.
4144 */
4145 bfa_ioim_lm_init(fcp->bfa);
3414} 4146}
3415 4147
3416static void 4148static void
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 57b695ad4ee5..1080bcb81cb7 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -79,14 +79,22 @@ bfa_ioim_get_index(u32 n) {
79 if (n >= (1UL)<<22) 79 if (n >= (1UL)<<22)
80 return BFA_IOBUCKET_MAX - 1; 80 return BFA_IOBUCKET_MAX - 1;
81 n >>= 8; 81 n >>= 8;
82 if (n >= (1UL)<<16) 82 if (n >= (1UL)<<16) {
83 n >>= 16; pos += 16; 83 n >>= 16;
84 if (n >= 1 << 8) 84 pos += 16;
85 n >>= 8; pos += 8; 85 }
86 if (n >= 1 << 4) 86 if (n >= 1 << 8) {
87 n >>= 4; pos += 4; 87 n >>= 8;
88 if (n >= 1 << 2) 88 pos += 8;
89 n >>= 2; pos += 2; 89 }
90 if (n >= 1 << 4) {
91 n >>= 4;
92 pos += 4;
93 }
94 if (n >= 1 << 2) {
95 n >>= 2;
96 pos += 2;
97 }
90 if (n >= 1 << 1) 98 if (n >= 1 << 1)
91 pos += 1; 99 pos += 1;
92 100
@@ -102,6 +110,7 @@ struct bfad_ioim_s;
102struct bfad_tskim_s; 110struct bfad_tskim_s;
103 111
104typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim); 112typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
113typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
105 114
106struct bfa_fcpim_s { 115struct bfa_fcpim_s {
107 struct bfa_s *bfa; 116 struct bfa_s *bfa;
@@ -115,7 +124,7 @@ struct bfa_fcpim_s {
115 u32 path_tov; 124 u32 path_tov;
116 u16 q_depth; 125 u16 q_depth;
117 u8 reqq; /* Request queue to be used */ 126 u8 reqq; /* Request queue to be used */
118 u8 rsvd; 127 u8 lun_masking_pending;
119 struct list_head itnim_q; /* queue of active itnim */ 128 struct list_head itnim_q; /* queue of active itnim */
120 struct list_head ioim_resfree_q; /* IOs waiting for f/w */ 129 struct list_head ioim_resfree_q; /* IOs waiting for f/w */
121 struct list_head ioim_comp_q; /* IO global comp Q */ 130 struct list_head ioim_comp_q; /* IO global comp Q */
@@ -170,7 +179,9 @@ struct bfa_ioim_s {
170 bfa_cb_cbfn_t io_cbfn; /* IO completion handler */ 179 bfa_cb_cbfn_t io_cbfn; /* IO completion handler */
171 struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */ 180 struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */
172 u8 reqq; /* Request queue for I/O */ 181 u8 reqq; /* Request queue for I/O */
182 u8 mode; /* IO is passthrough or not */
173 u64 start_time; /* IO's Profile start val */ 183 u64 start_time; /* IO's Profile start val */
184 bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
174}; 185};
175 186
176struct bfa_ioim_sp_s { 187struct bfa_ioim_sp_s {
@@ -250,6 +261,10 @@ struct bfa_itnim_s {
250 (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \ 261 (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \
251} while (0) 262} while (0)
252 263
264#define BFA_IOIM_TO_LPS(__ioim) \
265 BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa), \
266 __ioim->itnim->rport->rport_info.lp_tag)
267
253static inline bfa_boolean_t 268static inline bfa_boolean_t
254bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim) 269bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
255{ 270{
@@ -297,6 +312,8 @@ bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
297 struct bfa_itnim_iostats_s *stats, u8 lp_tag); 312 struct bfa_itnim_iostats_s *stats, u8 lp_tag);
298void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats, 313void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
299 struct bfa_itnim_iostats_s *itnim_stats); 314 struct bfa_itnim_iostats_s *itnim_stats);
315bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
316bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
300 317
301#define bfa_fcpim_ioredirect_enabled(__bfa) \ 318#define bfa_fcpim_ioredirect_enabled(__bfa) \
302 (((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect) 319 (((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect)
@@ -397,4 +414,14 @@ void bfa_tskim_start(struct bfa_tskim_s *tskim,
397void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, 414void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
398 enum bfi_tskim_status tsk_status); 415 enum bfi_tskim_status tsk_status);
399 416
417void bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn,
418 wwn_t rp_wwn, u16 rp_tag, u8 lp_tag);
419bfa_status_t bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 on_off);
420bfa_status_t bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf);
421bfa_status_t bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id,
422 wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
423bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id,
424 wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
425bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa);
426
400#endif /* __BFA_FCPIM_H__ */ 427#endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index a9b22bc48bc3..eaac57e1ddec 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include "bfad_drv.h" 22#include "bfad_drv.h"
23#include "bfad_im.h"
23#include "bfa_fcs.h" 24#include "bfa_fcs.h"
24#include "bfa_fcbuild.h" 25#include "bfa_fcbuild.h"
25 26
@@ -1327,6 +1328,29 @@ bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1327 bfa_trc(fabric->fcs, status); 1328 bfa_trc(fabric->fcs, status);
1328} 1329}
1329 1330
1331
1332/*
1333 * Send AEN notification
1334 */
1335static void
1336bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port,
1337 enum bfa_port_aen_event event)
1338{
1339 struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
1340 struct bfa_aen_entry_s *aen_entry;
1341
1342 bfad_get_aen_entry(bfad, aen_entry);
1343 if (!aen_entry)
1344 return;
1345
1346 aen_entry->aen_data.port.pwwn = bfa_fcs_lport_get_pwwn(port);
1347 aen_entry->aen_data.port.fwwn = bfa_fcs_lport_get_fabric_name(port);
1348
1349 /* Send the AEN notification */
1350 bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
1351 BFA_AEN_CAT_PORT, event);
1352}
1353
1330/* 1354/*
1331 * 1355 *
1332 * @param[in] fabric - fabric 1356 * @param[in] fabric - fabric
@@ -1358,6 +1382,8 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
1358 BFA_LOG(KERN_WARNING, bfad, bfa_log_level, 1382 BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
1359 "Base port WWN = %s Fabric WWN = %s\n", 1383 "Base port WWN = %s Fabric WWN = %s\n",
1360 pwwn_ptr, fwwn_ptr); 1384 pwwn_ptr, fwwn_ptr);
1385 bfa_fcs_fabric_aen_post(&fabric->bport,
1386 BFA_PORT_AEN_FABRIC_NAME_CHANGE);
1361 } 1387 }
1362} 1388}
1363 1389
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index a5f1faf335a7..e75e07d25915 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -675,6 +675,7 @@ struct bfa_fcs_s {
675 struct bfa_fcs_fabric_s fabric; /* base fabric state machine */ 675 struct bfa_fcs_fabric_s fabric; /* base fabric state machine */
676 struct bfa_fcs_stats_s stats; /* FCS statistics */ 676 struct bfa_fcs_stats_s stats; /* FCS statistics */
677 struct bfa_wc_s wc; /* waiting counter */ 677 struct bfa_wc_s wc; /* waiting counter */
678 int fcs_aen_seq;
678}; 679};
679 680
680/* 681/*
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 29b4108be269..9272840a2409 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -37,6 +37,8 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg,
37 struct bfa_fcxp_s *fcxp, void *cbarg, 37 struct bfa_fcxp_s *fcxp, void *cbarg,
38 bfa_status_t req_status, u32 rsp_len, 38 bfa_status_t req_status, u32 rsp_len,
39 u32 resid_len, struct fchs_s *rsp_fchs); 39 u32 resid_len, struct fchs_s *rsp_fchs);
40static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
41 enum bfa_itnim_aen_event event);
40 42
41/* 43/*
42 * fcs_itnim_sm FCS itnim state machine events 44 * fcs_itnim_sm FCS itnim state machine events
@@ -269,6 +271,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
269 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 271 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
270 "Target (WWN = %s) is online for initiator (WWN = %s)\n", 272 "Target (WWN = %s) is online for initiator (WWN = %s)\n",
271 rpwwn_buf, lpwwn_buf); 273 rpwwn_buf, lpwwn_buf);
274 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE);
272 break; 275 break;
273 276
274 case BFA_FCS_ITNIM_SM_OFFLINE: 277 case BFA_FCS_ITNIM_SM_OFFLINE:
@@ -305,14 +308,17 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
305 bfa_itnim_offline(itnim->bfa_itnim); 308 bfa_itnim_offline(itnim->bfa_itnim);
306 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port)); 309 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
307 wwn2str(rpwwn_buf, itnim->rport->pwwn); 310 wwn2str(rpwwn_buf, itnim->rport->pwwn);
308 if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) 311 if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) {
309 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 312 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
310 "Target (WWN = %s) connectivity lost for " 313 "Target (WWN = %s) connectivity lost for "
311 "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf); 314 "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
312 else 315 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
316 } else {
313 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 317 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
314 "Target (WWN = %s) offlined by initiator (WWN = %s)\n", 318 "Target (WWN = %s) offlined by initiator (WWN = %s)\n",
315 rpwwn_buf, lpwwn_buf); 319 rpwwn_buf, lpwwn_buf);
320 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
321 }
316 break; 322 break;
317 323
318 case BFA_FCS_ITNIM_SM_DELETE: 324 case BFA_FCS_ITNIM_SM_DELETE:
@@ -382,6 +388,33 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
382} 388}
383 389
384static void 390static void
391bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
392 enum bfa_itnim_aen_event event)
393{
394 struct bfa_fcs_rport_s *rport = itnim->rport;
395 struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
396 struct bfa_aen_entry_s *aen_entry;
397
398 /* Don't post events for well known addresses */
399 if (BFA_FCS_PID_IS_WKA(rport->pid))
400 return;
401
402 bfad_get_aen_entry(bfad, aen_entry);
403 if (!aen_entry)
404 return;
405
406 aen_entry->aen_data.itnim.vf_id = rport->port->fabric->vf_id;
407 aen_entry->aen_data.itnim.ppwwn = bfa_fcs_lport_get_pwwn(
408 bfa_fcs_get_base_port(itnim->fcs));
409 aen_entry->aen_data.itnim.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
410 aen_entry->aen_data.itnim.rpwwn = rport->pwwn;
411
412 /* Send the AEN notification */
413 bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
414 BFA_AEN_CAT_ITNIM, event);
415}
416
417static void
385bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced) 418bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
386{ 419{
387 struct bfa_fcs_itnim_s *itnim = itnim_cbarg; 420 struct bfa_fcs_itnim_s *itnim = itnim_cbarg;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index f8251a91ba91..d4f951fe753e 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include "bfad_drv.h" 18#include "bfad_drv.h"
19#include "bfad_im.h"
19#include "bfa_fcs.h" 20#include "bfa_fcs.h"
20#include "bfa_fcbuild.h" 21#include "bfa_fcbuild.h"
21#include "bfa_fc.h" 22#include "bfa_fc.h"
@@ -300,6 +301,31 @@ bfa_fcs_lport_sm_deleting(
300 */ 301 */
301 302
302/* 303/*
304 * Send AEN notification
305 */
306static void
307bfa_fcs_lport_aen_post(struct bfa_fcs_lport_s *port,
308 enum bfa_lport_aen_event event)
309{
310 struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
311 struct bfa_aen_entry_s *aen_entry;
312
313 bfad_get_aen_entry(bfad, aen_entry);
314 if (!aen_entry)
315 return;
316
317 aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
318 aen_entry->aen_data.lport.roles = port->port_cfg.roles;
319 aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
320 bfa_fcs_get_base_port(port->fcs));
321 aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
322
323 /* Send the AEN notification */
324 bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
325 BFA_AEN_CAT_LPORT, event);
326}
327
328/*
303 * Send a LS reject 329 * Send a LS reject
304 */ 330 */
305static void 331static void
@@ -593,6 +619,7 @@ bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port)
593 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 619 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
594 "Logical port online: WWN = %s Role = %s\n", 620 "Logical port online: WWN = %s Role = %s\n",
595 lpwwn_buf, "Initiator"); 621 lpwwn_buf, "Initiator");
622 bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE);
596 623
597 bfad->bfad_flags |= BFAD_PORT_ONLINE; 624 bfad->bfad_flags |= BFAD_PORT_ONLINE;
598} 625}
@@ -611,14 +638,17 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
611 638
612 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); 639 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
613 if (bfa_sm_cmp_state(port->fabric, 640 if (bfa_sm_cmp_state(port->fabric,
614 bfa_fcs_fabric_sm_online) == BFA_TRUE) 641 bfa_fcs_fabric_sm_online) == BFA_TRUE) {
615 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 642 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
616 "Logical port lost fabric connectivity: WWN = %s Role = %s\n", 643 "Logical port lost fabric connectivity: WWN = %s Role = %s\n",
617 lpwwn_buf, "Initiator"); 644 lpwwn_buf, "Initiator");
618 else 645 bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
646 } else {
619 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 647 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
620 "Logical port taken offline: WWN = %s Role = %s\n", 648 "Logical port taken offline: WWN = %s Role = %s\n",
621 lpwwn_buf, "Initiator"); 649 lpwwn_buf, "Initiator");
650 bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE);
651 }
622 652
623 list_for_each_safe(qe, qen, &port->rport_q) { 653 list_for_each_safe(qe, qen, &port->rport_q) {
624 rport = (struct bfa_fcs_rport_s *) qe; 654 rport = (struct bfa_fcs_rport_s *) qe;
@@ -676,6 +706,7 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
676 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 706 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
677 "Logical port deleted: WWN = %s Role = %s\n", 707 "Logical port deleted: WWN = %s Role = %s\n",
678 lpwwn_buf, "Initiator"); 708 lpwwn_buf, "Initiator");
709 bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE);
679 710
680 /* Base port will be deleted by the OS driver */ 711 /* Base port will be deleted by the OS driver */
681 if (port->vport) { 712 if (port->vport) {
@@ -973,6 +1004,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
973 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 1004 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
974 "New logical port created: WWN = %s Role = %s\n", 1005 "New logical port created: WWN = %s Role = %s\n",
975 lpwwn_buf, "Initiator"); 1006 lpwwn_buf, "Initiator");
1007 bfa_fcs_lport_aen_post(lport, BFA_LPORT_AEN_NEW);
976 1008
977 bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit); 1009 bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit);
978 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); 1010 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
@@ -5559,6 +5591,31 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
5559 * fcs_vport_private FCS virtual port private functions 5591 * fcs_vport_private FCS virtual port private functions
5560 */ 5592 */
5561/* 5593/*
5594 * Send AEN notification
5595 */
5596static void
5597bfa_fcs_vport_aen_post(struct bfa_fcs_lport_s *port,
5598 enum bfa_lport_aen_event event)
5599{
5600 struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
5601 struct bfa_aen_entry_s *aen_entry;
5602
5603 bfad_get_aen_entry(bfad, aen_entry);
5604 if (!aen_entry)
5605 return;
5606
5607 aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
5608 aen_entry->aen_data.lport.roles = port->port_cfg.roles;
5609 aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
5610 bfa_fcs_get_base_port(port->fcs));
5611 aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
5612
5613 /* Send the AEN notification */
5614 bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
5615 BFA_AEN_CAT_LPORT, event);
5616}
5617
5618/*
5562 * This routine will be called to send a FDISC command. 5619 * This routine will be called to send a FDISC command.
5563 */ 5620 */
5564static void 5621static void
@@ -5585,8 +5642,11 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
5585 case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */ 5642 case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
5586 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) 5643 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
5587 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); 5644 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5588 else 5645 else {
5646 bfa_fcs_vport_aen_post(&vport->lport,
5647 BFA_LPORT_AEN_NPIV_DUP_WWN);
5589 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN); 5648 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
5649 }
5590 break; 5650 break;
5591 5651
5592 case FC_LS_RJT_EXP_INSUFF_RES: 5652 case FC_LS_RJT_EXP_INSUFF_RES:
@@ -5596,11 +5656,17 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
5596 */ 5656 */
5597 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) 5657 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
5598 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); 5658 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5599 else 5659 else {
5660 bfa_fcs_vport_aen_post(&vport->lport,
5661 BFA_LPORT_AEN_NPIV_FABRIC_MAX);
5600 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED); 5662 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
5663 }
5601 break; 5664 break;
5602 5665
5603 default: 5666 default:
5667 if (vport->fdisc_retries == 0)
5668 bfa_fcs_vport_aen_post(&vport->lport,
5669 BFA_LPORT_AEN_NPIV_UNKNOWN);
5604 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); 5670 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5605 } 5671 }
5606} 5672}
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 2c514458a6b4..52628d5d3c9b 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include "bfad_drv.h" 22#include "bfad_drv.h"
23#include "bfad_im.h"
23#include "bfa_fcs.h" 24#include "bfa_fcs.h"
24#include "bfa_fcbuild.h" 25#include "bfa_fcbuild.h"
25 26
@@ -2041,6 +2042,35 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
2041} 2042}
2042 2043
2043static void 2044static void
2045bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
2046 enum bfa_rport_aen_event event,
2047 struct bfa_rport_aen_data_s *data)
2048{
2049 struct bfa_fcs_lport_s *port = rport->port;
2050 struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
2051 struct bfa_aen_entry_s *aen_entry;
2052
2053 bfad_get_aen_entry(bfad, aen_entry);
2054 if (!aen_entry)
2055 return;
2056
2057 if (event == BFA_RPORT_AEN_QOS_PRIO)
2058 aen_entry->aen_data.rport.priv.qos = data->priv.qos;
2059 else if (event == BFA_RPORT_AEN_QOS_FLOWID)
2060 aen_entry->aen_data.rport.priv.qos = data->priv.qos;
2061
2062 aen_entry->aen_data.rport.vf_id = rport->port->fabric->vf_id;
2063 aen_entry->aen_data.rport.ppwwn = bfa_fcs_lport_get_pwwn(
2064 bfa_fcs_get_base_port(rport->fcs));
2065 aen_entry->aen_data.rport.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
2066 aen_entry->aen_data.rport.rpwwn = rport->pwwn;
2067
2068 /* Send the AEN notification */
2069 bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
2070 BFA_AEN_CAT_RPORT, event);
2071}
2072
2073static void
2044bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport) 2074bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
2045{ 2075{
2046 struct bfa_fcs_lport_s *port = rport->port; 2076 struct bfa_fcs_lport_s *port = rport->port;
@@ -2063,10 +2093,12 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
2063 2093
2064 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); 2094 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
2065 wwn2str(rpwwn_buf, rport->pwwn); 2095 wwn2str(rpwwn_buf, rport->pwwn);
2066 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 2096 if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
2067 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2097 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2068 "Remote port (WWN = %s) online for logical port (WWN = %s)\n", 2098 "Remote port (WWN = %s) online for logical port (WWN = %s)\n",
2069 rpwwn_buf, lpwwn_buf); 2099 rpwwn_buf, lpwwn_buf);
2100 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL);
2101 }
2070} 2102}
2071 2103
2072static void 2104static void
@@ -2083,16 +2115,21 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
2083 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); 2115 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
2084 wwn2str(rpwwn_buf, rport->pwwn); 2116 wwn2str(rpwwn_buf, rport->pwwn);
2085 if (!BFA_FCS_PID_IS_WKA(rport->pid)) { 2117 if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
2086 if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) 2118 if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) {
2087 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2119 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2088 "Remote port (WWN = %s) connectivity lost for " 2120 "Remote port (WWN = %s) connectivity lost for "
2089 "logical port (WWN = %s)\n", 2121 "logical port (WWN = %s)\n",
2090 rpwwn_buf, lpwwn_buf); 2122 rpwwn_buf, lpwwn_buf);
2091 else 2123 bfa_fcs_rport_aen_post(rport,
2124 BFA_RPORT_AEN_DISCONNECT, NULL);
2125 } else {
2092 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2126 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2093 "Remote port (WWN = %s) offlined by " 2127 "Remote port (WWN = %s) offlined by "
2094 "logical port (WWN = %s)\n", 2128 "logical port (WWN = %s)\n",
2095 rpwwn_buf, lpwwn_buf); 2129 rpwwn_buf, lpwwn_buf);
2130 bfa_fcs_rport_aen_post(rport,
2131 BFA_RPORT_AEN_OFFLINE, NULL);
2132 }
2096 } 2133 }
2097 2134
2098 if (bfa_fcs_lport_is_initiator(port)) { 2135 if (bfa_fcs_lport_is_initiator(port)) {
@@ -2366,8 +2403,11 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
2366 struct bfa_rport_qos_attr_s new_qos_attr) 2403 struct bfa_rport_qos_attr_s new_qos_attr)
2367{ 2404{
2368 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; 2405 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
2406 struct bfa_rport_aen_data_s aen_data;
2369 2407
2370 bfa_trc(rport->fcs, rport->pwwn); 2408 bfa_trc(rport->fcs, rport->pwwn);
2409 aen_data.priv.qos = new_qos_attr;
2410 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
2371} 2411}
2372 2412
2373/* 2413/*
@@ -2390,8 +2430,11 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
2390 struct bfa_rport_qos_attr_s new_qos_attr) 2430 struct bfa_rport_qos_attr_s new_qos_attr)
2391{ 2431{
2392 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; 2432 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
2433 struct bfa_rport_aen_data_s aen_data;
2393 2434
2394 bfa_trc(rport->fcs, rport->pwwn); 2435 bfa_trc(rport->fcs, rport->pwwn);
2436 aen_data.priv.qos = new_qos_attr;
2437 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data);
2395} 2438}
2396 2439
2397/* 2440/*
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index e7ffd8205dc7..ea24d4c6e67a 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -42,11 +42,36 @@ bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
42 bfa->iocfc.bfa_regs.intr_status); 42 bfa->iocfc.bfa_regs.intr_status);
43} 43}
44 44
45/*
46 * Actions to respond RME Interrupt for Crossbow ASIC:
47 * - Write 1 to Interrupt Status register
48 * INTX - done in bfa_intx()
49 * MSIX - done in bfa_hwcb_rspq_ack_msix()
50 * - Update CI (only if new CI)
51 */
45static void 52static void
46bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq) 53bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)
47{ 54{
48 writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq), 55 writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
49 bfa->iocfc.bfa_regs.intr_status); 56 bfa->iocfc.bfa_regs.intr_status);
57
58 if (bfa_rspq_ci(bfa, rspq) == ci)
59 return;
60
61 bfa_rspq_ci(bfa, rspq) = ci;
62 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
63 mmiowb();
64}
65
66void
67bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
68{
69 if (bfa_rspq_ci(bfa, rspq) == ci)
70 return;
71
72 bfa_rspq_ci(bfa, rspq) = ci;
73 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
74 mmiowb();
50} 75}
51 76
52void 77void
@@ -149,8 +174,13 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
149void 174void
150bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) 175bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
151{ 176{
152 bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix; 177 if (msix) {
153 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix; 178 bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
179 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
180 } else {
181 bfa->iocfc.hwif.hw_reqq_ack = NULL;
182 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
183 }
154} 184}
155 185
156void 186void
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index 989bbce9b296..637527f48b40 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -64,13 +64,36 @@ bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
64 writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); 64 writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
65} 65}
66 66
67/*
68 * Actions to respond RME Interrupt for Catapult ASIC:
69 * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
70 * - Acknowledge by writing to RME Queue Control register
71 * - Update CI
72 */
67void 73void
68bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq) 74bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
69{ 75{
70 u32 r32; 76 u32 r32;
71 77
72 r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); 78 r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
73 writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); 79 writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
80
81 bfa_rspq_ci(bfa, rspq) = ci;
82 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
83 mmiowb();
84}
85
86/*
87 * Actions to respond RME Interrupt for Catapult2 ASIC:
88 * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
89 * - Update CI
90 */
91void
92bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
93{
94 bfa_rspq_ci(bfa, rspq) = ci;
95 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
96 mmiowb();
74} 97}
75 98
76void 99void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index d6c2bf3865d2..1ac5aecf25a6 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include "bfad_drv.h" 18#include "bfad_drv.h"
19#include "bfad_im.h"
19#include "bfa_ioc.h" 20#include "bfa_ioc.h"
20#include "bfi_reg.h" 21#include "bfi_reg.h"
21#include "bfa_defs.h" 22#include "bfa_defs.h"
@@ -458,6 +459,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
458 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); 459 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
459 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); 460 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
460 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n"); 461 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
462 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
461} 463}
462 464
463static void 465static void
@@ -502,6 +504,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
502 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 504 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
503 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); 505 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
504 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n"); 506 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
507 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
505} 508}
506 509
507/* 510/*
@@ -1966,6 +1969,7 @@ bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1966 1969
1967 BFA_LOG(KERN_CRIT, bfad, bfa_log_level, 1970 BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1968 "Heart Beat of IOC has failed\n"); 1971 "Heart Beat of IOC has failed\n");
1972 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
1969 1973
1970} 1974}
1971 1975
@@ -1980,6 +1984,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1980 BFA_LOG(KERN_WARNING, bfad, bfa_log_level, 1984 BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
1981 "Running firmware version is incompatible " 1985 "Running firmware version is incompatible "
1982 "with the driver version\n"); 1986 "with the driver version\n");
1987 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
1983} 1988}
1984 1989
1985bfa_status_t 1990bfa_status_t
@@ -2679,6 +2684,43 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2679} 2684}
2680 2685
2681/* 2686/*
2687 * Send AEN notification
2688 */
2689void
2690bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2691{
2692 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2693 struct bfa_aen_entry_s *aen_entry;
2694 enum bfa_ioc_type_e ioc_type;
2695
2696 bfad_get_aen_entry(bfad, aen_entry);
2697 if (!aen_entry)
2698 return;
2699
2700 ioc_type = bfa_ioc_get_type(ioc);
2701 switch (ioc_type) {
2702 case BFA_IOC_TYPE_FC:
2703 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2704 break;
2705 case BFA_IOC_TYPE_FCoE:
2706 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2707 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2708 break;
2709 case BFA_IOC_TYPE_LL:
2710 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2711 break;
2712 default:
2713 WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2714 break;
2715 }
2716
2717 /* Send the AEN notification */
2718 aen_entry->aen_data.ioc.ioc_type = ioc_type;
2719 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2720 BFA_AEN_CAT_IOC, event);
2721}
2722
2723/*
2682 * Retrieve saved firmware trace from a prior IOC failure. 2724 * Retrieve saved firmware trace from a prior IOC failure.
2683 */ 2725 */
2684bfa_status_t 2726bfa_status_t
@@ -2879,6 +2921,10 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
2879{ 2921{
2880 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) 2922 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2881 return; 2923 return;
2924 if (ioc->attr->nwwn == 0)
2925 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
2926 if (ioc->attr->pwwn == 0)
2927 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
2882} 2928}
2883 2929
2884/* 2930/*
@@ -3443,6 +3489,54 @@ bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3443} 3489}
3444 3490
3445/* 3491/*
3492 * SFP's State Change Notification post to AEN
3493 */
3494static void
3495bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3496{
3497 struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3498 struct bfa_aen_entry_s *aen_entry;
3499 enum bfa_port_aen_event aen_evt = 0;
3500
3501 bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3502 ((u64)rsp->event));
3503
3504 bfad_get_aen_entry(bfad, aen_entry);
3505 if (!aen_entry)
3506 return;
3507
3508 aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3509 aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3510 aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3511
3512 switch (rsp->event) {
3513 case BFA_SFP_SCN_INSERTED:
3514 aen_evt = BFA_PORT_AEN_SFP_INSERT;
3515 break;
3516 case BFA_SFP_SCN_REMOVED:
3517 aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3518 break;
3519 case BFA_SFP_SCN_FAILED:
3520 aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3521 break;
3522 case BFA_SFP_SCN_UNSUPPORT:
3523 aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3524 break;
3525 case BFA_SFP_SCN_POM:
3526 aen_evt = BFA_PORT_AEN_SFP_POM;
3527 aen_entry->aen_data.port.level = rsp->pomlvl;
3528 break;
3529 default:
3530 bfa_trc(sfp, rsp->event);
3531 WARN_ON(1);
3532 }
3533
3534 /* Send the AEN notification */
3535 bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3536 BFA_AEN_CAT_PORT, aen_evt);
3537}
3538
3539/*
3446 * SFP get data send 3540 * SFP get data send
3447 */ 3541 */
3448static void 3542static void
@@ -3482,6 +3576,50 @@ bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3482} 3576}
3483 3577
3484/* 3578/*
3579 * SFP scn handler
3580 */
3581static void
3582bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3583{
3584 struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3585
3586 switch (rsp->event) {
3587 case BFA_SFP_SCN_INSERTED:
3588 sfp->state = BFA_SFP_STATE_INSERTED;
3589 sfp->data_valid = 0;
3590 bfa_sfp_scn_aen_post(sfp, rsp);
3591 break;
3592 case BFA_SFP_SCN_REMOVED:
3593 sfp->state = BFA_SFP_STATE_REMOVED;
3594 sfp->data_valid = 0;
3595 bfa_sfp_scn_aen_post(sfp, rsp);
3596 break;
3597 case BFA_SFP_SCN_FAILED:
3598 sfp->state = BFA_SFP_STATE_FAILED;
3599 sfp->data_valid = 0;
3600 bfa_sfp_scn_aen_post(sfp, rsp);
3601 break;
3602 case BFA_SFP_SCN_UNSUPPORT:
3603 sfp->state = BFA_SFP_STATE_UNSUPPORT;
3604 bfa_sfp_scn_aen_post(sfp, rsp);
3605 if (!sfp->lock)
3606 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3607 break;
3608 case BFA_SFP_SCN_POM:
3609 bfa_sfp_scn_aen_post(sfp, rsp);
3610 break;
3611 case BFA_SFP_SCN_VALID:
3612 sfp->state = BFA_SFP_STATE_VALID;
3613 if (!sfp->lock)
3614 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3615 break;
3616 default:
3617 bfa_trc(sfp, rsp->event);
3618 WARN_ON(1);
3619 }
3620}
3621
3622/*
3485 * SFP show complete 3623 * SFP show complete
3486 */ 3624 */
3487static void 3625static void
@@ -3645,7 +3783,7 @@ bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3645 break; 3783 break;
3646 3784
3647 case BFI_SFP_I2H_SCN: 3785 case BFI_SFP_I2H_SCN:
3648 bfa_trc(sfp, msg->mh.msg_id); 3786 bfa_sfp_scn(sfp, msg);
3649 break; 3787 break;
3650 3788
3651 default: 3789 default:
@@ -3838,6 +3976,26 @@ bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3838 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ) 3976 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
3839 3977
3840static void 3978static void
3979bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
3980 int inst, int type)
3981{
3982 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
3983 struct bfa_aen_entry_s *aen_entry;
3984
3985 bfad_get_aen_entry(bfad, aen_entry);
3986 if (!aen_entry)
3987 return;
3988
3989 aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
3990 aen_entry->aen_data.audit.partition_inst = inst;
3991 aen_entry->aen_data.audit.partition_type = type;
3992
3993 /* Send the AEN notification */
3994 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
3995 BFA_AEN_CAT_AUDIT, event);
3996}
3997
3998static void
3841bfa_flash_cb(struct bfa_flash_s *flash) 3999bfa_flash_cb(struct bfa_flash_s *flash)
3842{ 4000{
3843 flash->op_busy = 0; 4001 flash->op_busy = 0;
@@ -3978,6 +4136,7 @@ bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
3978 struct bfi_flash_erase_rsp_s *erase; 4136 struct bfi_flash_erase_rsp_s *erase;
3979 struct bfi_flash_write_rsp_s *write; 4137 struct bfi_flash_write_rsp_s *write;
3980 struct bfi_flash_read_rsp_s *read; 4138 struct bfi_flash_read_rsp_s *read;
4139 struct bfi_flash_event_s *event;
3981 struct bfi_mbmsg_s *msg; 4140 struct bfi_mbmsg_s *msg;
3982 } m; 4141 } m;
3983 4142
@@ -4061,8 +4220,19 @@ bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4061 } 4220 }
4062 break; 4221 break;
4063 case BFI_FLASH_I2H_BOOT_VER_RSP: 4222 case BFI_FLASH_I2H_BOOT_VER_RSP:
4223 break;
4064 case BFI_FLASH_I2H_EVENT: 4224 case BFI_FLASH_I2H_EVENT:
4065 bfa_trc(flash, msg->mh.msg_id); 4225 status = be32_to_cpu(m.event->status);
4226 bfa_trc(flash, status);
4227 if (status == BFA_STATUS_BAD_FWCFG)
4228 bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4229 else if (status == BFA_STATUS_INVALID_VENDOR) {
4230 u32 param;
4231 param = be32_to_cpu(m.event->param);
4232 bfa_trc(flash, param);
4233 bfa_ioc_aen_post(flash->ioc,
4234 BFA_IOC_AEN_INVALID_VENDOR);
4235 }
4066 break; 4236 break;
4067 4237
4068 default: 4238 default:
@@ -4204,6 +4374,8 @@ bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4204 flash->instance = instance; 4374 flash->instance = instance;
4205 4375
4206 bfa_flash_erase_send(flash); 4376 bfa_flash_erase_send(flash);
4377 bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4378 instance, type);
4207 return BFA_STATUS_OK; 4379 return BFA_STATUS_OK;
4208} 4380}
4209 4381
@@ -5416,3 +5588,396 @@ bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5416 WARN_ON(1); 5588 WARN_ON(1);
5417 } 5589 }
5418} 5590}
5591
5592/*
5593 * DCONF module specific
5594 */
5595
5596BFA_MODULE(dconf);
5597
5598/*
5599 * DCONF state machine events
5600 */
5601enum bfa_dconf_event {
5602 BFA_DCONF_SM_INIT = 1, /* dconf Init */
5603 BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
5604 BFA_DCONF_SM_WR = 3, /* binding change, map */
5605 BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */
5606 BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
5607 BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
5608};
5609
5610/* forward declaration of DCONF state machine */
5611static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5612 enum bfa_dconf_event event);
5613static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5614 enum bfa_dconf_event event);
5615static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5616 enum bfa_dconf_event event);
5617static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5618 enum bfa_dconf_event event);
5619static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5620 enum bfa_dconf_event event);
5621static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5622 enum bfa_dconf_event event);
5623static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5624 enum bfa_dconf_event event);
5625
5626static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5627static void bfa_dconf_timer(void *cbarg);
5628static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5629static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5630
5631/*
5632 * Begining state of dconf module. Waiting for an event to start.
5633 */
5634static void
5635bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5636{
5637 bfa_status_t bfa_status;
5638 bfa_trc(dconf->bfa, event);
5639
5640 switch (event) {
5641 case BFA_DCONF_SM_INIT:
5642 if (dconf->min_cfg) {
5643 bfa_trc(dconf->bfa, dconf->min_cfg);
5644 return;
5645 }
5646 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5647 dconf->flashdone = BFA_FALSE;
5648 bfa_trc(dconf->bfa, dconf->flashdone);
5649 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5650 BFA_FLASH_PART_DRV, dconf->instance,
5651 dconf->dconf,
5652 sizeof(struct bfa_dconf_s), 0,
5653 bfa_dconf_init_cb, dconf->bfa);
5654 if (bfa_status != BFA_STATUS_OK) {
5655 bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5656 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5657 return;
5658 }
5659 break;
5660 case BFA_DCONF_SM_EXIT:
5661 dconf->flashdone = BFA_TRUE;
5662 case BFA_DCONF_SM_IOCDISABLE:
5663 case BFA_DCONF_SM_WR:
5664 case BFA_DCONF_SM_FLASH_COMP:
5665 break;
5666 default:
5667 bfa_sm_fault(dconf->bfa, event);
5668 }
5669}
5670
5671/*
5672 * Read flash for dconf entries and make a call back to the driver once done.
5673 */
5674static void
5675bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5676 enum bfa_dconf_event event)
5677{
5678 bfa_trc(dconf->bfa, event);
5679
5680 switch (event) {
5681 case BFA_DCONF_SM_FLASH_COMP:
5682 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5683 break;
5684 case BFA_DCONF_SM_TIMEOUT:
5685 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5686 break;
5687 case BFA_DCONF_SM_EXIT:
5688 dconf->flashdone = BFA_TRUE;
5689 bfa_trc(dconf->bfa, dconf->flashdone);
5690 case BFA_DCONF_SM_IOCDISABLE:
5691 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5692 break;
5693 default:
5694 bfa_sm_fault(dconf->bfa, event);
5695 }
5696}
5697
5698/*
5699 * DCONF Module is in ready state. Has completed the initialization.
5700 */
5701static void
5702bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5703{
5704 bfa_trc(dconf->bfa, event);
5705
5706 switch (event) {
5707 case BFA_DCONF_SM_WR:
5708 bfa_timer_start(dconf->bfa, &dconf->timer,
5709 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5710 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5711 break;
5712 case BFA_DCONF_SM_EXIT:
5713 dconf->flashdone = BFA_TRUE;
5714 bfa_trc(dconf->bfa, dconf->flashdone);
5715 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5716 break;
5717 case BFA_DCONF_SM_INIT:
5718 case BFA_DCONF_SM_IOCDISABLE:
5719 break;
5720 default:
5721 bfa_sm_fault(dconf->bfa, event);
5722 }
5723}
5724
5725/*
5726 * entries are dirty, write back to the flash.
5727 */
5728
5729static void
5730bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5731{
5732 bfa_trc(dconf->bfa, event);
5733
5734 switch (event) {
5735 case BFA_DCONF_SM_TIMEOUT:
5736 bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5737 bfa_dconf_flash_write(dconf);
5738 break;
5739 case BFA_DCONF_SM_WR:
5740 bfa_timer_stop(&dconf->timer);
5741 bfa_timer_start(dconf->bfa, &dconf->timer,
5742 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5743 break;
5744 case BFA_DCONF_SM_EXIT:
5745 bfa_timer_stop(&dconf->timer);
5746 bfa_timer_start(dconf->bfa, &dconf->timer,
5747 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5748 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5749 bfa_dconf_flash_write(dconf);
5750 break;
5751 case BFA_DCONF_SM_FLASH_COMP:
5752 break;
5753 case BFA_DCONF_SM_IOCDISABLE:
5754 bfa_timer_stop(&dconf->timer);
5755 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5756 break;
5757 default:
5758 bfa_sm_fault(dconf->bfa, event);
5759 }
5760}
5761
5762/*
5763 * Sync the dconf entries to the flash.
5764 */
5765static void
5766bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5767 enum bfa_dconf_event event)
5768{
5769 bfa_trc(dconf->bfa, event);
5770
5771 switch (event) {
5772 case BFA_DCONF_SM_IOCDISABLE:
5773 case BFA_DCONF_SM_FLASH_COMP:
5774 bfa_timer_stop(&dconf->timer);
5775 case BFA_DCONF_SM_TIMEOUT:
5776 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5777 dconf->flashdone = BFA_TRUE;
5778 bfa_trc(dconf->bfa, dconf->flashdone);
5779 bfa_ioc_disable(&dconf->bfa->ioc);
5780 break;
5781 default:
5782 bfa_sm_fault(dconf->bfa, event);
5783 }
5784}
5785
5786static void
5787bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5788{
5789 bfa_trc(dconf->bfa, event);
5790
5791 switch (event) {
5792 case BFA_DCONF_SM_FLASH_COMP:
5793 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5794 break;
5795 case BFA_DCONF_SM_WR:
5796 bfa_timer_start(dconf->bfa, &dconf->timer,
5797 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5798 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5799 break;
5800 case BFA_DCONF_SM_EXIT:
5801 bfa_timer_start(dconf->bfa, &dconf->timer,
5802 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5803 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5804 break;
5805 case BFA_DCONF_SM_IOCDISABLE:
5806 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5807 break;
5808 default:
5809 bfa_sm_fault(dconf->bfa, event);
5810 }
5811}
5812
5813static void
5814bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5815 enum bfa_dconf_event event)
5816{
5817 bfa_trc(dconf->bfa, event);
5818
5819 switch (event) {
5820 case BFA_DCONF_SM_INIT:
5821 bfa_timer_start(dconf->bfa, &dconf->timer,
5822 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5823 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5824 break;
5825 case BFA_DCONF_SM_EXIT:
5826 dconf->flashdone = BFA_TRUE;
5827 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5828 break;
5829 case BFA_DCONF_SM_IOCDISABLE:
5830 break;
5831 default:
5832 bfa_sm_fault(dconf->bfa, event);
5833 }
5834}
5835
5836/*
5837 * Compute and return memory needed by DRV_CFG module.
5838 */
5839static void
5840bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5841 struct bfa_s *bfa)
5842{
5843 struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
5844
5845 if (cfg->drvcfg.min_cfg)
5846 bfa_mem_kva_setup(meminfo, dconf_kva,
5847 sizeof(struct bfa_dconf_hdr_s));
5848 else
5849 bfa_mem_kva_setup(meminfo, dconf_kva,
5850 sizeof(struct bfa_dconf_s));
5851}
5852
5853static void
5854bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5855 struct bfa_pcidev_s *pcidev)
5856{
5857 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5858
5859 dconf->bfad = bfad;
5860 dconf->bfa = bfa;
5861 dconf->instance = bfa->ioc.port_id;
5862 bfa_trc(bfa, dconf->instance);
5863
5864 dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
5865 if (cfg->drvcfg.min_cfg) {
5866 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
5867 dconf->min_cfg = BFA_TRUE;
5868 /*
5869 * Set the flashdone flag to TRUE explicitly as no flash
5870 * write will happen in min_cfg mode.
5871 */
5872 dconf->flashdone = BFA_TRUE;
5873 } else {
5874 dconf->min_cfg = BFA_FALSE;
5875 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
5876 }
5877
5878 bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
5879 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5880}
5881
5882static void
5883bfa_dconf_init_cb(void *arg, bfa_status_t status)
5884{
5885 struct bfa_s *bfa = arg;
5886 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5887
5888 dconf->flashdone = BFA_TRUE;
5889 bfa_trc(bfa, dconf->flashdone);
5890 bfa_iocfc_cb_dconf_modinit(bfa, status);
5891 if (status == BFA_STATUS_OK) {
5892 bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
5893 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
5894 dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
5895 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
5896 dconf->dconf->hdr.version = BFI_DCONF_VERSION;
5897 }
5898 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5899}
5900
5901void
5902bfa_dconf_modinit(struct bfa_s *bfa)
5903{
5904 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5905 bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
5906}
5907static void
5908bfa_dconf_start(struct bfa_s *bfa)
5909{
5910}
5911
5912static void
5913bfa_dconf_stop(struct bfa_s *bfa)
5914{
5915}
5916
5917static void bfa_dconf_timer(void *cbarg)
5918{
5919 struct bfa_dconf_mod_s *dconf = cbarg;
5920 bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
5921}
5922static void
5923bfa_dconf_iocdisable(struct bfa_s *bfa)
5924{
5925 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5926 bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
5927}
5928
5929static void
5930bfa_dconf_detach(struct bfa_s *bfa)
5931{
5932}
5933
5934static bfa_status_t
5935bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
5936{
5937 bfa_status_t bfa_status;
5938 bfa_trc(dconf->bfa, 0);
5939
5940 bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
5941 BFA_FLASH_PART_DRV, dconf->instance,
5942 dconf->dconf, sizeof(struct bfa_dconf_s), 0,
5943 bfa_dconf_cbfn, dconf);
5944 if (bfa_status != BFA_STATUS_OK)
5945 WARN_ON(bfa_status);
5946 bfa_trc(dconf->bfa, bfa_status);
5947
5948 return bfa_status;
5949}
5950
5951bfa_status_t
5952bfa_dconf_update(struct bfa_s *bfa)
5953{
5954 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5955 bfa_trc(dconf->bfa, 0);
5956 if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
5957 return BFA_STATUS_FAILED;
5958
5959 if (dconf->min_cfg) {
5960 bfa_trc(dconf->bfa, dconf->min_cfg);
5961 return BFA_STATUS_FAILED;
5962 }
5963
5964 bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
5965 return BFA_STATUS_OK;
5966}
5967
5968static void
5969bfa_dconf_cbfn(void *arg, bfa_status_t status)
5970{
5971 struct bfa_dconf_mod_s *dconf = arg;
5972 WARN_ON(status);
5973 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5974}
5975
5976void
5977bfa_dconf_modexit(struct bfa_s *bfa)
5978{
5979 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5980 BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
5981 bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
5982 bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
5983}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index c5ecd2edc95d..546d46b37101 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -327,6 +327,7 @@ struct bfa_ioc_s {
327 enum bfa_mode_s port_mode; 327 enum bfa_mode_s port_mode;
328 u8 ad_cap_bm; /* adapter cap bit mask */ 328 u8 ad_cap_bm; /* adapter cap bit mask */
329 u8 port_mode_cfg; /* config port mode */ 329 u8 port_mode_cfg; /* config port mode */
330 int ioc_aen_seq;
330}; 331};
331 332
332struct bfa_ioc_hwif_s { 333struct bfa_ioc_hwif_s {
@@ -366,6 +367,8 @@ struct bfa_cb_qe_s {
366 struct list_head qe; 367 struct list_head qe;
367 bfa_cb_cbfn_t cbfn; 368 bfa_cb_cbfn_t cbfn;
368 bfa_boolean_t once; 369 bfa_boolean_t once;
370 bfa_boolean_t pre_rmv; /* set for stack based qe(s) */
371 bfa_status_t fw_status; /* to access fw status in comp proc */
369 void *cbarg; 372 void *cbarg;
370}; 373};
371 374
@@ -658,7 +661,6 @@ struct bfa_phy_s {
658 struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */ 661 struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
659 struct bfa_mem_dma_s phy_dma; 662 struct bfa_mem_dma_s phy_dma;
660}; 663};
661
662#define BFA_PHY(__bfa) (&(__bfa)->modules.phy) 664#define BFA_PHY(__bfa) (&(__bfa)->modules.phy)
663#define BFA_MEM_PHY_DMA(__bfa) (&(BFA_PHY(__bfa)->phy_dma)) 665#define BFA_MEM_PHY_DMA(__bfa) (&(BFA_PHY(__bfa)->phy_dma))
664 666
@@ -684,6 +686,49 @@ void bfa_phy_memclaim(struct bfa_phy_s *phy,
684void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg); 686void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
685 687
686/* 688/*
689 * Driver Config( dconf) specific
690 */
691#define BFI_DCONF_SIGNATURE 0xabcdabcd
692#define BFI_DCONF_VERSION 1
693
694#pragma pack(1)
695struct bfa_dconf_hdr_s {
696 u32 signature;
697 u32 version;
698};
699
700struct bfa_dconf_s {
701 struct bfa_dconf_hdr_s hdr;
702 struct bfa_lunmask_cfg_s lun_mask;
703};
704#pragma pack()
705
706struct bfa_dconf_mod_s {
707 bfa_sm_t sm;
708 u8 instance;
709 bfa_boolean_t flashdone;
710 bfa_boolean_t read_data_valid;
711 bfa_boolean_t min_cfg;
712 struct bfa_timer_s timer;
713 struct bfa_s *bfa;
714 void *bfad;
715 void *trcmod;
716 struct bfa_dconf_s *dconf;
717 struct bfa_mem_kva_s kva_seg;
718};
719
720#define BFA_DCONF_MOD(__bfa) \
721 (&(__bfa)->modules.dconf_mod)
722#define BFA_MEM_DCONF_KVA(__bfa) (&(BFA_DCONF_MOD(__bfa)->kva_seg))
723#define bfa_dconf_read_data_valid(__bfa) \
724 (BFA_DCONF_MOD(__bfa)->read_data_valid)
725#define BFA_DCONF_UPDATE_TOV 5000 /* memtest timeout in msec */
726
727void bfa_dconf_modinit(struct bfa_s *bfa);
728void bfa_dconf_modexit(struct bfa_s *bfa);
729bfa_status_t bfa_dconf_update(struct bfa_s *bfa);
730
731/*
687 * IOC specfic macros 732 * IOC specfic macros
688 */ 733 */
689#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) 734#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -803,6 +848,7 @@ void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
803 struct bfi_ioc_image_hdr_s *fwhdr); 848 struct bfi_ioc_image_hdr_s *fwhdr);
804bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, 849bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
805 struct bfi_ioc_image_hdr_s *fwhdr); 850 struct bfi_ioc_image_hdr_s *fwhdr);
851void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
806bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats); 852bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
807bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc); 853bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
808 854
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 1c6efd40a673..2d36e4823835 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -44,6 +44,7 @@ struct bfa_modules_s {
44 struct bfa_flash_s flash; /* flash module */ 44 struct bfa_flash_s flash; /* flash module */
45 struct bfa_diag_s diag_mod; /* diagnostics module */ 45 struct bfa_diag_s diag_mod; /* diagnostics module */
46 struct bfa_phy_s phy; /* phy module */ 46 struct bfa_phy_s phy; /* phy module */
47 struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */
47}; 48};
48 49
49/* 50/*
@@ -119,6 +120,7 @@ struct bfa_s {
119 struct list_head reqq_waitq[BFI_IOC_MAX_CQS]; 120 struct list_head reqq_waitq[BFI_IOC_MAX_CQS];
120 bfa_boolean_t fcs; /* FCS is attached to BFA */ 121 bfa_boolean_t fcs; /* FCS is attached to BFA */
121 struct bfa_msix_s msix; 122 struct bfa_msix_s msix;
123 int bfa_aen_seq;
122}; 124};
123 125
124extern bfa_boolean_t bfa_auto_recover; 126extern bfa_boolean_t bfa_auto_recover;
@@ -130,5 +132,6 @@ extern struct bfa_module_s hal_mod_lps;
130extern struct bfa_module_s hal_mod_uf; 132extern struct bfa_module_s hal_mod_uf;
131extern struct bfa_module_s hal_mod_rport; 133extern struct bfa_module_s hal_mod_rport;
132extern struct bfa_module_s hal_mod_fcp; 134extern struct bfa_module_s hal_mod_fcp;
135extern struct bfa_module_s hal_mod_dconf;
133 136
134#endif /* __BFA_MODULES_H__ */ 137#endif /* __BFA_MODULES_H__ */
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 21caaefce99f..aa8a0eaf91f9 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include "bfad_drv.h" 18#include "bfad_drv.h"
19#include "bfad_im.h"
19#include "bfa_plog.h" 20#include "bfa_plog.h"
20#include "bfa_cs.h" 21#include "bfa_cs.h"
21#include "bfa_modules.h" 22#include "bfa_modules.h"
@@ -2007,6 +2008,24 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2007 } 2008 }
2008} 2009}
2009 2010
2011static void
2012bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
2013{
2014 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2015 struct bfa_aen_entry_s *aen_entry;
2016
2017 bfad_get_aen_entry(bfad, aen_entry);
2018 if (!aen_entry)
2019 return;
2020
2021 aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
2022 aen_entry->aen_data.port.pwwn = fcport->pwwn;
2023
2024 /* Send the AEN notification */
2025 bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
2026 BFA_AEN_CAT_PORT, event);
2027}
2028
2010/* 2029/*
2011 * FC PORT state machine functions 2030 * FC PORT state machine functions
2012 */ 2031 */
@@ -2095,6 +2114,7 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2095 wwn2str(pwwn_buf, fcport->pwwn); 2114 wwn2str(pwwn_buf, fcport->pwwn);
2096 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2115 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2097 "Base port disabled: WWN = %s\n", pwwn_buf); 2116 "Base port disabled: WWN = %s\n", pwwn_buf);
2117 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2098 break; 2118 break;
2099 2119
2100 case BFA_FCPORT_SM_LINKUP: 2120 case BFA_FCPORT_SM_LINKUP:
@@ -2155,6 +2175,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2155 wwn2str(pwwn_buf, fcport->pwwn); 2175 wwn2str(pwwn_buf, fcport->pwwn);
2156 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2176 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2157 "Base port disabled: WWN = %s\n", pwwn_buf); 2177 "Base port disabled: WWN = %s\n", pwwn_buf);
2178 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2158 break; 2179 break;
2159 2180
2160 case BFA_FCPORT_SM_STOP: 2181 case BFA_FCPORT_SM_STOP:
@@ -2208,6 +2229,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2208 wwn2str(pwwn_buf, fcport->pwwn); 2229 wwn2str(pwwn_buf, fcport->pwwn);
2209 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2230 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2210 "Base port online: WWN = %s\n", pwwn_buf); 2231 "Base port online: WWN = %s\n", pwwn_buf);
2232 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
2233
2234 /* If QoS is enabled and it is not online, send AEN */
2235 if (fcport->cfg.qos_enabled &&
2236 fcport->qos_attr.state != BFA_QOS_ONLINE)
2237 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
2211 break; 2238 break;
2212 2239
2213 case BFA_FCPORT_SM_LINKDOWN: 2240 case BFA_FCPORT_SM_LINKDOWN:
@@ -2234,6 +2261,7 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2234 wwn2str(pwwn_buf, fcport->pwwn); 2261 wwn2str(pwwn_buf, fcport->pwwn);
2235 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2262 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2236 "Base port disabled: WWN = %s\n", pwwn_buf); 2263 "Base port disabled: WWN = %s\n", pwwn_buf);
2264 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2237 break; 2265 break;
2238 2266
2239 case BFA_FCPORT_SM_STOP: 2267 case BFA_FCPORT_SM_STOP:
@@ -2279,8 +2307,10 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2279 wwn2str(pwwn_buf, fcport->pwwn); 2307 wwn2str(pwwn_buf, fcport->pwwn);
2280 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2308 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2281 "Base port offline: WWN = %s\n", pwwn_buf); 2309 "Base port offline: WWN = %s\n", pwwn_buf);
2310 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2282 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2311 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2283 "Base port disabled: WWN = %s\n", pwwn_buf); 2312 "Base port disabled: WWN = %s\n", pwwn_buf);
2313 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2284 break; 2314 break;
2285 2315
2286 case BFA_FCPORT_SM_LINKDOWN: 2316 case BFA_FCPORT_SM_LINKDOWN:
@@ -2290,26 +2320,32 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2290 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2320 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2291 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); 2321 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
2292 wwn2str(pwwn_buf, fcport->pwwn); 2322 wwn2str(pwwn_buf, fcport->pwwn);
2293 if (BFA_PORT_IS_DISABLED(fcport->bfa)) 2323 if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2294 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2324 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2295 "Base port offline: WWN = %s\n", pwwn_buf); 2325 "Base port offline: WWN = %s\n", pwwn_buf);
2296 else 2326 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2327 } else {
2297 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2328 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2298 "Base port (WWN = %s) " 2329 "Base port (WWN = %s) "
2299 "lost fabric connectivity\n", pwwn_buf); 2330 "lost fabric connectivity\n", pwwn_buf);
2331 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2332 }
2300 break; 2333 break;
2301 2334
2302 case BFA_FCPORT_SM_STOP: 2335 case BFA_FCPORT_SM_STOP:
2303 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 2336 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2304 bfa_fcport_reset_linkinfo(fcport); 2337 bfa_fcport_reset_linkinfo(fcport);
2305 wwn2str(pwwn_buf, fcport->pwwn); 2338 wwn2str(pwwn_buf, fcport->pwwn);
2306 if (BFA_PORT_IS_DISABLED(fcport->bfa)) 2339 if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2307 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2340 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2308 "Base port offline: WWN = %s\n", pwwn_buf); 2341 "Base port offline: WWN = %s\n", pwwn_buf);
2309 else 2342 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2343 } else {
2310 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2344 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2311 "Base port (WWN = %s) " 2345 "Base port (WWN = %s) "
2312 "lost fabric connectivity\n", pwwn_buf); 2346 "lost fabric connectivity\n", pwwn_buf);
2347 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2348 }
2313 break; 2349 break;
2314 2350
2315 case BFA_FCPORT_SM_HWFAIL: 2351 case BFA_FCPORT_SM_HWFAIL:
@@ -2317,13 +2353,16 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2317 bfa_fcport_reset_linkinfo(fcport); 2353 bfa_fcport_reset_linkinfo(fcport);
2318 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); 2354 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2319 wwn2str(pwwn_buf, fcport->pwwn); 2355 wwn2str(pwwn_buf, fcport->pwwn);
2320 if (BFA_PORT_IS_DISABLED(fcport->bfa)) 2356 if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2321 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2357 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2322 "Base port offline: WWN = %s\n", pwwn_buf); 2358 "Base port offline: WWN = %s\n", pwwn_buf);
2323 else 2359 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2360 } else {
2324 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2361 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2325 "Base port (WWN = %s) " 2362 "Base port (WWN = %s) "
2326 "lost fabric connectivity\n", pwwn_buf); 2363 "lost fabric connectivity\n", pwwn_buf);
2364 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2365 }
2327 break; 2366 break;
2328 2367
2329 default: 2368 default:
@@ -2454,6 +2493,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2454 wwn2str(pwwn_buf, fcport->pwwn); 2493 wwn2str(pwwn_buf, fcport->pwwn);
2455 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2494 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2456 "Base port enabled: WWN = %s\n", pwwn_buf); 2495 "Base port enabled: WWN = %s\n", pwwn_buf);
2496 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
2457 break; 2497 break;
2458 2498
2459 case BFA_FCPORT_SM_STOP: 2499 case BFA_FCPORT_SM_STOP:
@@ -2508,6 +2548,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2508 wwn2str(pwwn_buf, fcport->pwwn); 2548 wwn2str(pwwn_buf, fcport->pwwn);
2509 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2549 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2510 "Base port enabled: WWN = %s\n", pwwn_buf); 2550 "Base port enabled: WWN = %s\n", pwwn_buf);
2551 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
2511 break; 2552 break;
2512 2553
2513 case BFA_FCPORT_SM_DISABLE: 2554 case BFA_FCPORT_SM_DISABLE:
@@ -2874,6 +2915,9 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2874 2915
2875 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS; 2916 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
2876 2917
2918 INIT_LIST_HEAD(&fcport->stats_pending_q);
2919 INIT_LIST_HEAD(&fcport->statsclr_pending_q);
2920
2877 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport); 2921 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
2878} 2922}
2879 2923
@@ -3102,30 +3146,38 @@ bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
3102static void 3146static void
3103__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete) 3147__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3104{ 3148{
3105 struct bfa_fcport_s *fcport = cbarg; 3149 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
3150 struct bfa_cb_pending_q_s *cb;
3151 struct list_head *qe, *qen;
3152 union bfa_fcport_stats_u *ret;
3106 3153
3107 if (complete) { 3154 if (complete) {
3108 if (fcport->stats_status == BFA_STATUS_OK) { 3155 struct timeval tv;
3109 struct timeval tv; 3156 if (fcport->stats_status == BFA_STATUS_OK)
3110 3157 do_gettimeofday(&tv);
3111 /* Swap FC QoS or FCoE stats */ 3158
3112 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { 3159 list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
3113 bfa_fcport_qos_stats_swap( 3160 bfa_q_deq(&fcport->stats_pending_q, &qe);
3114 &fcport->stats_ret->fcqos, 3161 cb = (struct bfa_cb_pending_q_s *)qe;
3115 &fcport->stats->fcqos); 3162 if (fcport->stats_status == BFA_STATUS_OK) {
3116 } else { 3163 ret = (union bfa_fcport_stats_u *)cb->data;
3117 bfa_fcport_fcoe_stats_swap( 3164 /* Swap FC QoS or FCoE stats */
3118 &fcport->stats_ret->fcoe, 3165 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
3119 &fcport->stats->fcoe); 3166 bfa_fcport_qos_stats_swap(&ret->fcqos,
3120 3167 &fcport->stats->fcqos);
3121 do_gettimeofday(&tv); 3168 else {
3122 fcport->stats_ret->fcoe.secs_reset = 3169 bfa_fcport_fcoe_stats_swap(&ret->fcoe,
3170 &fcport->stats->fcoe);
3171 ret->fcoe.secs_reset =
3123 tv.tv_sec - fcport->stats_reset_time; 3172 tv.tv_sec - fcport->stats_reset_time;
3173 }
3124 } 3174 }
3175 bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3176 fcport->stats_status);
3125 } 3177 }
3126 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); 3178 fcport->stats_status = BFA_STATUS_OK;
3127 } else { 3179 } else {
3128 fcport->stats_busy = BFA_FALSE; 3180 INIT_LIST_HEAD(&fcport->stats_pending_q);
3129 fcport->stats_status = BFA_STATUS_OK; 3181 fcport->stats_status = BFA_STATUS_OK;
3130 } 3182 }
3131} 3183}
@@ -3143,8 +3195,7 @@ bfa_fcport_stats_get_timeout(void *cbarg)
3143 } 3195 }
3144 3196
3145 fcport->stats_status = BFA_STATUS_ETIMER; 3197 fcport->stats_status = BFA_STATUS_ETIMER;
3146 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get, 3198 __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3147 fcport);
3148} 3199}
3149 3200
3150static void 3201static void
@@ -3174,7 +3225,9 @@ bfa_fcport_send_stats_get(void *cbarg)
3174static void 3225static void
3175__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) 3226__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3176{ 3227{
3177 struct bfa_fcport_s *fcport = cbarg; 3228 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3229 struct bfa_cb_pending_q_s *cb;
3230 struct list_head *qe, *qen;
3178 3231
3179 if (complete) { 3232 if (complete) {
3180 struct timeval tv; 3233 struct timeval tv;
@@ -3184,10 +3237,15 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3184 */ 3237 */
3185 do_gettimeofday(&tv); 3238 do_gettimeofday(&tv);
3186 fcport->stats_reset_time = tv.tv_sec; 3239 fcport->stats_reset_time = tv.tv_sec;
3187 3240 list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
3188 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); 3241 bfa_q_deq(&fcport->statsclr_pending_q, &qe);
3242 cb = (struct bfa_cb_pending_q_s *)qe;
3243 bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3244 fcport->stats_status);
3245 }
3246 fcport->stats_status = BFA_STATUS_OK;
3189 } else { 3247 } else {
3190 fcport->stats_busy = BFA_FALSE; 3248 INIT_LIST_HEAD(&fcport->statsclr_pending_q);
3191 fcport->stats_status = BFA_STATUS_OK; 3249 fcport->stats_status = BFA_STATUS_OK;
3192 } 3250 }
3193} 3251}
@@ -3205,8 +3263,7 @@ bfa_fcport_stats_clr_timeout(void *cbarg)
3205 } 3263 }
3206 3264
3207 fcport->stats_status = BFA_STATUS_ETIMER; 3265 fcport->stats_status = BFA_STATUS_ETIMER;
3208 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, 3266 __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3209 __bfa_cb_fcport_stats_clr, fcport);
3210} 3267}
3211 3268
3212static void 3269static void
@@ -3402,6 +3459,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3402 fcport->use_flash_cfg = BFA_FALSE; 3459 fcport->use_flash_cfg = BFA_FALSE;
3403 } 3460 }
3404 3461
3462 if (fcport->cfg.qos_enabled)
3463 fcport->qos_attr.state = BFA_QOS_OFFLINE;
3464 else
3465 fcport->qos_attr.state = BFA_QOS_DISABLED;
3466
3405 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); 3467 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3406 } 3468 }
3407 break; 3469 break;
@@ -3426,28 +3488,26 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3426 /* 3488 /*
3427 * check for timer pop before processing the rsp 3489 * check for timer pop before processing the rsp
3428 */ 3490 */
3429 if (fcport->stats_busy == BFA_FALSE || 3491 if (list_empty(&fcport->stats_pending_q) ||
3430 fcport->stats_status == BFA_STATUS_ETIMER) 3492 (fcport->stats_status == BFA_STATUS_ETIMER))
3431 break; 3493 break;
3432 3494
3433 bfa_timer_stop(&fcport->timer); 3495 bfa_timer_stop(&fcport->timer);
3434 fcport->stats_status = i2hmsg.pstatsget_rsp->status; 3496 fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3435 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, 3497 __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3436 __bfa_cb_fcport_stats_get, fcport);
3437 break; 3498 break;
3438 3499
3439 case BFI_FCPORT_I2H_STATS_CLEAR_RSP: 3500 case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3440 /* 3501 /*
3441 * check for timer pop before processing the rsp 3502 * check for timer pop before processing the rsp
3442 */ 3503 */
3443 if (fcport->stats_busy == BFA_FALSE || 3504 if (list_empty(&fcport->statsclr_pending_q) ||
3444 fcport->stats_status == BFA_STATUS_ETIMER) 3505 (fcport->stats_status == BFA_STATUS_ETIMER))
3445 break; 3506 break;
3446 3507
3447 bfa_timer_stop(&fcport->timer); 3508 bfa_timer_stop(&fcport->timer);
3448 fcport->stats_status = BFA_STATUS_OK; 3509 fcport->stats_status = BFA_STATUS_OK;
3449 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, 3510 __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3450 __bfa_cb_fcport_stats_clr, fcport);
3451 break; 3511 break;
3452 3512
3453 case BFI_FCPORT_I2H_ENABLE_AEN: 3513 case BFI_FCPORT_I2H_ENABLE_AEN:
@@ -3779,25 +3839,25 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3779 * Fetch port statistics (FCQoS or FCoE). 3839 * Fetch port statistics (FCQoS or FCoE).
3780 */ 3840 */
3781bfa_status_t 3841bfa_status_t
3782bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, 3842bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3783 bfa_cb_port_t cbfn, void *cbarg)
3784{ 3843{
3785 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3844 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3786 3845
3787 if (fcport->stats_busy) { 3846 if (bfa_ioc_is_disabled(&bfa->ioc))
3788 bfa_trc(bfa, fcport->stats_busy); 3847 return BFA_STATUS_IOC_DISABLED;
3789 return BFA_STATUS_DEVBUSY;
3790 }
3791 3848
3792 fcport->stats_busy = BFA_TRUE; 3849 if (!list_empty(&fcport->statsclr_pending_q))
3793 fcport->stats_ret = stats; 3850 return BFA_STATUS_DEVBUSY;
3794 fcport->stats_cbfn = cbfn;
3795 fcport->stats_cbarg = cbarg;
3796 3851
3797 bfa_fcport_send_stats_get(fcport); 3852 if (list_empty(&fcport->stats_pending_q)) {
3853 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
3854 bfa_fcport_send_stats_get(fcport);
3855 bfa_timer_start(bfa, &fcport->timer,
3856 bfa_fcport_stats_get_timeout,
3857 fcport, BFA_FCPORT_STATS_TOV);
3858 } else
3859 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
3798 3860
3799 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
3800 fcport, BFA_FCPORT_STATS_TOV);
3801 return BFA_STATUS_OK; 3861 return BFA_STATUS_OK;
3802} 3862}
3803 3863
@@ -3805,27 +3865,25 @@ bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3805 * Reset port statistics (FCQoS or FCoE). 3865 * Reset port statistics (FCQoS or FCoE).
3806 */ 3866 */
3807bfa_status_t 3867bfa_status_t
3808bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg) 3868bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3809{ 3869{
3810 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3870 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3811 3871
3812 if (fcport->stats_busy) { 3872 if (!list_empty(&fcport->stats_pending_q))
3813 bfa_trc(bfa, fcport->stats_busy);
3814 return BFA_STATUS_DEVBUSY; 3873 return BFA_STATUS_DEVBUSY;
3815 }
3816
3817 fcport->stats_busy = BFA_TRUE;
3818 fcport->stats_cbfn = cbfn;
3819 fcport->stats_cbarg = cbarg;
3820 3874
3821 bfa_fcport_send_stats_clear(fcport); 3875 if (list_empty(&fcport->statsclr_pending_q)) {
3876 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
3877 bfa_fcport_send_stats_clear(fcport);
3878 bfa_timer_start(bfa, &fcport->timer,
3879 bfa_fcport_stats_clr_timeout,
3880 fcport, BFA_FCPORT_STATS_TOV);
3881 } else
3882 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
3822 3883
3823 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
3824 fcport, BFA_FCPORT_STATS_TOV);
3825 return BFA_STATUS_OK; 3884 return BFA_STATUS_OK;
3826} 3885}
3827 3886
3828
3829/* 3887/*
3830 * Fetch port attributes. 3888 * Fetch port attributes.
3831 */ 3889 */
@@ -4619,6 +4677,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4619 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle); 4677 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
4620 rp->fw_handle = msg.create_rsp->fw_handle; 4678 rp->fw_handle = msg.create_rsp->fw_handle;
4621 rp->qos_attr = msg.create_rsp->qos_attr; 4679 rp->qos_attr = msg.create_rsp->qos_attr;
4680 bfa_rport_set_lunmask(bfa, rp);
4622 WARN_ON(msg.create_rsp->status != BFA_STATUS_OK); 4681 WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
4623 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); 4682 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4624 break; 4683 break;
@@ -4626,6 +4685,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4626 case BFI_RPORT_I2H_DELETE_RSP: 4685 case BFI_RPORT_I2H_DELETE_RSP:
4627 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle); 4686 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
4628 WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK); 4687 WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
4688 bfa_rport_unset_lunmask(bfa, rp);
4629 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); 4689 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4630 break; 4690 break;
4631 4691
@@ -4706,6 +4766,37 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
4706 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED); 4766 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
4707} 4767}
4708 4768
4769/* Set Rport LUN Mask */
4770void
4771bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
4772{
4773 struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
4774 wwn_t lp_wwn, rp_wwn;
4775 u8 lp_tag = (u8)rp->rport_info.lp_tag;
4776
4777 rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
4778 lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
4779
4780 BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
4781 rp->lun_mask = BFA_TRUE;
4782 bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
4783}
4784
4785/* Unset Rport LUN mask */
4786void
4787bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
4788{
4789 struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
4790 wwn_t lp_wwn, rp_wwn;
4791
4792 rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
4793 lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
4794
4795 BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
4796 rp->lun_mask = BFA_FALSE;
4797 bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
4798 BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
4799}
4709 4800
4710/* 4801/*
4711 * SGPG related functions 4802 * SGPG related functions
@@ -5517,11 +5608,29 @@ bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5517 return BFA_STATUS_PORT_NOT_DISABLED; 5608 return BFA_STATUS_PORT_NOT_DISABLED;
5518 } 5609 }
5519 5610
5520 /* Check if the speed is supported */ 5611 /*
5521 bfa_fcport_get_attr(bfa, &attr); 5612 * Check if input speed is supported by the port mode
5522 bfa_trc(fcdiag, attr.speed_supported); 5613 */
5523 if (speed > attr.speed_supported) 5614 if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5524 return BFA_STATUS_UNSUPP_SPEED; 5615 if (!(speed == BFA_PORT_SPEED_1GBPS ||
5616 speed == BFA_PORT_SPEED_2GBPS ||
5617 speed == BFA_PORT_SPEED_4GBPS ||
5618 speed == BFA_PORT_SPEED_8GBPS ||
5619 speed == BFA_PORT_SPEED_16GBPS ||
5620 speed == BFA_PORT_SPEED_AUTO)) {
5621 bfa_trc(fcdiag, speed);
5622 return BFA_STATUS_UNSUPP_SPEED;
5623 }
5624 bfa_fcport_get_attr(bfa, &attr);
5625 bfa_trc(fcdiag, attr.speed_supported);
5626 if (speed > attr.speed_supported)
5627 return BFA_STATUS_UNSUPP_SPEED;
5628 } else {
5629 if (speed != BFA_PORT_SPEED_10GBPS) {
5630 bfa_trc(fcdiag, speed);
5631 return BFA_STATUS_UNSUPP_SPEED;
5632 }
5633 }
5525 5634
5526 /* For Mezz card, port speed entered needs to be checked */ 5635 /* For Mezz card, port speed entered needs to be checked */
5527 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) { 5636 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index fbe513a671b5..95adb86d3769 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -297,6 +297,7 @@ struct bfa_rport_s {
297 void *rport_drv; /* fcs/driver rport object */ 297 void *rport_drv; /* fcs/driver rport object */
298 u16 fw_handle; /* firmware rport handle */ 298 u16 fw_handle; /* firmware rport handle */
299 u16 rport_tag; /* BFA rport tag */ 299 u16 rport_tag; /* BFA rport tag */
300 u8 lun_mask; /* LUN mask flag */
300 struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */ 301 struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */
301 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ 302 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
302 struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */ 303 struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */
@@ -404,6 +405,7 @@ struct bfa_lps_s {
404 u8 bb_scn; /* local BB_SCN */ 405 u8 bb_scn; /* local BB_SCN */
405 u8 lsrjt_rsn; /* LSRJT reason */ 406 u8 lsrjt_rsn; /* LSRJT reason */
406 u8 lsrjt_expl; /* LSRJT explanation */ 407 u8 lsrjt_expl; /* LSRJT explanation */
408 u8 lun_mask; /* LUN mask flag */
407 wwn_t pwwn; /* port wwn of lport */ 409 wwn_t pwwn; /* port wwn of lport */
408 wwn_t nwwn; /* node wwn of lport */ 410 wwn_t nwwn; /* node wwn of lport */
409 wwn_t pr_pwwn; /* port wwn of lport peer */ 411 wwn_t pr_pwwn; /* port wwn of lport peer */
@@ -441,7 +443,6 @@ void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
441 */ 443 */
442 444
443#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port)) 445#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
444typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
445 446
446/* 447/*
447 * Link notification data structure 448 * Link notification data structure
@@ -495,13 +496,11 @@ struct bfa_fcport_s {
495 u8 *stats_kva; 496 u8 *stats_kva;
496 u64 stats_pa; 497 u64 stats_pa;
497 union bfa_fcport_stats_u *stats; 498 union bfa_fcport_stats_u *stats;
498 union bfa_fcport_stats_u *stats_ret; /* driver stats location */
499 bfa_status_t stats_status; /* stats/statsclr status */ 499 bfa_status_t stats_status; /* stats/statsclr status */
500 bfa_boolean_t stats_busy; /* outstanding stats/statsclr */ 500 struct list_head stats_pending_q;
501 struct list_head statsclr_pending_q;
501 bfa_boolean_t stats_qfull; 502 bfa_boolean_t stats_qfull;
502 u32 stats_reset_time; /* stats reset time stamp */ 503 u32 stats_reset_time; /* stats reset time stamp */
503 bfa_cb_port_t stats_cbfn; /* driver callback function */
504 void *stats_cbarg; /* *!< user callback arg */
505 bfa_boolean_t diag_busy; /* diag busy status */ 504 bfa_boolean_t diag_busy; /* diag busy status */
506 bfa_boolean_t beacon; /* port beacon status */ 505 bfa_boolean_t beacon; /* port beacon status */
507 bfa_boolean_t link_e2e_beacon; /* link beacon status */ 506 bfa_boolean_t link_e2e_beacon; /* link beacon status */
@@ -552,10 +551,9 @@ void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
552 bfa_boolean_t link_e2e_beacon); 551 bfa_boolean_t link_e2e_beacon);
553bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa); 552bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
554bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa, 553bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
555 union bfa_fcport_stats_u *stats, 554 struct bfa_cb_pending_q_s *cb);
556 bfa_cb_port_t cbfn, void *cbarg); 555bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa,
557bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, 556 struct bfa_cb_pending_q_s *cb);
558 void *cbarg);
559bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa); 557bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
560bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa); 558bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa);
561bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); 559bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
@@ -578,6 +576,19 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
578 struct bfa_rport_qos_attr_s new_qos_attr); 576 struct bfa_rport_qos_attr_s new_qos_attr);
579 577
580/* 578/*
579 * Rport LUN masking related
580 */
581#define BFA_RPORT_TAG_INVALID 0xffff
582#define BFA_LP_TAG_INVALID 0xff
583void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
584void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
585bfa_boolean_t bfa_rport_lunmask_active(struct bfa_rport_s *rp);
586wwn_t bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
587struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
588 wwn_t *lpwwn, wwn_t rpwwn);
589void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
590
591/*
581 * bfa fcxp API functions 592 * bfa fcxp API functions
582 */ 593 */
583struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa, 594struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index beb30a748ea5..66fb72531b34 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1348,7 +1348,7 @@ int
1348bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) 1348bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1349{ 1349{
1350 struct bfad_s *bfad; 1350 struct bfad_s *bfad;
1351 int error = -ENODEV, retval; 1351 int error = -ENODEV, retval, i;
1352 1352
1353 /* For single port cards - only claim function 0 */ 1353 /* For single port cards - only claim function 0 */
1354 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) && 1354 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
@@ -1372,6 +1372,12 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1372 bfa_trc_init(bfad->trcmod); 1372 bfa_trc_init(bfad->trcmod);
1373 bfa_trc(bfad, bfad_inst); 1373 bfa_trc(bfad, bfad_inst);
1374 1374
1375 /* AEN INIT */
1376 INIT_LIST_HEAD(&bfad->free_aen_q);
1377 INIT_LIST_HEAD(&bfad->active_aen_q);
1378 for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
1379 list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);
1380
1375 if (!(bfad_load_fwimg(pdev))) { 1381 if (!(bfad_load_fwimg(pdev))) {
1376 kfree(bfad->trcmod); 1382 kfree(bfad->trcmod);
1377 goto out_alloc_trace_failure; 1383 goto out_alloc_trace_failure;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 89f863ed2334..06fc00caeb41 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -56,7 +56,7 @@ bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
56 spin_lock_irqsave(&bfad->bfad_lock, flags); 56 spin_lock_irqsave(&bfad->bfad_lock, flags);
57 if (bfad->disable_active) { 57 if (bfad->disable_active) {
58 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 58 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
59 return EBUSY; 59 return -EBUSY;
60 } 60 }
61 61
62 bfad->disable_active = BFA_TRUE; 62 bfad->disable_active = BFA_TRUE;
@@ -90,6 +90,7 @@ bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
90 bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum); 90 bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
91 iocmd->factorynwwn = pattr.factorynwwn; 91 iocmd->factorynwwn = pattr.factorynwwn;
92 iocmd->factorypwwn = pattr.factorypwwn; 92 iocmd->factorypwwn = pattr.factorypwwn;
93 iocmd->bfad_num = bfad->inst_no;
93 im_port = bfad->pport.im_port; 94 im_port = bfad->pport.im_port;
94 iocmd->host = im_port->shost->host_no; 95 iocmd->host = im_port->shost->host_no;
95 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 96 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -178,6 +179,38 @@ out:
178} 179}
179 180
180int 181int
182bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
183{
184 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
185 unsigned long flags;
186
187 if (v_cmd == IOCMD_IOC_RESET_STATS) {
188 bfa_ioc_clear_stats(&bfad->bfa);
189 iocmd->status = BFA_STATUS_OK;
190 } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
191 spin_lock_irqsave(&bfad->bfad_lock, flags);
192 iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
193 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
194 }
195
196 return 0;
197}
198
199int
200bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
201{
202 struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
203
204 if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
205 strcpy(bfad->adapter_name, iocmd->name);
206 else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
207 strcpy(bfad->port_name, iocmd->name);
208
209 iocmd->status = BFA_STATUS_OK;
210 return 0;
211}
212
213int
181bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd) 214bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
182{ 215{
183 struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd; 216 struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
@@ -306,6 +339,81 @@ out:
306 return 0; 339 return 0;
307} 340}
308 341
342int
343bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
344{
345 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
346 struct bfad_hal_comp fcomp;
347 unsigned long flags;
348
349 init_completion(&fcomp.comp);
350 spin_lock_irqsave(&bfad->bfad_lock, flags);
351 iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
352 bfad_hcb_comp, &fcomp);
353 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
354 if (iocmd->status != BFA_STATUS_OK) {
355 bfa_trc(bfad, iocmd->status);
356 return 0;
357 }
358 wait_for_completion(&fcomp.comp);
359 iocmd->status = fcomp.status;
360 return 0;
361}
362
363int
364bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
365{
366 struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
367 unsigned long flags;
368
369 spin_lock_irqsave(&bfad->bfad_lock, flags);
370 if (v_cmd == IOCMD_PORT_CFG_TOPO)
371 cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
372 else if (v_cmd == IOCMD_PORT_CFG_SPEED)
373 cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
374 else if (v_cmd == IOCMD_PORT_CFG_ALPA)
375 cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
376 else if (v_cmd == IOCMD_PORT_CLR_ALPA)
377 cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
378 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
379
380 return 0;
381}
382
383int
384bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
385{
386 struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
387 (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
388 unsigned long flags;
389
390 spin_lock_irqsave(&bfad->bfad_lock, flags);
391 iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
392 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
393
394 return 0;
395}
396
397int
398bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
399{
400 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
401 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
402 unsigned long flags;
403
404 spin_lock_irqsave(&bfad->bfad_lock, flags);
405 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
406 if (v_cmd == IOCMD_PORT_BBSC_ENABLE)
407 fcport->cfg.bb_scn_state = BFA_TRUE;
408 else if (v_cmd == IOCMD_PORT_BBSC_DISABLE)
409 fcport->cfg.bb_scn_state = BFA_FALSE;
410 }
411 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
412
413 iocmd->status = BFA_STATUS_OK;
414 return 0;
415}
416
309static int 417static int
310bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd) 418bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
311{ 419{
@@ -354,6 +462,40 @@ out:
354} 462}
355 463
356int 464int
465bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
466{
467 struct bfa_fcs_lport_s *fcs_port;
468 struct bfa_bsg_reset_stats_s *iocmd =
469 (struct bfa_bsg_reset_stats_s *)cmd;
470 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
471 struct list_head *qe, *qen;
472 struct bfa_itnim_s *itnim;
473 unsigned long flags;
474
475 spin_lock_irqsave(&bfad->bfad_lock, flags);
476 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
477 iocmd->vf_id, iocmd->vpwwn);
478 if (fcs_port == NULL) {
479 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
480 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
481 goto out;
482 }
483
484 bfa_fcs_lport_clear_stats(fcs_port);
485 /* clear IO stats from all active itnims */
486 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
487 itnim = (struct bfa_itnim_s *) qe;
488 if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
489 continue;
490 bfa_itnim_clear_stats(itnim);
491 }
492 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
493 iocmd->status = BFA_STATUS_OK;
494out:
495 return 0;
496}
497
498int
357bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd) 499bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
358{ 500{
359 struct bfa_fcs_lport_s *fcs_port; 501 struct bfa_fcs_lport_s *fcs_port;
@@ -389,7 +531,7 @@ bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
389 void *iocmd_bufptr; 531 void *iocmd_bufptr;
390 532
391 if (iocmd->nrports == 0) 533 if (iocmd->nrports == 0)
392 return EINVAL; 534 return -EINVAL;
393 535
394 if (bfad_chk_iocmd_sz(payload_len, 536 if (bfad_chk_iocmd_sz(payload_len,
395 sizeof(struct bfa_bsg_lport_get_rports_s), 537 sizeof(struct bfa_bsg_lport_get_rports_s),
@@ -539,6 +681,152 @@ out:
539 return 0; 681 return 0;
540} 682}
541 683
684int
685bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
686{
687 struct bfa_bsg_rport_reset_stats_s *iocmd =
688 (struct bfa_bsg_rport_reset_stats_s *)cmd;
689 struct bfa_fcs_lport_s *fcs_port;
690 struct bfa_fcs_rport_s *fcs_rport;
691 struct bfa_rport_s *rport;
692 unsigned long flags;
693
694 spin_lock_irqsave(&bfad->bfad_lock, flags);
695 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
696 iocmd->vf_id, iocmd->pwwn);
697 if (fcs_port == NULL) {
698 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
699 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
700 goto out;
701 }
702
703 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
704 if (fcs_rport == NULL) {
705 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
706 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
707 goto out;
708 }
709
710 memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
711 rport = bfa_fcs_rport_get_halrport(fcs_rport);
712 memset(&rport->stats, 0, sizeof(rport->stats));
713 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
714 iocmd->status = BFA_STATUS_OK;
715out:
716 return 0;
717}
718
719int
720bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
721{
722 struct bfa_bsg_rport_set_speed_s *iocmd =
723 (struct bfa_bsg_rport_set_speed_s *)cmd;
724 struct bfa_fcs_lport_s *fcs_port;
725 struct bfa_fcs_rport_s *fcs_rport;
726 unsigned long flags;
727
728 spin_lock_irqsave(&bfad->bfad_lock, flags);
729 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
730 iocmd->vf_id, iocmd->pwwn);
731 if (fcs_port == NULL) {
732 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
733 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
734 goto out;
735 }
736
737 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
738 if (fcs_rport == NULL) {
739 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
740 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
741 goto out;
742 }
743
744 fcs_rport->rpf.assigned_speed = iocmd->speed;
745 /* Set this speed in f/w only if the RPSC speed is not available */
746 if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
747 bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
748 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
749 iocmd->status = BFA_STATUS_OK;
750out:
751 return 0;
752}
753
754int
755bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
756{
757 struct bfa_fcs_vport_s *fcs_vport;
758 struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
759 unsigned long flags;
760
761 spin_lock_irqsave(&bfad->bfad_lock, flags);
762 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
763 iocmd->vf_id, iocmd->vpwwn);
764 if (fcs_vport == NULL) {
765 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
766 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
767 goto out;
768 }
769
770 bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
771 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
772 iocmd->status = BFA_STATUS_OK;
773out:
774 return 0;
775}
776
777int
778bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
779{
780 struct bfa_fcs_vport_s *fcs_vport;
781 struct bfa_bsg_vport_stats_s *iocmd =
782 (struct bfa_bsg_vport_stats_s *)cmd;
783 unsigned long flags;
784
785 spin_lock_irqsave(&bfad->bfad_lock, flags);
786 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
787 iocmd->vf_id, iocmd->vpwwn);
788 if (fcs_vport == NULL) {
789 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
790 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
791 goto out;
792 }
793
794 memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
795 sizeof(struct bfa_vport_stats_s));
796 memcpy((void *)&iocmd->vport_stats.port_stats,
797 (void *)&fcs_vport->lport.stats,
798 sizeof(struct bfa_lport_stats_s));
799 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
800 iocmd->status = BFA_STATUS_OK;
801out:
802 return 0;
803}
804
805int
806bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
807{
808 struct bfa_fcs_vport_s *fcs_vport;
809 struct bfa_bsg_reset_stats_s *iocmd =
810 (struct bfa_bsg_reset_stats_s *)cmd;
811 unsigned long flags;
812
813 spin_lock_irqsave(&bfad->bfad_lock, flags);
814 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
815 iocmd->vf_id, iocmd->vpwwn);
816 if (fcs_vport == NULL) {
817 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
818 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
819 goto out;
820 }
821
822 memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
823 memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
824 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
825 iocmd->status = BFA_STATUS_OK;
826out:
827 return 0;
828}
829
542static int 830static int
543bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd, 831bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
544 unsigned int payload_len) 832 unsigned int payload_len)
@@ -582,6 +870,66 @@ out:
582} 870}
583 871
584int 872int
873bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
874{
875 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
876 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
877 unsigned long flags;
878
879 spin_lock_irqsave(&bfad->bfad_lock, flags);
880
881 if (cmd == IOCMD_RATELIM_ENABLE)
882 fcport->cfg.ratelimit = BFA_TRUE;
883 else if (cmd == IOCMD_RATELIM_DISABLE)
884 fcport->cfg.ratelimit = BFA_FALSE;
885
886 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
887 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
888
889 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
890 iocmd->status = BFA_STATUS_OK;
891
892 return 0;
893}
894
895int
896bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
897{
898 struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
899 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
900 unsigned long flags;
901
902 spin_lock_irqsave(&bfad->bfad_lock, flags);
903
904 /* Auto and speeds greater than the supported speed, are invalid */
905 if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
906 (iocmd->speed > fcport->speed_sup)) {
907 iocmd->status = BFA_STATUS_UNSUPP_SPEED;
908 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
909 return 0;
910 }
911
912 fcport->cfg.trl_def_speed = iocmd->speed;
913 iocmd->status = BFA_STATUS_OK;
914 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
915
916 return 0;
917}
918
919int
920bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
921{
922 struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
923 unsigned long flags;
924
925 spin_lock_irqsave(&bfad->bfad_lock, flags);
926 bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
927 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
928 iocmd->status = BFA_STATUS_OK;
929 return 0;
930}
931
932int
585bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd) 933bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
586{ 934{
587 struct bfa_bsg_fcpim_modstats_s *iocmd = 935 struct bfa_bsg_fcpim_modstats_s *iocmd =
@@ -604,6 +952,28 @@ bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
604} 952}
605 953
606int 954int
/*
 * IOCMD_FCPIM_MODSTATSCLR handler: clear the per-itnim statistics for
 * every itnim on the FCP-IM module, then zero the aggregate
 * deleted-itnim statistics.  Always succeeds.
 */
int
bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
		(struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* Walk the itnim queue; _safe variant tolerates entry removal. */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}
	memset(&fcpim->del_itn_stats, 0,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
975
976int
607bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd) 977bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
608{ 978{
609 struct bfa_bsg_fcpim_del_itn_stats_s *iocmd = 979 struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
@@ -670,6 +1040,35 @@ bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
670} 1040}
671 1041
672static int 1042static int
/*
 * IOCMD_ITNIM_RESET_STATS handler: locate the itnim by
 * (vf_id, local pwwn, remote rpwwn) and clear both its FCS-level and
 * HAL-level statistics.  iocmd->status distinguishes unknown local
 * port, unknown remote port, and success.
 */
static int
bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			/* Clear FCS-side stats, then the HAL itnim stats. */
			bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
			bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
1070
1071static int
673bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd) 1072bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
674{ 1073{
675 struct bfa_bsg_itnim_itnstats_s *iocmd = 1074 struct bfa_bsg_itnim_itnstats_s *iocmd =
@@ -1511,11 +1910,545 @@ out:
1511 return 0; 1910 return 0;
1512} 1911}
1513 1912
#define BFA_DEBUG_FW_CORE_CHUNK_SZ	0x4000U /* 16K chunks for FW dump */
/*
 * IOCMD_DEBUG_FW_CORE handler: read one chunk of the firmware core
 * image into the buffer that follows the bsg header in the payload.
 * The caller's buffer must hold at least one chunk, with a u16-aligned
 * size and a u32-aligned offset; otherwise BFA_STATUS_EINVAL is
 * reported.  iocmd->offset/bufsz are updated by bfa_ioc_debug_fwcore.
 */
int
bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
	void	*iocmd_bufptr;
	unsigned long	flags;

	/* Payload must be big enough for header + one full chunk. */
	if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
			BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
			!IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
			!IS_ALIGNED(iocmd->offset, sizeof(u32))) {
		bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	/* Data area starts right after the fixed bsg header. */
	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
				(u32 *)&iocmd->offset, &iocmd->bufsz);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
out:
	return 0;
}
1944
/*
 * Debug control multiplexer for IOCMD_DEBUG_FW_STATE_CLR,
 * IOCMD_DEBUG_PORTLOG_CLR, IOCMD_DEBUG_START_DTRC and
 * IOCMD_DEBUG_STOP_DTRC.  Only the fwsave flag update takes the
 * driver lock; the portlog/trace operations run unlocked as in the
 * rest of this file.  Always succeeds.
 */
int
bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
		/* Re-arm the one-shot firmware state save. */
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	} else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
		bfad->plog_buf.head = bfad->plog_buf.tail = 0;
	else if (v_cmd == IOCMD_DEBUG_START_DTRC)
		bfa_trc_init(bfad->trcmod);
	else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
		bfa_trc_stop(bfad->trcmod);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
1965
1966int
1967bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
1968{
1969 struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
1970
1971 if (iocmd->ctl == BFA_TRUE)
1972 bfad->plog_buf.plog_enabled = 1;
1973 else
1974 bfad->plog_buf.plog_enabled = 0;
1975
1976 iocmd->status = BFA_STATUS_OK;
1977 return 0;
1978}
1979
/*
 * IOCMD_FCPIM_PROFILE_ON / IOCMD_FCPIM_PROFILE_OFF handler: toggle
 * FCP-IM I/O profiling.  The wall-clock time is captured before taking
 * the lock and passed to the HAL as the profiling start timestamp.
 */
int
bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_profile_s *iocmd =
				(struct bfa_bsg_fcpim_profile_s *)cmd;
	struct timeval  tv;
	unsigned long	flags;

	/* Sample time outside the spinlock; only the HAL call needs it held. */
	do_gettimeofday(&tv);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
		iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
	else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
		iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
1998
/*
 * IOCMD_ITNIM_GET_IOPROFILE handler: locate the itnim by
 * (vf_id, local lpwwn, remote rpwwn) and fetch its HAL I/O profile
 * into the reply.  iocmd->status distinguishes unknown local port,
 * unknown remote port, and the profile-fetch result.
 */
static int
bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_ioprofile_s *iocmd =
				(struct bfa_bsg_itnim_ioprofile_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long   flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else
			iocmd->status = bfa_itnim_get_ioprofile(
						bfa_fcs_itnim_get_halitn(itnim),
						&iocmd->ioprofile);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
2025
/*
 * IOCMD_FCPORT_GET_STATS handler: request the FC-port statistics from
 * the HAL.  The HAL completes asynchronously via the pending-queue
 * callback (bfad_hcb_comp), which copies into iocmd->stats and
 * signals the completion; we wait for it outside the driver lock.
 */
int
bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
				(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, &iocmd->stats);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		/* Request was rejected; no callback will fire. */
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2050
/*
 * IOCMD_FCPORT_RESET_STATS handler: ask the HAL to clear the FC-port
 * statistics.  Completion is asynchronous via the pending-queue
 * callback; we wait for it outside the driver lock.
 */
int
bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	/* No data buffer needed for a clear operation. */
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		/* Request was rejected; no callback will fire. */
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2074
/*
 * IOCMD_BOOT_CFG handler: write the boot configuration into the
 * per-PCI-function boot partition of the flash.  The flash update
 * completes asynchronously; wait for the completion outside the lock.
 */
int
bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2096
/*
 * IOCMD_BOOT_QUERY handler: read the boot configuration from the
 * per-PCI-function boot partition of the flash into iocmd->cfg.
 * The flash read completes asynchronously; wait outside the lock.
 */
int
bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2118
/*
 * IOCMD_PREBOOT_QUERY handler: report the pre-boot (BIOS/PBC)
 * configuration cached in the IOCFC config response — boot enable,
 * LUN count, port speed and the boot LUN list.  Always succeeds.
 */
int
bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
	struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
2137
/*
 * IOCMD_ETHBOOT_CFG handler: write the ethernet-boot (PXE)
 * configuration into the PXECFG flash partition for this IOC's port.
 * The flash update completes asynchronously; wait outside the lock.
 */
int
bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2160
/*
 * IOCMD_ETHBOOT_QUERY handler: read the ethernet-boot (PXE)
 * configuration from the PXECFG flash partition into iocmd->cfg.
 * The flash read completes asynchronously; wait outside the lock.
 */
int
bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2183
/*
 * IOCMD_TRUNK_ENABLE / IOCMD_TRUNK_DISABLE handler: toggle trunking.
 * Changing the trunk setting requires bouncing the FC port: it is
 * disabled, reconfigured, and re-enabled below.  Always reports OK.
 */
int
bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if (v_cmd == IOCMD_TRUNK_ENABLE) {
		trunk->attr.state = BFA_TRUNK_OFFLINE;
		bfa_fcport_disable(&bfad->bfa);
		fcport->cfg.trunked = BFA_TRUE;
	} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
		trunk->attr.state = BFA_TRUNK_DISABLED;
		bfa_fcport_disable(&bfad->bfa);
		fcport->cfg.trunked = BFA_FALSE;
	}

	/* Re-enable the port unless it was administratively disabled. */
	if (!bfa_fcport_is_disabled(&bfad->bfa))
		bfa_fcport_enable(&bfad->bfa);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
2212
2213int
2214bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
2215{
2216 struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
2217 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2218 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2219 unsigned long flags;
2220
2221 spin_lock_irqsave(&bfad->bfad_lock, flags);
2222 memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
2223 sizeof(struct bfa_trunk_attr_s));
2224 iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
2225 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2226
2227 iocmd->status = BFA_STATUS_OK;
2228 return 0;
2229}
2230
2231int
2232bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2233{
2234 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2235 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2236 unsigned long flags;
2237
2238 spin_lock_irqsave(&bfad->bfad_lock, flags);
2239 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
2240 if (v_cmd == IOCMD_QOS_ENABLE)
2241 fcport->cfg.qos_enabled = BFA_TRUE;
2242 else if (v_cmd == IOCMD_QOS_DISABLE)
2243 fcport->cfg.qos_enabled = BFA_FALSE;
2244 }
2245 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2246
2247 iocmd->status = BFA_STATUS_OK;
2248 return 0;
2249}
2250
/*
 * IOCMD_QOS_GET_ATTR handler: report the QoS state and total
 * buffer-to-buffer credit.  total_bb_cr is stored big-endian by the
 * firmware and converted to host order here.  Always succeeds.
 */
int
bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->attr.state = fcport->qos_attr.state;
	iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
2266
/*
 * IOCMD_QOS_GET_VC_ATTR handler: copy the per-virtual-channel QoS
 * attributes into the reply.  Scalar header fields are byte-swapped
 * from the firmware's big-endian layout; the per-VC entries are
 * copied field by field.
 *
 * NOTE(review): the loop trusts total_vc_count from the firmware as
 * the bound for both arrays — presumably both vc_info[] arrays are
 * sized for the firmware maximum; verify against the struct
 * definitions.
 */
int
bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_vc_attr_s *iocmd =
				(struct bfa_bsg_qos_vc_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
	unsigned long	flags;
	u32	i = 0;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
	iocmd->attr.shared_credit  = be16_to_cpu(bfa_vc_attr->shared_credit);
	iocmd->attr.elp_opmode_flags  =
				be32_to_cpu(bfa_vc_attr->elp_opmode_flags);

	/* Individual VC info */
	while (i < iocmd->attr.total_vc_count) {
		iocmd->attr.vc_info[i].vc_credit =
				bfa_vc_attr->vc_info[i].vc_credit;
		iocmd->attr.vc_info[i].borrow_credit =
				bfa_vc_attr->vc_info[i].borrow_credit;
		iocmd->attr.vc_info[i].priority =
				bfa_vc_attr->vc_info[i].priority;
		i++;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
2298
/*
 * IOCMD_QOS_GET_STATS handler: fetch the FC-port statistics (shared
 * with the QoS counters) via the asynchronous pending-queue callback.
 * The WARN_ON documents that this command is only expected on IOCs in
 * FC mode.  Wait for the completion outside the driver lock.
 */
int
bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
				(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, &iocmd->stats);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		/* Request was rejected; no callback will fire. */
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2325
/*
 * IOCMD_QOS_RESET_STATS handler: clear the FC-port statistics via the
 * asynchronous pending-queue callback.  The WARN_ON documents that
 * this command is only expected on IOCs in FC mode.  Wait for the
 * completion outside the driver lock.
 */
int
bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	/* No data buffer needed for a clear operation. */
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		/* Request was rejected; no callback will fire. */
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2351
/*
 * IOCMD_VF_GET_STATS handler: look up the virtual fabric by vf_id and
 * copy its statistics into the reply.  iocmd->status reports
 * BFA_STATUS_UNKNOWN_VFID when the lookup fails.
 */
int
bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_stats_s *iocmd =
			(struct bfa_bsg_vf_stats_s *)cmd;
	struct bfa_fcs_fabric_s	*fcs_vf;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
		sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
2374
/*
 * IOCMD_VF_RESET_STATS handler: look up the virtual fabric by vf_id
 * and zero its statistics.  iocmd->status reports
 * BFA_STATUS_UNKNOWN_VFID when the lookup fails.
 */
int
bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_reset_stats_s *iocmd =
			(struct bfa_bsg_vf_reset_stats_s *)cmd;
	struct bfa_fcs_fabric_s	*fcs_vf;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
2396
2397int
2398bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
2399{
2400 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
2401 unsigned long flags;
2402
2403 spin_lock_irqsave(&bfad->bfad_lock, flags);
2404 if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
2405 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
2406 else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
2407 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
2408 else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
2409 iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
2410 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2411 return 0;
2412}
2413
2414int
2415bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
2416{
2417 struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
2418 (struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
2419 struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
2420 unsigned long flags;
2421
2422 spin_lock_irqsave(&bfad->bfad_lock, flags);
2423 iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
2424 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2425 return 0;
2426}
2427
2428int
2429bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2430{
2431 struct bfa_bsg_fcpim_lunmask_s *iocmd =
2432 (struct bfa_bsg_fcpim_lunmask_s *)cmd;
2433 unsigned long flags;
2434
2435 spin_lock_irqsave(&bfad->bfad_lock, flags);
2436 if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
2437 iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
2438 &iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
2439 else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
2440 iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
2441 iocmd->vf_id, &iocmd->pwwn,
2442 iocmd->rpwwn, iocmd->lun);
2443 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2444 return 0;
2445}
2446
1514static int 2447static int
1515bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, 2448bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1516 unsigned int payload_len) 2449 unsigned int payload_len)
1517{ 2450{
1518 int rc = EINVAL; 2451 int rc = -EINVAL;
1519 2452
1520 switch (cmd) { 2453 switch (cmd) {
1521 case IOCMD_IOC_ENABLE: 2454 case IOCMD_IOC_ENABLE:
@@ -1536,6 +2469,14 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1536 case IOCMD_IOC_GET_FWSTATS: 2469 case IOCMD_IOC_GET_FWSTATS:
1537 rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len); 2470 rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
1538 break; 2471 break;
2472 case IOCMD_IOC_RESET_STATS:
2473 case IOCMD_IOC_RESET_FWSTATS:
2474 rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
2475 break;
2476 case IOCMD_IOC_SET_ADAPTER_NAME:
2477 case IOCMD_IOC_SET_PORT_NAME:
2478 rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
2479 break;
1539 case IOCMD_IOCFC_GET_ATTR: 2480 case IOCMD_IOCFC_GET_ATTR:
1540 rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd); 2481 rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
1541 break; 2482 break;
@@ -1554,12 +2495,31 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1554 case IOCMD_PORT_GET_STATS: 2495 case IOCMD_PORT_GET_STATS:
1555 rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len); 2496 rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
1556 break; 2497 break;
2498 case IOCMD_PORT_RESET_STATS:
2499 rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
2500 break;
2501 case IOCMD_PORT_CFG_TOPO:
2502 case IOCMD_PORT_CFG_SPEED:
2503 case IOCMD_PORT_CFG_ALPA:
2504 case IOCMD_PORT_CLR_ALPA:
2505 rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
2506 break;
2507 case IOCMD_PORT_CFG_MAXFRSZ:
2508 rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
2509 break;
2510 case IOCMD_PORT_BBSC_ENABLE:
2511 case IOCMD_PORT_BBSC_DISABLE:
2512 rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd);
2513 break;
1557 case IOCMD_LPORT_GET_ATTR: 2514 case IOCMD_LPORT_GET_ATTR:
1558 rc = bfad_iocmd_lport_get_attr(bfad, iocmd); 2515 rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
1559 break; 2516 break;
1560 case IOCMD_LPORT_GET_STATS: 2517 case IOCMD_LPORT_GET_STATS:
1561 rc = bfad_iocmd_lport_get_stats(bfad, iocmd); 2518 rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
1562 break; 2519 break;
2520 case IOCMD_LPORT_RESET_STATS:
2521 rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
2522 break;
1563 case IOCMD_LPORT_GET_IOSTATS: 2523 case IOCMD_LPORT_GET_IOSTATS:
1564 rc = bfad_iocmd_lport_get_iostats(bfad, iocmd); 2524 rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
1565 break; 2525 break;
@@ -1575,12 +2535,40 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1575 case IOCMD_RPORT_GET_STATS: 2535 case IOCMD_RPORT_GET_STATS:
1576 rc = bfad_iocmd_rport_get_stats(bfad, iocmd); 2536 rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
1577 break; 2537 break;
2538 case IOCMD_RPORT_RESET_STATS:
2539 rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
2540 break;
2541 case IOCMD_RPORT_SET_SPEED:
2542 rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
2543 break;
2544 case IOCMD_VPORT_GET_ATTR:
2545 rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
2546 break;
2547 case IOCMD_VPORT_GET_STATS:
2548 rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
2549 break;
2550 case IOCMD_VPORT_RESET_STATS:
2551 rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
2552 break;
1578 case IOCMD_FABRIC_GET_LPORTS: 2553 case IOCMD_FABRIC_GET_LPORTS:
1579 rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len); 2554 rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
1580 break; 2555 break;
2556 case IOCMD_RATELIM_ENABLE:
2557 case IOCMD_RATELIM_DISABLE:
2558 rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
2559 break;
2560 case IOCMD_RATELIM_DEF_SPEED:
2561 rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
2562 break;
2563 case IOCMD_FCPIM_FAILOVER:
2564 rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
2565 break;
1581 case IOCMD_FCPIM_MODSTATS: 2566 case IOCMD_FCPIM_MODSTATS:
1582 rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd); 2567 rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
1583 break; 2568 break;
2569 case IOCMD_FCPIM_MODSTATSCLR:
2570 rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
2571 break;
1584 case IOCMD_FCPIM_DEL_ITN_STATS: 2572 case IOCMD_FCPIM_DEL_ITN_STATS:
1585 rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd); 2573 rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
1586 break; 2574 break;
@@ -1590,6 +2578,9 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1590 case IOCMD_ITNIM_GET_IOSTATS: 2578 case IOCMD_ITNIM_GET_IOSTATS:
1591 rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd); 2579 rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
1592 break; 2580 break;
2581 case IOCMD_ITNIM_RESET_STATS:
2582 rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
2583 break;
1593 case IOCMD_ITNIM_GET_ITNSTATS: 2584 case IOCMD_ITNIM_GET_ITNSTATS:
1594 rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd); 2585 rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
1595 break; 2586 break;
@@ -1702,11 +2693,92 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1702 case IOCMD_DEBUG_PORTLOG: 2693 case IOCMD_DEBUG_PORTLOG:
1703 rc = bfad_iocmd_porglog_get(bfad, iocmd); 2694 rc = bfad_iocmd_porglog_get(bfad, iocmd);
1704 break; 2695 break;
2696 case IOCMD_DEBUG_FW_CORE:
2697 rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
2698 break;
2699 case IOCMD_DEBUG_FW_STATE_CLR:
2700 case IOCMD_DEBUG_PORTLOG_CLR:
2701 case IOCMD_DEBUG_START_DTRC:
2702 case IOCMD_DEBUG_STOP_DTRC:
2703 rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
2704 break;
2705 case IOCMD_DEBUG_PORTLOG_CTL:
2706 rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
2707 break;
2708 case IOCMD_FCPIM_PROFILE_ON:
2709 case IOCMD_FCPIM_PROFILE_OFF:
2710 rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
2711 break;
2712 case IOCMD_ITNIM_GET_IOPROFILE:
2713 rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
2714 break;
2715 case IOCMD_FCPORT_GET_STATS:
2716 rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
2717 break;
2718 case IOCMD_FCPORT_RESET_STATS:
2719 rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
2720 break;
2721 case IOCMD_BOOT_CFG:
2722 rc = bfad_iocmd_boot_cfg(bfad, iocmd);
2723 break;
2724 case IOCMD_BOOT_QUERY:
2725 rc = bfad_iocmd_boot_query(bfad, iocmd);
2726 break;
2727 case IOCMD_PREBOOT_QUERY:
2728 rc = bfad_iocmd_preboot_query(bfad, iocmd);
2729 break;
2730 case IOCMD_ETHBOOT_CFG:
2731 rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
2732 break;
2733 case IOCMD_ETHBOOT_QUERY:
2734 rc = bfad_iocmd_ethboot_query(bfad, iocmd);
2735 break;
2736 case IOCMD_TRUNK_ENABLE:
2737 case IOCMD_TRUNK_DISABLE:
2738 rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
2739 break;
2740 case IOCMD_TRUNK_GET_ATTR:
2741 rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
2742 break;
2743 case IOCMD_QOS_ENABLE:
2744 case IOCMD_QOS_DISABLE:
2745 rc = bfad_iocmd_qos(bfad, iocmd, cmd);
2746 break;
2747 case IOCMD_QOS_GET_ATTR:
2748 rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
2749 break;
2750 case IOCMD_QOS_GET_VC_ATTR:
2751 rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
2752 break;
2753 case IOCMD_QOS_GET_STATS:
2754 rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
2755 break;
2756 case IOCMD_QOS_RESET_STATS:
2757 rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
2758 break;
2759 case IOCMD_VF_GET_STATS:
2760 rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
2761 break;
2762 case IOCMD_VF_RESET_STATS:
2763 rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
2764 break;
2765 case IOCMD_FCPIM_LUNMASK_ENABLE:
2766 case IOCMD_FCPIM_LUNMASK_DISABLE:
2767 case IOCMD_FCPIM_LUNMASK_CLEAR:
2768 rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
2769 break;
2770 case IOCMD_FCPIM_LUNMASK_QUERY:
2771 rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
2772 break;
2773 case IOCMD_FCPIM_LUNMASK_ADD:
2774 case IOCMD_FCPIM_LUNMASK_DELETE:
2775 rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
2776 break;
1705 default: 2777 default:
1706 rc = EINVAL; 2778 rc = -EINVAL;
1707 break; 2779 break;
1708 } 2780 }
1709 return -rc; 2781 return rc;
1710} 2782}
1711 2783
1712static int 2784static int
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index 99b0e8a70c89..e859adb9aa9e 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -30,24 +30,48 @@ enum {
30 IOCMD_IOC_GET_INFO, 30 IOCMD_IOC_GET_INFO,
31 IOCMD_IOC_GET_STATS, 31 IOCMD_IOC_GET_STATS,
32 IOCMD_IOC_GET_FWSTATS, 32 IOCMD_IOC_GET_FWSTATS,
33 IOCMD_IOC_RESET_STATS,
34 IOCMD_IOC_RESET_FWSTATS,
35 IOCMD_IOC_SET_ADAPTER_NAME,
36 IOCMD_IOC_SET_PORT_NAME,
33 IOCMD_IOCFC_GET_ATTR, 37 IOCMD_IOCFC_GET_ATTR,
34 IOCMD_IOCFC_SET_INTR, 38 IOCMD_IOCFC_SET_INTR,
35 IOCMD_PORT_ENABLE, 39 IOCMD_PORT_ENABLE,
36 IOCMD_PORT_DISABLE, 40 IOCMD_PORT_DISABLE,
37 IOCMD_PORT_GET_ATTR, 41 IOCMD_PORT_GET_ATTR,
38 IOCMD_PORT_GET_STATS, 42 IOCMD_PORT_GET_STATS,
43 IOCMD_PORT_RESET_STATS,
44 IOCMD_PORT_CFG_TOPO,
45 IOCMD_PORT_CFG_SPEED,
46 IOCMD_PORT_CFG_ALPA,
47 IOCMD_PORT_CFG_MAXFRSZ,
48 IOCMD_PORT_CLR_ALPA,
49 IOCMD_PORT_BBSC_ENABLE,
50 IOCMD_PORT_BBSC_DISABLE,
39 IOCMD_LPORT_GET_ATTR, 51 IOCMD_LPORT_GET_ATTR,
40 IOCMD_LPORT_GET_RPORTS, 52 IOCMD_LPORT_GET_RPORTS,
41 IOCMD_LPORT_GET_STATS, 53 IOCMD_LPORT_GET_STATS,
54 IOCMD_LPORT_RESET_STATS,
42 IOCMD_LPORT_GET_IOSTATS, 55 IOCMD_LPORT_GET_IOSTATS,
43 IOCMD_RPORT_GET_ATTR, 56 IOCMD_RPORT_GET_ATTR,
44 IOCMD_RPORT_GET_ADDR, 57 IOCMD_RPORT_GET_ADDR,
45 IOCMD_RPORT_GET_STATS, 58 IOCMD_RPORT_GET_STATS,
59 IOCMD_RPORT_RESET_STATS,
60 IOCMD_RPORT_SET_SPEED,
61 IOCMD_VPORT_GET_ATTR,
62 IOCMD_VPORT_GET_STATS,
63 IOCMD_VPORT_RESET_STATS,
46 IOCMD_FABRIC_GET_LPORTS, 64 IOCMD_FABRIC_GET_LPORTS,
65 IOCMD_RATELIM_ENABLE,
66 IOCMD_RATELIM_DISABLE,
67 IOCMD_RATELIM_DEF_SPEED,
68 IOCMD_FCPIM_FAILOVER,
47 IOCMD_FCPIM_MODSTATS, 69 IOCMD_FCPIM_MODSTATS,
70 IOCMD_FCPIM_MODSTATSCLR,
48 IOCMD_FCPIM_DEL_ITN_STATS, 71 IOCMD_FCPIM_DEL_ITN_STATS,
49 IOCMD_ITNIM_GET_ATTR, 72 IOCMD_ITNIM_GET_ATTR,
50 IOCMD_ITNIM_GET_IOSTATS, 73 IOCMD_ITNIM_GET_IOSTATS,
74 IOCMD_ITNIM_RESET_STATS,
51 IOCMD_ITNIM_GET_ITNSTATS, 75 IOCMD_ITNIM_GET_ITNSTATS,
52 IOCMD_IOC_PCIFN_CFG, 76 IOCMD_IOC_PCIFN_CFG,
53 IOCMD_FCPORT_ENABLE, 77 IOCMD_FCPORT_ENABLE,
@@ -86,6 +110,39 @@ enum {
86 IOCMD_PHY_READ_FW, 110 IOCMD_PHY_READ_FW,
87 IOCMD_VHBA_QUERY, 111 IOCMD_VHBA_QUERY,
88 IOCMD_DEBUG_PORTLOG, 112 IOCMD_DEBUG_PORTLOG,
113 IOCMD_DEBUG_FW_CORE,
114 IOCMD_DEBUG_FW_STATE_CLR,
115 IOCMD_DEBUG_PORTLOG_CLR,
116 IOCMD_DEBUG_START_DTRC,
117 IOCMD_DEBUG_STOP_DTRC,
118 IOCMD_DEBUG_PORTLOG_CTL,
119 IOCMD_FCPIM_PROFILE_ON,
120 IOCMD_FCPIM_PROFILE_OFF,
121 IOCMD_ITNIM_GET_IOPROFILE,
122 IOCMD_FCPORT_GET_STATS,
123 IOCMD_FCPORT_RESET_STATS,
124 IOCMD_BOOT_CFG,
125 IOCMD_BOOT_QUERY,
126 IOCMD_PREBOOT_QUERY,
127 IOCMD_ETHBOOT_CFG,
128 IOCMD_ETHBOOT_QUERY,
129 IOCMD_TRUNK_ENABLE,
130 IOCMD_TRUNK_DISABLE,
131 IOCMD_TRUNK_GET_ATTR,
132 IOCMD_QOS_ENABLE,
133 IOCMD_QOS_DISABLE,
134 IOCMD_QOS_GET_ATTR,
135 IOCMD_QOS_GET_VC_ATTR,
136 IOCMD_QOS_GET_STATS,
137 IOCMD_QOS_RESET_STATS,
138 IOCMD_VF_GET_STATS,
139 IOCMD_VF_RESET_STATS,
140 IOCMD_FCPIM_LUNMASK_ENABLE,
141 IOCMD_FCPIM_LUNMASK_DISABLE,
142 IOCMD_FCPIM_LUNMASK_CLEAR,
143 IOCMD_FCPIM_LUNMASK_QUERY,
144 IOCMD_FCPIM_LUNMASK_ADD,
145 IOCMD_FCPIM_LUNMASK_DELETE,
89}; 146};
90 147
91struct bfa_bsg_gen_s { 148struct bfa_bsg_gen_s {
@@ -94,6 +151,43 @@ struct bfa_bsg_gen_s {
94 u16 rsvd; 151 u16 rsvd;
95}; 152};
96 153
154struct bfa_bsg_portlogctl_s {
155 bfa_status_t status;
156 u16 bfad_num;
157 u16 rsvd;
158 bfa_boolean_t ctl;
159 int inst_no;
160};
161
162struct bfa_bsg_fcpim_profile_s {
163 bfa_status_t status;
164 u16 bfad_num;
165 u16 rsvd;
166};
167
168struct bfa_bsg_itnim_ioprofile_s {
169 bfa_status_t status;
170 u16 bfad_num;
171 u16 vf_id;
172 wwn_t lpwwn;
173 wwn_t rpwwn;
174 struct bfa_itnim_ioprofile_s ioprofile;
175};
176
177struct bfa_bsg_fcport_stats_s {
178 bfa_status_t status;
179 u16 bfad_num;
180 u16 rsvd;
181 union bfa_fcport_stats_u stats;
182};
183
184struct bfa_bsg_ioc_name_s {
185 bfa_status_t status;
186 u16 bfad_num;
187 u16 rsvd;
188 char name[BFA_ADAPTER_SYM_NAME_LEN];
189};
190
97struct bfa_bsg_ioc_info_s { 191struct bfa_bsg_ioc_info_s {
98 bfa_status_t status; 192 bfa_status_t status;
99 u16 bfad_num; 193 u16 bfad_num;
@@ -164,6 +258,20 @@ struct bfa_bsg_port_attr_s {
164 struct bfa_port_attr_s attr; 258 struct bfa_port_attr_s attr;
165}; 259};
166 260
261struct bfa_bsg_port_cfg_s {
262 bfa_status_t status;
263 u16 bfad_num;
264 u16 rsvd;
265 u32 param;
266 u32 rsvd1;
267};
268
269struct bfa_bsg_port_cfg_maxfrsize_s {
270 bfa_status_t status;
271 u16 bfad_num;
272 u16 maxfrsize;
273};
274
167struct bfa_bsg_port_stats_s { 275struct bfa_bsg_port_stats_s {
168 bfa_status_t status; 276 bfa_status_t status;
169 u16 bfad_num; 277 u16 bfad_num;
@@ -237,6 +345,47 @@ struct bfa_bsg_rport_scsi_addr_s {
237 u32 lun; 345 u32 lun;
238}; 346};
239 347
348struct bfa_bsg_rport_reset_stats_s {
349 bfa_status_t status;
350 u16 bfad_num;
351 u16 vf_id;
352 wwn_t pwwn;
353 wwn_t rpwwn;
354};
355
356struct bfa_bsg_rport_set_speed_s {
357 bfa_status_t status;
358 u16 bfad_num;
359 u16 vf_id;
360 enum bfa_port_speed speed;
361 u32 rsvd;
362 wwn_t pwwn;
363 wwn_t rpwwn;
364};
365
366struct bfa_bsg_vport_attr_s {
367 bfa_status_t status;
368 u16 bfad_num;
369 u16 vf_id;
370 wwn_t vpwwn;
371 struct bfa_vport_attr_s vport_attr;
372};
373
374struct bfa_bsg_vport_stats_s {
375 bfa_status_t status;
376 u16 bfad_num;
377 u16 vf_id;
378 wwn_t vpwwn;
379 struct bfa_vport_stats_s vport_stats;
380};
381
382struct bfa_bsg_reset_stats_s {
383 bfa_status_t status;
384 u16 bfad_num;
385 u16 vf_id;
386 wwn_t vpwwn;
387};
388
240struct bfa_bsg_fabric_get_lports_s { 389struct bfa_bsg_fabric_get_lports_s {
241 bfa_status_t status; 390 bfa_status_t status;
242 u16 bfad_num; 391 u16 bfad_num;
@@ -246,6 +395,19 @@ struct bfa_bsg_fabric_get_lports_s {
246 u32 rsvd; 395 u32 rsvd;
247}; 396};
248 397
398struct bfa_bsg_trl_speed_s {
399 bfa_status_t status;
400 u16 bfad_num;
401 u16 rsvd;
402 enum bfa_port_speed speed;
403};
404
405struct bfa_bsg_fcpim_s {
406 bfa_status_t status;
407 u16 bfad_num;
408 u16 param;
409};
410
249struct bfa_bsg_fcpim_modstats_s { 411struct bfa_bsg_fcpim_modstats_s {
250 bfa_status_t status; 412 bfa_status_t status;
251 u16 bfad_num; 413 u16 bfad_num;
@@ -258,6 +420,11 @@ struct bfa_bsg_fcpim_del_itn_stats_s {
258 struct bfa_fcpim_del_itn_stats_s modstats; 420 struct bfa_fcpim_del_itn_stats_s modstats;
259}; 421};
260 422
423struct bfa_bsg_fcpim_modstatsclr_s {
424 bfa_status_t status;
425 u16 bfad_num;
426};
427
261struct bfa_bsg_itnim_attr_s { 428struct bfa_bsg_itnim_attr_s {
262 bfa_status_t status; 429 bfa_status_t status;
263 u16 bfad_num; 430 u16 bfad_num;
@@ -485,6 +652,76 @@ struct bfa_bsg_vhba_attr_s {
485 struct bfa_vhba_attr_s attr; 652 struct bfa_vhba_attr_s attr;
486}; 653};
487 654
655struct bfa_bsg_boot_s {
656 bfa_status_t status;
657 u16 bfad_num;
658 u16 rsvd;
659 struct bfa_boot_cfg_s cfg;
660};
661
662struct bfa_bsg_preboot_s {
663 bfa_status_t status;
664 u16 bfad_num;
665 u16 rsvd;
666 struct bfa_boot_pbc_s cfg;
667};
668
669struct bfa_bsg_ethboot_s {
670 bfa_status_t status;
671 u16 bfad_num;
672 u16 rsvd;
673 struct bfa_ethboot_cfg_s cfg;
674};
675
676struct bfa_bsg_trunk_attr_s {
677 bfa_status_t status;
678 u16 bfad_num;
679 u16 rsvd;
680 struct bfa_trunk_attr_s attr;
681};
682
683struct bfa_bsg_qos_attr_s {
684 bfa_status_t status;
685 u16 bfad_num;
686 u16 rsvd;
687 struct bfa_qos_attr_s attr;
688};
689
690struct bfa_bsg_qos_vc_attr_s {
691 bfa_status_t status;
692 u16 bfad_num;
693 u16 rsvd;
694 struct bfa_qos_vc_attr_s attr;
695};
696
697struct bfa_bsg_vf_stats_s {
698 bfa_status_t status;
699 u16 bfad_num;
700 u16 vf_id;
701 struct bfa_vf_stats_s stats;
702};
703
704struct bfa_bsg_vf_reset_stats_s {
705 bfa_status_t status;
706 u16 bfad_num;
707 u16 vf_id;
708};
709
710struct bfa_bsg_fcpim_lunmask_query_s {
711 bfa_status_t status;
712 u16 bfad_num;
713 struct bfa_lunmask_cfg_s lun_mask;
714};
715
716struct bfa_bsg_fcpim_lunmask_s {
717 bfa_status_t status;
718 u16 bfad_num;
719 u16 vf_id;
720 wwn_t pwwn;
721 wwn_t rpwwn;
722 struct scsi_lun lun;
723};
724
488struct bfa_bsg_fcpt_s { 725struct bfa_bsg_fcpt_s {
489 bfa_status_t status; 726 bfa_status_t status;
490 u16 vf_id; 727 u16 vf_id;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 48661a2726d7..bda999ad9f52 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -56,7 +56,7 @@
56#ifdef BFA_DRIVER_VERSION 56#ifdef BFA_DRIVER_VERSION
57#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION 57#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
58#else 58#else
59#define BFAD_DRIVER_VERSION "3.0.2.1" 59#define BFAD_DRIVER_VERSION "3.0.2.2"
60#endif 60#endif
61 61
62#define BFAD_PROTO_NAME FCPI_NAME 62#define BFAD_PROTO_NAME FCPI_NAME
@@ -224,6 +224,10 @@ struct bfad_s {
224 char *regdata; 224 char *regdata;
225 u32 reglen; 225 u32 reglen;
226 struct dentry *bfad_dentry_files[5]; 226 struct dentry *bfad_dentry_files[5];
227 struct list_head free_aen_q;
228 struct list_head active_aen_q;
229 struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY];
230 spinlock_t bfad_aen_spinlock;
227}; 231};
228 232
229/* BFAD state machine events */ 233/* BFAD state machine events */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index f2bf81265ae5..01312381639f 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -656,6 +656,31 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port)
656 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 656 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
657} 657}
658 658
659static void bfad_aen_im_notify_handler(struct work_struct *work)
660{
661 struct bfad_im_s *im =
662 container_of(work, struct bfad_im_s, aen_im_notify_work);
663 struct bfa_aen_entry_s *aen_entry;
664 struct bfad_s *bfad = im->bfad;
665 struct Scsi_Host *shost = bfad->pport.im_port->shost;
666 void *event_data;
667 unsigned long flags;
668
669 while (!list_empty(&bfad->active_aen_q)) {
670 spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
671 bfa_q_deq(&bfad->active_aen_q, &aen_entry);
672 spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
673 event_data = (char *)aen_entry + sizeof(struct list_head);
674 fc_host_post_vendor_event(shost, fc_get_event_number(),
675 sizeof(struct bfa_aen_entry_s) -
676 sizeof(struct list_head),
677 (char *)event_data, BFAD_NL_VENDOR_ID);
678 spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
679 list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
680 spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
681 }
682}
683
659bfa_status_t 684bfa_status_t
660bfad_im_probe(struct bfad_s *bfad) 685bfad_im_probe(struct bfad_s *bfad)
661{ 686{
@@ -676,6 +701,7 @@ bfad_im_probe(struct bfad_s *bfad)
676 rc = BFA_STATUS_FAILED; 701 rc = BFA_STATUS_FAILED;
677 } 702 }
678 703
704 INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
679ext: 705ext:
680 return rc; 706 return rc;
681} 707}
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 4fe34d576b05..004b6cf848d9 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -115,8 +115,30 @@ struct bfad_im_s {
115 struct bfad_s *bfad; 115 struct bfad_s *bfad;
116 struct workqueue_struct *drv_workq; 116 struct workqueue_struct *drv_workq;
117 char drv_workq_name[KOBJ_NAME_LEN]; 117 char drv_workq_name[KOBJ_NAME_LEN];
118 struct work_struct aen_im_notify_work;
118}; 119};
119 120
121#define bfad_get_aen_entry(_drv, _entry) do { \
122 unsigned long _flags; \
123 spin_lock_irqsave(&(_drv)->bfad_aen_spinlock, _flags); \
124 bfa_q_deq(&(_drv)->free_aen_q, &(_entry)); \
125 if (_entry) \
126 list_add_tail(&(_entry)->qe, &(_drv)->active_aen_q); \
127 spin_unlock_irqrestore(&(_drv)->bfad_aen_spinlock, _flags); \
128} while (0)
129
130/* post fc_host vendor event */
131#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do { \
132 do_gettimeofday(&(_entry)->aen_tv); \
133 (_entry)->bfad_num = (_drv)->inst_no; \
134 (_entry)->seq_num = (_cnt); \
135 (_entry)->aen_category = (_cat); \
136 (_entry)->aen_type = (_evt); \
137 if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE) \
138 queue_work((_drv)->im->drv_workq, \
139 &(_drv)->im->aen_im_notify_work); \
140} while (0)
141
120struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, 142struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
121 struct bfad_s *); 143 struct bfad_s *);
122bfa_status_t bfad_thread_workq(struct bfad_s *bfad); 144bfa_status_t bfad_thread_workq(struct bfad_s *bfad);
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 1e258d5f8aec..b2ba0b2e91b2 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -784,6 +784,17 @@ enum bfi_sfp_i2h_e {
784}; 784};
785 785
786/* 786/*
787 * SFP state change notification
788 */
789struct bfi_sfp_scn_s {
790 struct bfi_mhdr_s mhr; /* host msg header */
791 u8 event;
792 u8 sfpid;
793 u8 pomlvl; /* pom level: normal/warning/alarm */
794 u8 is_elb; /* e-loopback */
795};
796
797/*
787 * SFP state 798 * SFP state
788 */ 799 */
789enum bfa_sfp_stat_e { 800enum bfa_sfp_stat_e {
@@ -926,6 +937,15 @@ struct bfi_flash_erase_rsp_s {
926}; 937};
927 938
928/* 939/*
940 * Flash event notification
941 */
942struct bfi_flash_event_s {
943 struct bfi_mhdr_s mh; /* Common msg header */
944 bfa_status_t status;
945 u32 param;
946};
947
948/*
929 *---------------------------------------------------------------------- 949 *----------------------------------------------------------------------
930 * DIAG 950 * DIAG
931 *---------------------------------------------------------------------- 951 *----------------------------------------------------------------------
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index d924236e1b91..42228ca5a9d2 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -2,7 +2,7 @@
2#define _BNX2FC_H_ 2#define _BNX2FC_H_
3/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver. 3/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
4 * 4 *
5 * Copyright (c) 2008 - 2010 Broadcom Corporation 5 * Copyright (c) 2008 - 2011 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -62,7 +62,7 @@
62#include "bnx2fc_constants.h" 62#include "bnx2fc_constants.h"
63 63
64#define BNX2FC_NAME "bnx2fc" 64#define BNX2FC_NAME "bnx2fc"
65#define BNX2FC_VERSION "1.0.3" 65#define BNX2FC_VERSION "1.0.4"
66 66
67#define PFX "bnx2fc: " 67#define PFX "bnx2fc: "
68 68
@@ -141,6 +141,10 @@
141 141
142#define BNX2FC_RNID_HBA 0x7 142#define BNX2FC_RNID_HBA 0x7
143 143
144#define SRR_RETRY_COUNT 5
145#define REC_RETRY_COUNT 1
146#define BNX2FC_NUM_ERR_BITS 63
147
144/* bnx2fc driver uses only one instance of fcoe_percpu_s */ 148/* bnx2fc driver uses only one instance of fcoe_percpu_s */
145extern struct fcoe_percpu_s bnx2fc_global; 149extern struct fcoe_percpu_s bnx2fc_global;
146 150
@@ -153,18 +157,13 @@ struct bnx2fc_percpu_s {
153}; 157};
154 158
155struct bnx2fc_hba { 159struct bnx2fc_hba {
156 struct list_head link; 160 struct list_head list;
157 struct cnic_dev *cnic; 161 struct cnic_dev *cnic;
158 struct pci_dev *pcidev; 162 struct pci_dev *pcidev;
159 struct net_device *netdev;
160 struct net_device *phys_dev; 163 struct net_device *phys_dev;
161 unsigned long reg_with_cnic; 164 unsigned long reg_with_cnic;
162 #define BNX2FC_CNIC_REGISTERED 1 165 #define BNX2FC_CNIC_REGISTERED 1
163 struct packet_type fcoe_packet_type;
164 struct packet_type fip_packet_type;
165 struct bnx2fc_cmd_mgr *cmd_mgr; 166 struct bnx2fc_cmd_mgr *cmd_mgr;
166 struct workqueue_struct *timer_work_queue;
167 struct kref kref;
168 spinlock_t hba_lock; 167 spinlock_t hba_lock;
169 struct mutex hba_mutex; 168 struct mutex hba_mutex;
170 unsigned long adapter_state; 169 unsigned long adapter_state;
@@ -172,15 +171,9 @@ struct bnx2fc_hba {
172 #define ADAPTER_STATE_GOING_DOWN 1 171 #define ADAPTER_STATE_GOING_DOWN 1
173 #define ADAPTER_STATE_LINK_DOWN 2 172 #define ADAPTER_STATE_LINK_DOWN 2
174 #define ADAPTER_STATE_READY 3 173 #define ADAPTER_STATE_READY 3
175 u32 flags; 174 unsigned long flags;
176 unsigned long init_done; 175 #define BNX2FC_FLAG_FW_INIT_DONE 0
177 #define BNX2FC_FW_INIT_DONE 0 176 #define BNX2FC_FLAG_DESTROY_CMPL 1
178 #define BNX2FC_CTLR_INIT_DONE 1
179 #define BNX2FC_CREATE_DONE 2
180 struct fcoe_ctlr ctlr;
181 struct list_head vports;
182 u8 vlan_enabled;
183 int vlan_id;
184 u32 next_conn_id; 177 u32 next_conn_id;
185 struct fcoe_task_ctx_entry **task_ctx; 178 struct fcoe_task_ctx_entry **task_ctx;
186 dma_addr_t *task_ctx_dma; 179 dma_addr_t *task_ctx_dma;
@@ -199,38 +192,41 @@ struct bnx2fc_hba {
199 char *dummy_buffer; 192 char *dummy_buffer;
200 dma_addr_t dummy_buf_dma; 193 dma_addr_t dummy_buf_dma;
201 194
195 /* Active list of offloaded sessions */
196 struct bnx2fc_rport **tgt_ofld_list;
197
198 /* statistics */
202 struct fcoe_statistics_params *stats_buffer; 199 struct fcoe_statistics_params *stats_buffer;
203 dma_addr_t stats_buf_dma; 200 dma_addr_t stats_buf_dma;
204 201 struct completion stat_req_done;
205 /*
206 * PCI related info.
207 */
208 u16 pci_did;
209 u16 pci_vid;
210 u16 pci_sdid;
211 u16 pci_svid;
212 u16 pci_func;
213 u16 pci_devno;
214
215 struct task_struct *l2_thread;
216
217 /* linkdown handling */
218 wait_queue_head_t shutdown_wait;
219 int wait_for_link_down;
220 202
221 /*destroy handling */ 203 /*destroy handling */
222 struct timer_list destroy_timer; 204 struct timer_list destroy_timer;
223 wait_queue_head_t destroy_wait; 205 wait_queue_head_t destroy_wait;
224 206
225 /* Active list of offloaded sessions */ 207 /* linkdown handling */
226 struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS]; 208 wait_queue_head_t shutdown_wait;
209 int wait_for_link_down;
227 int num_ofld_sess; 210 int num_ofld_sess;
211 struct list_head vports;
212};
228 213
229 /* statistics */ 214struct bnx2fc_interface {
230 struct completion stat_req_done; 215 struct list_head list;
216 unsigned long if_flags;
217 #define BNX2FC_CTLR_INIT_DONE 0
218 struct bnx2fc_hba *hba;
219 struct net_device *netdev;
220 struct packet_type fcoe_packet_type;
221 struct packet_type fip_packet_type;
222 struct workqueue_struct *timer_work_queue;
223 struct kref kref;
224 struct fcoe_ctlr ctlr;
225 u8 vlan_enabled;
226 int vlan_id;
231}; 227};
232 228
233#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr) 229#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
234 230
235struct bnx2fc_lport { 231struct bnx2fc_lport {
236 struct list_head list; 232 struct list_head list;
@@ -252,9 +248,11 @@ struct bnx2fc_rport {
252 struct fc_rport_priv *rdata; 248 struct fc_rport_priv *rdata;
253 void __iomem *ctx_base; 249 void __iomem *ctx_base;
254#define DPM_TRIGER_TYPE 0x40 250#define DPM_TRIGER_TYPE 0x40
251 u32 io_timeout;
255 u32 fcoe_conn_id; 252 u32 fcoe_conn_id;
256 u32 context_id; 253 u32 context_id;
257 u32 sid; 254 u32 sid;
255 int dev_type;
258 256
259 unsigned long flags; 257 unsigned long flags;
260#define BNX2FC_FLAG_SESSION_READY 0x1 258#define BNX2FC_FLAG_SESSION_READY 0x1
@@ -262,10 +260,9 @@ struct bnx2fc_rport {
262#define BNX2FC_FLAG_DISABLED 0x3 260#define BNX2FC_FLAG_DISABLED 0x3
263#define BNX2FC_FLAG_DESTROYED 0x4 261#define BNX2FC_FLAG_DESTROYED 0x4
264#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5 262#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5
265#define BNX2FC_FLAG_DESTROY_CMPL 0x6 263#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x6
266#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x7 264#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7
267#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8 265#define BNX2FC_FLAG_EXPL_LOGO 0x8
268#define BNX2FC_FLAG_EXPL_LOGO 0x9
269 266
270 u8 src_addr[ETH_ALEN]; 267 u8 src_addr[ETH_ALEN];
271 u32 max_sqes; 268 u32 max_sqes;
@@ -327,12 +324,9 @@ struct bnx2fc_rport {
327 spinlock_t cq_lock; 324 spinlock_t cq_lock;
328 atomic_t num_active_ios; 325 atomic_t num_active_ios;
329 u32 flush_in_prog; 326 u32 flush_in_prog;
330 unsigned long work_time_slice;
331 unsigned long timestamp; 327 unsigned long timestamp;
332 struct list_head free_task_list; 328 struct list_head free_task_list;
333 struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1]; 329 struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
334 atomic_t pi;
335 atomic_t ci;
336 struct list_head active_cmd_queue; 330 struct list_head active_cmd_queue;
337 struct list_head els_queue; 331 struct list_head els_queue;
338 struct list_head io_retire_queue; 332 struct list_head io_retire_queue;
@@ -367,6 +361,8 @@ struct bnx2fc_els_cb_arg {
367 struct bnx2fc_cmd *aborted_io_req; 361 struct bnx2fc_cmd *aborted_io_req;
368 struct bnx2fc_cmd *io_req; 362 struct bnx2fc_cmd *io_req;
369 u16 l2_oxid; 363 u16 l2_oxid;
364 u32 offset;
365 enum fc_rctl r_ctl;
370}; 366};
371 367
372/* bnx2fc command structure */ 368/* bnx2fc command structure */
@@ -380,6 +376,7 @@ struct bnx2fc_cmd {
380#define BNX2FC_ABTS 3 376#define BNX2FC_ABTS 3
381#define BNX2FC_ELS 4 377#define BNX2FC_ELS 4
382#define BNX2FC_CLEANUP 5 378#define BNX2FC_CLEANUP 5
379#define BNX2FC_SEQ_CLEANUP 6
383 u8 io_req_flags; 380 u8 io_req_flags;
384 struct kref refcount; 381 struct kref refcount;
385 struct fcoe_port *port; 382 struct fcoe_port *port;
@@ -393,6 +390,7 @@ struct bnx2fc_cmd {
393 struct completion tm_done; 390 struct completion tm_done;
394 int wait_for_comp; 391 int wait_for_comp;
395 u16 xid; 392 u16 xid;
393 struct fcoe_err_report_entry err_entry;
396 struct fcoe_task_ctx_entry *task; 394 struct fcoe_task_ctx_entry *task;
397 struct io_bdt *bd_tbl; 395 struct io_bdt *bd_tbl;
398 struct fcp_rsp *rsp; 396 struct fcp_rsp *rsp;
@@ -409,6 +407,12 @@ struct bnx2fc_cmd {
409#define BNX2FC_FLAG_IO_COMPL 0x9 407#define BNX2FC_FLAG_IO_COMPL 0x9
410#define BNX2FC_FLAG_ELS_DONE 0xa 408#define BNX2FC_FLAG_ELS_DONE 0xa
411#define BNX2FC_FLAG_ELS_TIMEOUT 0xb 409#define BNX2FC_FLAG_ELS_TIMEOUT 0xb
410#define BNX2FC_FLAG_CMD_LOST 0xc
411#define BNX2FC_FLAG_SRR_SENT 0xd
412 u8 rec_retry;
413 u8 srr_retry;
414 u32 srr_offset;
415 u8 srr_rctl;
412 u32 fcp_resid; 416 u32 fcp_resid;
413 u32 fcp_rsp_len; 417 u32 fcp_rsp_len;
414 u32 fcp_sns_len; 418 u32 fcp_sns_len;
@@ -439,6 +443,7 @@ struct bnx2fc_unsol_els {
439 443
440 444
441 445
446struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt);
442struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type); 447struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
443void bnx2fc_cmd_release(struct kref *ref); 448void bnx2fc_cmd_release(struct kref *ref);
444int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd); 449int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
@@ -476,6 +481,10 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
476void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, 481void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
477 struct fcoe_task_ctx_entry *task, 482 struct fcoe_task_ctx_entry *task,
478 u16 orig_xid); 483 u16 orig_xid);
484void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnup_req,
485 struct fcoe_task_ctx_entry *task,
486 struct bnx2fc_cmd *orig_io_req,
487 u32 offset);
479void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, 488void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
480 struct fcoe_task_ctx_entry *task); 489 struct fcoe_task_ctx_entry *task);
481void bnx2fc_init_task(struct bnx2fc_cmd *io_req, 490void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
@@ -525,5 +534,13 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
525 unsigned char *buf, 534 unsigned char *buf,
526 u32 frame_len, u16 l2_oxid); 535 u32 frame_len, u16 l2_oxid);
527int bnx2fc_send_stat_req(struct bnx2fc_hba *hba); 536int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
537int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_cmd *io_req);
538int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req);
539int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl);
540void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req,
541 struct fcoe_task_ctx_entry *task,
542 u8 rx_state);
543int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
544 enum fc_rctl r_ctl);
528 545
529#endif 546#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
index 7f6aff68cc53..3416d9a746c7 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_debug.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -21,21 +21,21 @@ extern unsigned int bnx2fc_debug_level;
21 21
22#define BNX2FC_ELS_DBG(fmt, arg...) \ 22#define BNX2FC_ELS_DBG(fmt, arg...) \
23 BNX2FC_CHK_LOGGING(LOG_ELS, \ 23 BNX2FC_CHK_LOGGING(LOG_ELS, \
24 printk(KERN_ALERT PFX fmt, ##arg)) 24 printk(KERN_INFO PFX fmt, ##arg))
25 25
26#define BNX2FC_MISC_DBG(fmt, arg...) \ 26#define BNX2FC_MISC_DBG(fmt, arg...) \
27 BNX2FC_CHK_LOGGING(LOG_MISC, \ 27 BNX2FC_CHK_LOGGING(LOG_MISC, \
28 printk(KERN_ALERT PFX fmt, ##arg)) 28 printk(KERN_INFO PFX fmt, ##arg))
29 29
30#define BNX2FC_IO_DBG(io_req, fmt, arg...) \ 30#define BNX2FC_IO_DBG(io_req, fmt, arg...) \
31 do { \ 31 do { \
32 if (!io_req || !io_req->port || !io_req->port->lport || \ 32 if (!io_req || !io_req->port || !io_req->port->lport || \
33 !io_req->port->lport->host) \ 33 !io_req->port->lport->host) \
34 BNX2FC_CHK_LOGGING(LOG_IO, \ 34 BNX2FC_CHK_LOGGING(LOG_IO, \
35 printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ 35 printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
36 else \ 36 else \
37 BNX2FC_CHK_LOGGING(LOG_IO, \ 37 BNX2FC_CHK_LOGGING(LOG_IO, \
38 shost_printk(KERN_ALERT, \ 38 shost_printk(KERN_INFO, \
39 (io_req)->port->lport->host, \ 39 (io_req)->port->lport->host, \
40 PFX "xid:0x%x " fmt, \ 40 PFX "xid:0x%x " fmt, \
41 (io_req)->xid, ##arg)); \ 41 (io_req)->xid, ##arg)); \
@@ -46,10 +46,10 @@ extern unsigned int bnx2fc_debug_level;
46 if (!tgt || !tgt->port || !tgt->port->lport || \ 46 if (!tgt || !tgt->port || !tgt->port->lport || \
47 !tgt->port->lport->host || !tgt->rport) \ 47 !tgt->port->lport->host || !tgt->rport) \
48 BNX2FC_CHK_LOGGING(LOG_TGT, \ 48 BNX2FC_CHK_LOGGING(LOG_TGT, \
49 printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ 49 printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
50 else \ 50 else \
51 BNX2FC_CHK_LOGGING(LOG_TGT, \ 51 BNX2FC_CHK_LOGGING(LOG_TGT, \
52 shost_printk(KERN_ALERT, \ 52 shost_printk(KERN_INFO, \
53 (tgt)->port->lport->host, \ 53 (tgt)->port->lport->host, \
54 PFX "port:%x " fmt, \ 54 PFX "port:%x " fmt, \
55 (tgt)->rport->port_id, ##arg)); \ 55 (tgt)->rport->port_id, ##arg)); \
@@ -60,10 +60,10 @@ extern unsigned int bnx2fc_debug_level;
60 do { \ 60 do { \
61 if (!lport || !lport->host) \ 61 if (!lport || !lport->host) \
62 BNX2FC_CHK_LOGGING(LOG_HBA, \ 62 BNX2FC_CHK_LOGGING(LOG_HBA, \
63 printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ 63 printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
64 else \ 64 else \
65 BNX2FC_CHK_LOGGING(LOG_HBA, \ 65 BNX2FC_CHK_LOGGING(LOG_HBA, \
66 shost_printk(KERN_ALERT, lport->host, \ 66 shost_printk(KERN_INFO, lport->host, \
67 PFX fmt, ##arg)); \ 67 PFX fmt, ##arg)); \
68 } while (0) 68 } while (0)
69 69
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index 7e89143f15cf..d66dcbd0df10 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -3,7 +3,7 @@
3 * This file contains helper routines that handle ELS requests 3 * This file contains helper routines that handle ELS requests
4 * and responses. 4 * and responses.
5 * 5 *
6 * Copyright (c) 2008 - 2010 Broadcom Corporation 6 * Copyright (c) 2008 - 2011 Broadcom Corporation
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -253,13 +253,417 @@ int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
253 return rc; 253 return rc;
254} 254}
255 255
256void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
257{
258 struct bnx2fc_mp_req *mp_req;
259 struct fc_frame_header *fc_hdr, *fh;
260 struct bnx2fc_cmd *srr_req;
261 struct bnx2fc_cmd *orig_io_req;
262 struct fc_frame *fp;
263 unsigned char *buf;
264 void *resp_buf;
265 u32 resp_len, hdr_len;
266 u8 opcode;
267 int rc = 0;
268
269 orig_io_req = cb_arg->aborted_io_req;
270 srr_req = cb_arg->io_req;
271 if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
272 BNX2FC_IO_DBG(srr_req, "srr_compl: xid - 0x%x completed",
273 orig_io_req->xid);
274 goto srr_compl_done;
275 }
276 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
277 BNX2FC_IO_DBG(srr_req, "rec abts in prog "
278 "orig_io - 0x%x\n",
279 orig_io_req->xid);
280 goto srr_compl_done;
281 }
282 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
283 /* SRR timedout */
284 BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
285 "orig_io - 0x%x\n",
286 orig_io_req->xid);
287 rc = bnx2fc_initiate_abts(srr_req);
288 if (rc != SUCCESS) {
289 BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
290 "failed. issue cleanup\n");
291 bnx2fc_initiate_cleanup(srr_req);
292 }
293 orig_io_req->srr_retry++;
294 if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
295 struct bnx2fc_rport *tgt = orig_io_req->tgt;
296 spin_unlock_bh(&tgt->tgt_lock);
297 rc = bnx2fc_send_srr(orig_io_req,
298 orig_io_req->srr_offset,
299 orig_io_req->srr_rctl);
300 spin_lock_bh(&tgt->tgt_lock);
301 if (!rc)
302 goto srr_compl_done;
303 }
304
305 rc = bnx2fc_initiate_abts(orig_io_req);
306 if (rc != SUCCESS) {
307 BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
308 "failed xid = 0x%x. issue cleanup\n",
309 orig_io_req->xid);
310 bnx2fc_initiate_cleanup(orig_io_req);
311 }
312 goto srr_compl_done;
313 }
314 mp_req = &(srr_req->mp_req);
315 fc_hdr = &(mp_req->resp_fc_hdr);
316 resp_len = mp_req->resp_len;
317 resp_buf = mp_req->resp_buf;
318
319 hdr_len = sizeof(*fc_hdr);
320 buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
321 if (!buf) {
322 printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
323 goto srr_compl_done;
324 }
325 memcpy(buf, fc_hdr, hdr_len);
326 memcpy(buf + hdr_len, resp_buf, resp_len);
327
328 fp = fc_frame_alloc(NULL, resp_len);
329 if (!fp) {
330 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
331 goto free_buf;
332 }
333
334 fh = (struct fc_frame_header *) fc_frame_header_get(fp);
335 /* Copy FC Frame header and payload into the frame */
336 memcpy(fh, buf, hdr_len + resp_len);
337
338 opcode = fc_frame_payload_op(fp);
339 switch (opcode) {
340 case ELS_LS_ACC:
341 BNX2FC_IO_DBG(srr_req, "SRR success\n");
342 break;
343 case ELS_LS_RJT:
344 BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
345 rc = bnx2fc_initiate_abts(orig_io_req);
346 if (rc != SUCCESS) {
347 BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
348 "failed xid = 0x%x. issue cleanup\n",
349 orig_io_req->xid);
350 bnx2fc_initiate_cleanup(orig_io_req);
351 }
352 break;
353 default:
354 BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
355 opcode);
356 break;
357 }
358 fc_frame_free(fp);
359free_buf:
360 kfree(buf);
361srr_compl_done:
362 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
363}
364
365void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
366{
367 struct bnx2fc_cmd *orig_io_req, *new_io_req;
368 struct bnx2fc_cmd *rec_req;
369 struct bnx2fc_mp_req *mp_req;
370 struct fc_frame_header *fc_hdr, *fh;
371 struct fc_els_ls_rjt *rjt;
372 struct fc_els_rec_acc *acc;
373 struct bnx2fc_rport *tgt;
374 struct fcoe_err_report_entry *err_entry;
375 struct scsi_cmnd *sc_cmd;
376 enum fc_rctl r_ctl;
377 unsigned char *buf;
378 void *resp_buf;
379 struct fc_frame *fp;
380 u8 opcode;
381 u32 offset;
382 u32 e_stat;
383 u32 resp_len, hdr_len;
384 int rc = 0;
385 bool send_seq_clnp = false;
386 bool abort_io = false;
387
388 BNX2FC_MISC_DBG("Entered rec_compl callback\n");
389 rec_req = cb_arg->io_req;
390 orig_io_req = cb_arg->aborted_io_req;
391 BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
392 tgt = orig_io_req->tgt;
393
394 if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
395 BNX2FC_IO_DBG(rec_req, "completed"
396 "orig_io - 0x%x\n",
397 orig_io_req->xid);
398 goto rec_compl_done;
399 }
400 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
401 BNX2FC_IO_DBG(rec_req, "abts in prog "
402 "orig_io - 0x%x\n",
403 orig_io_req->xid);
404 goto rec_compl_done;
405 }
406 /* Handle REC timeout case */
407 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
408 BNX2FC_IO_DBG(rec_req, "timed out, abort "
409 "orig_io - 0x%x\n",
410 orig_io_req->xid);
411 /* els req is timed out. send abts for els */
412 rc = bnx2fc_initiate_abts(rec_req);
413 if (rc != SUCCESS) {
414 BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
415 "failed. issue cleanup\n");
416 bnx2fc_initiate_cleanup(rec_req);
417 }
418 orig_io_req->rec_retry++;
419 /* REC timedout. send ABTS to the orig IO req */
420 if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
421 spin_unlock_bh(&tgt->tgt_lock);
422 rc = bnx2fc_send_rec(orig_io_req);
423 spin_lock_bh(&tgt->tgt_lock);
424 if (!rc)
425 goto rec_compl_done;
426 }
427 rc = bnx2fc_initiate_abts(orig_io_req);
428 if (rc != SUCCESS) {
429 BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
430 "failed xid = 0x%x. issue cleanup\n",
431 orig_io_req->xid);
432 bnx2fc_initiate_cleanup(orig_io_req);
433 }
434 goto rec_compl_done;
435 }
436 mp_req = &(rec_req->mp_req);
437 fc_hdr = &(mp_req->resp_fc_hdr);
438 resp_len = mp_req->resp_len;
439 acc = resp_buf = mp_req->resp_buf;
440
441 hdr_len = sizeof(*fc_hdr);
442
443 buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
444 if (!buf) {
445 printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
446 goto rec_compl_done;
447 }
448 memcpy(buf, fc_hdr, hdr_len);
449 memcpy(buf + hdr_len, resp_buf, resp_len);
450
451 fp = fc_frame_alloc(NULL, resp_len);
452 if (!fp) {
453 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
454 goto free_buf;
455 }
456
457 fh = (struct fc_frame_header *) fc_frame_header_get(fp);
458 /* Copy FC Frame header and payload into the frame */
459 memcpy(fh, buf, hdr_len + resp_len);
460
461 opcode = fc_frame_payload_op(fp);
462 if (opcode == ELS_LS_RJT) {
463 BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
464 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
465 if ((rjt->er_reason == ELS_RJT_LOGIC ||
466 rjt->er_reason == ELS_RJT_UNAB) &&
467 rjt->er_explan == ELS_EXPL_OXID_RXID) {
468 BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
469 new_io_req = bnx2fc_cmd_alloc(tgt);
470 if (!new_io_req)
471 goto abort_io;
472 new_io_req->sc_cmd = orig_io_req->sc_cmd;
473 /* cleanup orig_io_req that is with the FW */
474 set_bit(BNX2FC_FLAG_CMD_LOST,
475 &orig_io_req->req_flags);
476 bnx2fc_initiate_cleanup(orig_io_req);
477 /* Post a new IO req with the same sc_cmd */
478 BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
479 spin_unlock_bh(&tgt->tgt_lock);
480 rc = bnx2fc_post_io_req(tgt, new_io_req);
481 spin_lock_bh(&tgt->tgt_lock);
482 if (!rc)
483 goto free_frame;
484 BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
485 }
486abort_io:
487 rc = bnx2fc_initiate_abts(orig_io_req);
488 if (rc != SUCCESS) {
489 BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
490 "failed. issue cleanup\n");
491 bnx2fc_initiate_cleanup(orig_io_req);
492 }
493 } else if (opcode == ELS_LS_ACC) {
494 /* REVISIT: Check if the exchange is already aborted */
495 offset = ntohl(acc->reca_fc4value);
496 e_stat = ntohl(acc->reca_e_stat);
497 if (e_stat & ESB_ST_SEQ_INIT) {
498 BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
499 goto free_frame;
500 }
501 BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
502 e_stat, offset);
503 /* Seq initiative is with us */
504 err_entry = (struct fcoe_err_report_entry *)
505 &orig_io_req->err_entry;
506 sc_cmd = orig_io_req->sc_cmd;
507 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
508 /* SCSI WRITE command */
509 if (offset == orig_io_req->data_xfer_len) {
510 BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
511 /* FCP_RSP lost */
512 r_ctl = FC_RCTL_DD_CMD_STATUS;
513 offset = 0;
514 } else {
515 /* start transmitting from offset */
516 BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
517 send_seq_clnp = true;
518 r_ctl = FC_RCTL_DD_DATA_DESC;
519 if (bnx2fc_initiate_seq_cleanup(orig_io_req,
520 offset, r_ctl))
521 abort_io = true;
522 /* XFER_RDY */
523 }
524 } else {
525 /* SCSI READ command */
526 if (err_entry->data.rx_buf_off ==
527 orig_io_req->data_xfer_len) {
528 /* FCP_RSP lost */
529 BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
530 r_ctl = FC_RCTL_DD_CMD_STATUS;
531 offset = 0;
532 } else {
533 /* request retransmission from this offset */
534 send_seq_clnp = true;
535 offset = err_entry->data.rx_buf_off;
536 BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
537 /* FCP_DATA lost */
538 r_ctl = FC_RCTL_DD_SOL_DATA;
539 if (bnx2fc_initiate_seq_cleanup(orig_io_req,
540 offset, r_ctl))
541 abort_io = true;
542 }
543 }
544 if (abort_io) {
545 rc = bnx2fc_initiate_abts(orig_io_req);
546 if (rc != SUCCESS) {
547 BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
548 " failed. issue cleanup\n");
549 bnx2fc_initiate_cleanup(orig_io_req);
550 }
551 } else if (!send_seq_clnp) {
552 BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
553 spin_unlock_bh(&tgt->tgt_lock);
554 rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
555 spin_lock_bh(&tgt->tgt_lock);
556
557 if (rc) {
558 BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
559 " IO will abort\n");
560 }
561 }
562 }
563free_frame:
564 fc_frame_free(fp);
565free_buf:
566 kfree(buf);
567rec_compl_done:
568 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
569 kfree(cb_arg);
570}
571
572int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
573{
574 struct fc_els_rec rec;
575 struct bnx2fc_rport *tgt = orig_io_req->tgt;
576 struct fc_lport *lport = tgt->rdata->local_port;
577 struct bnx2fc_els_cb_arg *cb_arg = NULL;
578 u32 sid = tgt->sid;
579 u32 r_a_tov = lport->r_a_tov;
580 int rc;
581
582 BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
583 memset(&rec, 0, sizeof(rec));
584
585 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
586 if (!cb_arg) {
587 printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
588 rc = -ENOMEM;
589 goto rec_err;
590 }
591 kref_get(&orig_io_req->refcount);
592
593 cb_arg->aborted_io_req = orig_io_req;
594
595 rec.rec_cmd = ELS_REC;
596 hton24(rec.rec_s_id, sid);
597 rec.rec_ox_id = htons(orig_io_req->xid);
598 rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
599
600 rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
601 bnx2fc_rec_compl, cb_arg,
602 r_a_tov);
603rec_err:
604 if (rc) {
605 BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
606 spin_lock_bh(&tgt->tgt_lock);
607 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
608 spin_unlock_bh(&tgt->tgt_lock);
609 kfree(cb_arg);
610 }
611 return rc;
612}
613
614int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
615{
616 struct fcp_srr srr;
617 struct bnx2fc_rport *tgt = orig_io_req->tgt;
618 struct fc_lport *lport = tgt->rdata->local_port;
619 struct bnx2fc_els_cb_arg *cb_arg = NULL;
620 u32 r_a_tov = lport->r_a_tov;
621 int rc;
622
623 BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
624 memset(&srr, 0, sizeof(srr));
625
626 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
627 if (!cb_arg) {
628 printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
629 rc = -ENOMEM;
630 goto srr_err;
631 }
632 kref_get(&orig_io_req->refcount);
633
634 cb_arg->aborted_io_req = orig_io_req;
635
636 srr.srr_op = ELS_SRR;
637 srr.srr_ox_id = htons(orig_io_req->xid);
638 srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
639 srr.srr_rel_off = htonl(offset);
640 srr.srr_r_ctl = r_ctl;
641 orig_io_req->srr_offset = offset;
642 orig_io_req->srr_rctl = r_ctl;
643
644 rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
645 bnx2fc_srr_compl, cb_arg,
646 r_a_tov);
647srr_err:
648 if (rc) {
649 BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
650 spin_lock_bh(&tgt->tgt_lock);
651 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
652 spin_unlock_bh(&tgt->tgt_lock);
653 kfree(cb_arg);
654 } else
655 set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);
656
657 return rc;
658}
659
256static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, 660static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
257 void *data, u32 data_len, 661 void *data, u32 data_len,
258 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg), 662 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
259 struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec) 663 struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
260{ 664{
261 struct fcoe_port *port = tgt->port; 665 struct fcoe_port *port = tgt->port;
262 struct bnx2fc_hba *hba = port->priv; 666 struct bnx2fc_interface *interface = port->priv;
263 struct fc_rport *rport = tgt->rport; 667 struct fc_rport *rport = tgt->rport;
264 struct fc_lport *lport = port->lport; 668 struct fc_lport *lport = port->lport;
265 struct bnx2fc_cmd *els_req; 669 struct bnx2fc_cmd *els_req;
@@ -274,12 +678,12 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
274 678
275 rc = fc_remote_port_chkready(rport); 679 rc = fc_remote_port_chkready(rport);
276 if (rc) { 680 if (rc) {
277 printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op); 681 printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
278 rc = -EINVAL; 682 rc = -EINVAL;
279 goto els_err; 683 goto els_err;
280 } 684 }
281 if (lport->state != LPORT_ST_READY || !(lport->link_up)) { 685 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
282 printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op); 686 printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
283 rc = -EINVAL; 687 rc = -EINVAL;
284 goto els_err; 688 goto els_err;
285 } 689 }
@@ -305,7 +709,7 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
305 mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req); 709 mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
306 rc = bnx2fc_init_mp_req(els_req); 710 rc = bnx2fc_init_mp_req(els_req);
307 if (rc == FAILED) { 711 if (rc == FAILED) {
308 printk(KERN_ALERT PFX "ELS MP request init failed\n"); 712 printk(KERN_ERR PFX "ELS MP request init failed\n");
309 spin_lock_bh(&tgt->tgt_lock); 713 spin_lock_bh(&tgt->tgt_lock);
310 kref_put(&els_req->refcount, bnx2fc_cmd_release); 714 kref_put(&els_req->refcount, bnx2fc_cmd_release);
311 spin_unlock_bh(&tgt->tgt_lock); 715 spin_unlock_bh(&tgt->tgt_lock);
@@ -324,7 +728,7 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
324 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) { 728 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
325 memcpy(mp_req->req_buf, data, data_len); 729 memcpy(mp_req->req_buf, data, data_len);
326 } else { 730 } else {
327 printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op); 731 printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
328 els_req->cb_func = NULL; 732 els_req->cb_func = NULL;
329 els_req->cb_arg = NULL; 733 els_req->cb_arg = NULL;
330 spin_lock_bh(&tgt->tgt_lock); 734 spin_lock_bh(&tgt->tgt_lock);
@@ -342,9 +746,14 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
342 did = tgt->rport->port_id; 746 did = tgt->rport->port_id;
343 sid = tgt->sid; 747 sid = tgt->sid;
344 748
345 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid, 749 if (op == ELS_SRR)
346 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | 750 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
347 FC_FC_SEQ_INIT, 0); 751 FC_TYPE_FCP, FC_FC_FIRST_SEQ |
752 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
753 else
754 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
755 FC_TYPE_ELS, FC_FC_FIRST_SEQ |
756 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
348 757
349 /* Obtain exchange id */ 758 /* Obtain exchange id */
350 xid = els_req->xid; 759 xid = els_req->xid;
@@ -352,7 +761,8 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
352 index = xid % BNX2FC_TASKS_PER_PAGE; 761 index = xid % BNX2FC_TASKS_PER_PAGE;
353 762
354 /* Initialize task context for this IO request */ 763 /* Initialize task context for this IO request */
355 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 764 task_page = (struct fcoe_task_ctx_entry *)
765 interface->hba->task_ctx[task_idx];
356 task = &(task_page[index]); 766 task = &(task_page[index]);
357 bnx2fc_init_mp_task(els_req, task); 767 bnx2fc_init_mp_task(els_req, task);
358 768
@@ -496,8 +906,8 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
496 void *arg, u32 timeout) 906 void *arg, u32 timeout)
497{ 907{
498 struct fcoe_port *port = lport_priv(lport); 908 struct fcoe_port *port = lport_priv(lport);
499 struct bnx2fc_hba *hba = port->priv; 909 struct bnx2fc_interface *interface = port->priv;
500 struct fcoe_ctlr *fip = &hba->ctlr; 910 struct fcoe_ctlr *fip = &interface->ctlr;
501 struct fc_frame_header *fh = fc_frame_header_get(fp); 911 struct fc_frame_header *fh = fc_frame_header_get(fp);
502 912
503 switch (op) { 913 switch (op) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index a97aff3a0662..7cb2cd48b17b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -3,7 +3,7 @@
3 * cnic modules to create FCoE instances, send/receive non-offloaded 3 * cnic modules to create FCoE instances, send/receive non-offloaded
4 * FIP/FCoE packets, listen to link events etc. 4 * FIP/FCoE packets, listen to link events etc.
5 * 5 *
6 * Copyright (c) 2008 - 2010 Broadcom Corporation 6 * Copyright (c) 2008 - 2011 Broadcom Corporation
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -15,13 +15,14 @@
15#include "bnx2fc.h" 15#include "bnx2fc.h"
16 16
17static struct list_head adapter_list; 17static struct list_head adapter_list;
18static struct list_head if_list;
18static u32 adapter_count; 19static u32 adapter_count;
19static DEFINE_MUTEX(bnx2fc_dev_lock); 20static DEFINE_MUTEX(bnx2fc_dev_lock);
20DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); 21DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
21 22
22#define DRV_MODULE_NAME "bnx2fc" 23#define DRV_MODULE_NAME "bnx2fc"
23#define DRV_MODULE_VERSION BNX2FC_VERSION 24#define DRV_MODULE_VERSION BNX2FC_VERSION
24#define DRV_MODULE_RELDATE "Jun 10, 2011" 25#define DRV_MODULE_RELDATE "Jun 23, 2011"
25 26
26 27
27static char version[] __devinitdata = 28static char version[] __devinitdata =
@@ -61,7 +62,7 @@ static int bnx2fc_disable(struct net_device *netdev);
61 62
62static void bnx2fc_recv_frame(struct sk_buff *skb); 63static void bnx2fc_recv_frame(struct sk_buff *skb);
63 64
64static void bnx2fc_start_disc(struct bnx2fc_hba *hba); 65static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
65static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); 66static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
66static int bnx2fc_net_config(struct fc_lport *lp); 67static int bnx2fc_net_config(struct fc_lport *lp);
67static int bnx2fc_lport_config(struct fc_lport *lport); 68static int bnx2fc_lport_config(struct fc_lport *lport);
@@ -70,18 +71,20 @@ static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
70static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba); 71static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
71static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); 72static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
72static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba); 73static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
73static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, 74static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
74 struct device *parent, int npiv); 75 struct device *parent, int npiv);
75static void bnx2fc_destroy_work(struct work_struct *work); 76static void bnx2fc_destroy_work(struct work_struct *work);
76 77
77static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); 78static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
79static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
80 *phys_dev);
78static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic); 81static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
79 82
80static int bnx2fc_fw_init(struct bnx2fc_hba *hba); 83static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
81static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba); 84static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
82 85
83static void bnx2fc_port_shutdown(struct fc_lport *lport); 86static void bnx2fc_port_shutdown(struct fc_lport *lport);
84static void bnx2fc_stop(struct bnx2fc_hba *hba); 87static void bnx2fc_stop(struct bnx2fc_interface *interface);
85static int __init bnx2fc_mod_init(void); 88static int __init bnx2fc_mod_init(void);
86static void __exit bnx2fc_mod_exit(void); 89static void __exit bnx2fc_mod_exit(void);
87 90
@@ -142,7 +145,8 @@ static void bnx2fc_abort_io(struct fc_lport *lport)
142static void bnx2fc_cleanup(struct fc_lport *lport) 145static void bnx2fc_cleanup(struct fc_lport *lport)
143{ 146{
144 struct fcoe_port *port = lport_priv(lport); 147 struct fcoe_port *port = lport_priv(lport);
145 struct bnx2fc_hba *hba = port->priv; 148 struct bnx2fc_interface *interface = port->priv;
149 struct bnx2fc_hba *hba = interface->hba;
146 struct bnx2fc_rport *tgt; 150 struct bnx2fc_rport *tgt;
147 int i; 151 int i;
148 152
@@ -219,7 +223,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
219 struct fcoe_crc_eof *cp; 223 struct fcoe_crc_eof *cp;
220 struct sk_buff *skb; 224 struct sk_buff *skb;
221 struct fc_frame_header *fh; 225 struct fc_frame_header *fh;
222 struct bnx2fc_hba *hba; 226 struct bnx2fc_interface *interface;
227 struct bnx2fc_hba *hba;
223 struct fcoe_port *port; 228 struct fcoe_port *port;
224 struct fcoe_hdr *hp; 229 struct fcoe_hdr *hp;
225 struct bnx2fc_rport *tgt; 230 struct bnx2fc_rport *tgt;
@@ -230,7 +235,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
230 int wlen, rc = 0; 235 int wlen, rc = 0;
231 236
232 port = (struct fcoe_port *)lport_priv(lport); 237 port = (struct fcoe_port *)lport_priv(lport);
233 hba = port->priv; 238 interface = port->priv;
239 hba = interface->hba;
234 240
235 fh = fc_frame_header_get(fp); 241 fh = fc_frame_header_get(fp);
236 242
@@ -242,12 +248,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
242 } 248 }
243 249
244 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { 250 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
245 if (!hba->ctlr.sel_fcf) { 251 if (!interface->ctlr.sel_fcf) {
246 BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n"); 252 BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
247 kfree_skb(skb); 253 kfree_skb(skb);
248 return -EINVAL; 254 return -EINVAL;
249 } 255 }
250 if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb)) 256 if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb))
251 return 0; 257 return 0;
252 } 258 }
253 259
@@ -316,19 +322,19 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
316 skb_reset_network_header(skb); 322 skb_reset_network_header(skb);
317 skb->mac_len = elen; 323 skb->mac_len = elen;
318 skb->protocol = htons(ETH_P_FCOE); 324 skb->protocol = htons(ETH_P_FCOE);
319 skb->dev = hba->netdev; 325 skb->dev = interface->netdev;
320 326
321 /* fill up mac and fcoe headers */ 327 /* fill up mac and fcoe headers */
322 eh = eth_hdr(skb); 328 eh = eth_hdr(skb);
323 eh->h_proto = htons(ETH_P_FCOE); 329 eh->h_proto = htons(ETH_P_FCOE);
324 if (hba->ctlr.map_dest) 330 if (interface->ctlr.map_dest)
325 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); 331 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
326 else 332 else
327 /* insert GW address */ 333 /* insert GW address */
328 memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN); 334 memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN);
329 335
330 if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN)) 336 if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN))
331 memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN); 337 memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN);
332 else 338 else
333 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); 339 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
334 340
@@ -377,22 +383,23 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
377 struct packet_type *ptype, struct net_device *olddev) 383 struct packet_type *ptype, struct net_device *olddev)
378{ 384{
379 struct fc_lport *lport; 385 struct fc_lport *lport;
380 struct bnx2fc_hba *hba; 386 struct bnx2fc_interface *interface;
381 struct fc_frame_header *fh; 387 struct fc_frame_header *fh;
382 struct fcoe_rcv_info *fr; 388 struct fcoe_rcv_info *fr;
383 struct fcoe_percpu_s *bg; 389 struct fcoe_percpu_s *bg;
384 unsigned short oxid; 390 unsigned short oxid;
385 391
386 hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type); 392 interface = container_of(ptype, struct bnx2fc_interface,
387 lport = hba->ctlr.lp; 393 fcoe_packet_type);
394 lport = interface->ctlr.lp;
388 395
389 if (unlikely(lport == NULL)) { 396 if (unlikely(lport == NULL)) {
390 printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n"); 397 printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
391 goto err; 398 goto err;
392 } 399 }
393 400
394 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { 401 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
395 printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n"); 402 printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
396 goto err; 403 goto err;
397 } 404 }
398 405
@@ -411,7 +418,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
411 418
412 fr = fcoe_dev_from_skb(skb); 419 fr = fcoe_dev_from_skb(skb);
413 fr->fr_dev = lport; 420 fr->fr_dev = lport;
414 fr->ptype = ptype;
415 421
416 bg = &bnx2fc_global; 422 bg = &bnx2fc_global;
417 spin_lock_bh(&bg->fcoe_rx_list.lock); 423 spin_lock_bh(&bg->fcoe_rx_list.lock);
@@ -469,7 +475,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
469 fr = fcoe_dev_from_skb(skb); 475 fr = fcoe_dev_from_skb(skb);
470 lport = fr->fr_dev; 476 lport = fr->fr_dev;
471 if (unlikely(lport == NULL)) { 477 if (unlikely(lport == NULL)) {
472 printk(KERN_ALERT PFX "Invalid lport struct\n"); 478 printk(KERN_ERR PFX "Invalid lport struct\n");
473 kfree_skb(skb); 479 kfree_skb(skb);
474 return; 480 return;
475 } 481 }
@@ -594,7 +600,8 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
594 struct fc_host_statistics *bnx2fc_stats; 600 struct fc_host_statistics *bnx2fc_stats;
595 struct fc_lport *lport = shost_priv(shost); 601 struct fc_lport *lport = shost_priv(shost);
596 struct fcoe_port *port = lport_priv(lport); 602 struct fcoe_port *port = lport_priv(lport);
597 struct bnx2fc_hba *hba = port->priv; 603 struct bnx2fc_interface *interface = port->priv;
604 struct bnx2fc_hba *hba = interface->hba;
598 struct fcoe_statistics_params *fw_stats; 605 struct fcoe_statistics_params *fw_stats;
599 int rc = 0; 606 int rc = 0;
600 607
@@ -631,7 +638,7 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
631static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) 638static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
632{ 639{
633 struct fcoe_port *port = lport_priv(lport); 640 struct fcoe_port *port = lport_priv(lport);
634 struct bnx2fc_hba *hba = port->priv; 641 struct bnx2fc_interface *interface = port->priv;
635 struct Scsi_Host *shost = lport->host; 642 struct Scsi_Host *shost = lport->host;
636 int rc = 0; 643 int rc = 0;
637 644
@@ -654,7 +661,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
654 fc_host_max_npiv_vports(lport->host) = USHRT_MAX; 661 fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
655 sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s", 662 sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
656 BNX2FC_NAME, BNX2FC_VERSION, 663 BNX2FC_NAME, BNX2FC_VERSION,
657 hba->netdev->name); 664 interface->netdev->name);
658 665
659 return 0; 666 return 0;
660} 667}
@@ -662,8 +669,8 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
662static void bnx2fc_link_speed_update(struct fc_lport *lport) 669static void bnx2fc_link_speed_update(struct fc_lport *lport)
663{ 670{
664 struct fcoe_port *port = lport_priv(lport); 671 struct fcoe_port *port = lport_priv(lport);
665 struct bnx2fc_hba *hba = port->priv; 672 struct bnx2fc_interface *interface = port->priv;
666 struct net_device *netdev = hba->netdev; 673 struct net_device *netdev = interface->netdev;
667 struct ethtool_cmd ecmd; 674 struct ethtool_cmd ecmd;
668 675
669 if (!dev_ethtool_get_settings(netdev, &ecmd)) { 676 if (!dev_ethtool_get_settings(netdev, &ecmd)) {
@@ -691,7 +698,8 @@ static void bnx2fc_link_speed_update(struct fc_lport *lport)
691static int bnx2fc_link_ok(struct fc_lport *lport) 698static int bnx2fc_link_ok(struct fc_lport *lport)
692{ 699{
693 struct fcoe_port *port = lport_priv(lport); 700 struct fcoe_port *port = lport_priv(lport);
694 struct bnx2fc_hba *hba = port->priv; 701 struct bnx2fc_interface *interface = port->priv;
702 struct bnx2fc_hba *hba = interface->hba;
695 struct net_device *dev = hba->phys_dev; 703 struct net_device *dev = hba->phys_dev;
696 int rc = 0; 704 int rc = 0;
697 705
@@ -713,7 +721,7 @@ static int bnx2fc_link_ok(struct fc_lport *lport)
713 */ 721 */
714void bnx2fc_get_link_state(struct bnx2fc_hba *hba) 722void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
715{ 723{
716 if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state)) 724 if (test_bit(__LINK_STATE_NOCARRIER, &hba->phys_dev->state))
717 set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); 725 set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
718 else 726 else
719 clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); 727 clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
@@ -722,11 +730,13 @@ void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
722static int bnx2fc_net_config(struct fc_lport *lport) 730static int bnx2fc_net_config(struct fc_lport *lport)
723{ 731{
724 struct bnx2fc_hba *hba; 732 struct bnx2fc_hba *hba;
733 struct bnx2fc_interface *interface;
725 struct fcoe_port *port; 734 struct fcoe_port *port;
726 u64 wwnn, wwpn; 735 u64 wwnn, wwpn;
727 736
728 port = lport_priv(lport); 737 port = lport_priv(lport);
729 hba = port->priv; 738 interface = port->priv;
739 hba = interface->hba;
730 740
731 /* require support for get_pauseparam ethtool op. */ 741 /* require support for get_pauseparam ethtool op. */
732 if (!hba->phys_dev->ethtool_ops || 742 if (!hba->phys_dev->ethtool_ops ||
@@ -743,11 +753,11 @@ static int bnx2fc_net_config(struct fc_lport *lport)
743 bnx2fc_link_speed_update(lport); 753 bnx2fc_link_speed_update(lport);
744 754
745 if (!lport->vport) { 755 if (!lport->vport) {
746 wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0); 756 wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 1, 0);
747 BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); 757 BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
748 fc_set_wwnn(lport, wwnn); 758 fc_set_wwnn(lport, wwnn);
749 759
750 wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0); 760 wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 2, 0);
751 BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); 761 BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
752 fc_set_wwpn(lport, wwpn); 762 fc_set_wwpn(lport, wwpn);
753 } 763 }
@@ -759,9 +769,9 @@ static void bnx2fc_destroy_timer(unsigned long data)
759{ 769{
760 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data; 770 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
761 771
762 BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR:bnx2fc_destroy_timer - " 772 BNX2FC_MISC_DBG("ERROR:bnx2fc_destroy_timer - "
763 "Destroy compl not received!!\n"); 773 "Destroy compl not received!!\n");
764 hba->flags |= BNX2FC_FLAG_DESTROY_CMPL; 774 set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
765 wake_up_interruptible(&hba->destroy_wait); 775 wake_up_interruptible(&hba->destroy_wait);
766} 776}
767 777
@@ -779,54 +789,35 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
779 u16 vlan_id) 789 u16 vlan_id)
780{ 790{
781 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; 791 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
782 struct fc_lport *lport = hba->ctlr.lp; 792 struct fc_lport *lport;
783 struct fc_lport *vport; 793 struct fc_lport *vport;
794 struct bnx2fc_interface *interface;
795 int wait_for_upload = 0;
784 u32 link_possible = 1; 796 u32 link_possible = 1;
785 797
786 /* Ignore vlans for now */ 798 /* Ignore vlans for now */
787 if (vlan_id != 0) 799 if (vlan_id != 0)
788 return; 800 return;
789 801
790 if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
791 BNX2FC_MISC_DBG("driver not ready. event=%s %ld\n",
792 hba->netdev->name, event);
793 return;
794 }
795
796 /*
797 * ASSUMPTION:
798 * indicate_netevent cannot be called from cnic unless bnx2fc
799 * does register_device
800 */
801 BUG_ON(!lport);
802
803 BNX2FC_HBA_DBG(lport, "enter netevent handler - event=%s %ld\n",
804 hba->netdev->name, event);
805
806 switch (event) { 802 switch (event) {
807 case NETDEV_UP: 803 case NETDEV_UP:
808 BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n",
809 hba->adapter_state);
810 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) 804 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
811 printk(KERN_ERR "indicate_netevent: "\ 805 printk(KERN_ERR "indicate_netevent: "\
812 "adapter is not UP!!\n"); 806 "hba is not UP!!\n");
813 break; 807 break;
814 808
815 case NETDEV_DOWN: 809 case NETDEV_DOWN:
816 BNX2FC_HBA_DBG(lport, "Port down\n");
817 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); 810 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
818 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); 811 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
819 link_possible = 0; 812 link_possible = 0;
820 break; 813 break;
821 814
822 case NETDEV_GOING_DOWN: 815 case NETDEV_GOING_DOWN:
823 BNX2FC_HBA_DBG(lport, "Port going down\n");
824 set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); 816 set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
825 link_possible = 0; 817 link_possible = 0;
826 break; 818 break;
827 819
828 case NETDEV_CHANGE: 820 case NETDEV_CHANGE:
829 BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n");
830 break; 821 break;
831 822
832 default: 823 default:
@@ -834,15 +825,22 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
834 return; 825 return;
835 } 826 }
836 827
837 bnx2fc_link_speed_update(lport); 828 mutex_lock(&bnx2fc_dev_lock);
829 list_for_each_entry(interface, &if_list, list) {
838 830
839 if (link_possible && !bnx2fc_link_ok(lport)) { 831 if (interface->hba != hba)
840 printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n"); 832 continue;
841 fcoe_ctlr_link_up(&hba->ctlr); 833
842 } else { 834 lport = interface->ctlr.lp;
843 printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n"); 835 BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
844 if (fcoe_ctlr_link_down(&hba->ctlr)) { 836 interface->netdev->name, event);
845 clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); 837
838 bnx2fc_link_speed_update(lport);
839
840 if (link_possible && !bnx2fc_link_ok(lport)) {
841 printk(KERN_ERR "indicate_netevent: ctlr_link_up\n");
842 fcoe_ctlr_link_up(&interface->ctlr);
843 } else if (fcoe_ctlr_link_down(&interface->ctlr)) {
846 mutex_lock(&lport->lp_mutex); 844 mutex_lock(&lport->lp_mutex);
847 list_for_each_entry(vport, &lport->vports, list) 845 list_for_each_entry(vport, &lport->vports, list)
848 fc_host_port_type(vport->host) = 846 fc_host_port_type(vport->host) =
@@ -853,24 +851,26 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
853 get_cpu())->LinkFailureCount++; 851 get_cpu())->LinkFailureCount++;
854 put_cpu(); 852 put_cpu();
855 fcoe_clean_pending_queue(lport); 853 fcoe_clean_pending_queue(lport);
854 wait_for_upload = 1;
855 }
856 }
857 mutex_unlock(&bnx2fc_dev_lock);
856 858
857 init_waitqueue_head(&hba->shutdown_wait); 859 if (wait_for_upload) {
858 BNX2FC_HBA_DBG(lport, "indicate_netevent " 860 clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
859 "num_ofld_sess = %d\n", 861 init_waitqueue_head(&hba->shutdown_wait);
860 hba->num_ofld_sess); 862 BNX2FC_MISC_DBG("indicate_netevent "
861 hba->wait_for_link_down = 1; 863 "num_ofld_sess = %d\n",
862 BNX2FC_HBA_DBG(lport, "waiting for uploads to " 864 hba->num_ofld_sess);
863 "compl proc = %s\n", 865 hba->wait_for_link_down = 1;
864 current->comm); 866 wait_event_interruptible(hba->shutdown_wait,
865 wait_event_interruptible(hba->shutdown_wait, 867 (hba->num_ofld_sess == 0));
866 (hba->num_ofld_sess == 0)); 868 BNX2FC_MISC_DBG("wakeup - num_ofld_sess = %d\n",
867 BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n",
868 hba->num_ofld_sess); 869 hba->num_ofld_sess);
869 hba->wait_for_link_down = 0; 870 hba->wait_for_link_down = 0;
870 871
871 if (signal_pending(current)) 872 if (signal_pending(current))
872 flush_signals(current); 873 flush_signals(current);
873 }
874 } 874 }
875} 875}
876 876
@@ -889,23 +889,12 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
889 889
890static int bnx2fc_em_config(struct fc_lport *lport) 890static int bnx2fc_em_config(struct fc_lport *lport)
891{ 891{
892 struct fcoe_port *port = lport_priv(lport);
893 struct bnx2fc_hba *hba = port->priv;
894
895 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID, 892 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
896 FCOE_MAX_XID, NULL)) { 893 FCOE_MAX_XID, NULL)) {
897 printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n"); 894 printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
898 return -ENOMEM; 895 return -ENOMEM;
899 } 896 }
900 897
901 hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
902 BNX2FC_MAX_XID);
903
904 if (!hba->cmd_mgr) {
905 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
906 fc_exch_mgr_free(lport);
907 return -ENOMEM;
908 }
909 return 0; 898 return 0;
910} 899}
911 900
@@ -918,11 +907,8 @@ static int bnx2fc_lport_config(struct fc_lport *lport)
918 lport->e_d_tov = 2 * 1000; 907 lport->e_d_tov = 2 * 1000;
919 lport->r_a_tov = 10 * 1000; 908 lport->r_a_tov = 10 * 1000;
920 909
921 /* REVISIT: enable when supporting tape devices
922 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 910 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
923 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); 911 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
924 */
925 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS);
926 lport->does_npiv = 1; 912 lport->does_npiv = 1;
927 913
928 memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen)); 914 memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen));
@@ -952,9 +938,10 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
952 struct packet_type *ptype, 938 struct packet_type *ptype,
953 struct net_device *orig_dev) 939 struct net_device *orig_dev)
954{ 940{
955 struct bnx2fc_hba *hba; 941 struct bnx2fc_interface *interface;
956 hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type); 942 interface = container_of(ptype, struct bnx2fc_interface,
957 fcoe_ctlr_recv(&hba->ctlr, skb); 943 fip_packet_type);
944 fcoe_ctlr_recv(&interface->ctlr, skb);
958 return 0; 945 return 0;
959} 946}
960 947
@@ -1005,17 +992,17 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
1005 struct Scsi_Host *shost = vport_to_shost(vport); 992 struct Scsi_Host *shost = vport_to_shost(vport);
1006 struct fc_lport *n_port = shost_priv(shost); 993 struct fc_lport *n_port = shost_priv(shost);
1007 struct fcoe_port *port = lport_priv(n_port); 994 struct fcoe_port *port = lport_priv(n_port);
1008 struct bnx2fc_hba *hba = port->priv; 995 struct bnx2fc_interface *interface = port->priv;
1009 struct net_device *netdev = hba->netdev; 996 struct net_device *netdev = interface->netdev;
1010 struct fc_lport *vn_port; 997 struct fc_lport *vn_port;
1011 998
1012 if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { 999 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
1013 printk(KERN_ERR PFX "vn ports cannot be created on" 1000 printk(KERN_ERR PFX "vn ports cannot be created on"
1014 "this hba\n"); 1001 "this interface\n");
1015 return -EIO; 1002 return -EIO;
1016 } 1003 }
1017 mutex_lock(&bnx2fc_dev_lock); 1004 mutex_lock(&bnx2fc_dev_lock);
1018 vn_port = bnx2fc_if_create(hba, &vport->dev, 1); 1005 vn_port = bnx2fc_if_create(interface, &vport->dev, 1);
1019 mutex_unlock(&bnx2fc_dev_lock); 1006 mutex_unlock(&bnx2fc_dev_lock);
1020 1007
1021 if (IS_ERR(vn_port)) { 1008 if (IS_ERR(vn_port)) {
@@ -1065,10 +1052,10 @@ static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
1065} 1052}
1066 1053
1067 1054
1068static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba) 1055static int bnx2fc_netdev_setup(struct bnx2fc_interface *interface)
1069{ 1056{
1070 struct net_device *netdev = hba->netdev; 1057 struct net_device *netdev = interface->netdev;
1071 struct net_device *physdev = hba->phys_dev; 1058 struct net_device *physdev = interface->hba->phys_dev;
1072 struct netdev_hw_addr *ha; 1059 struct netdev_hw_addr *ha;
1073 int sel_san_mac = 0; 1060 int sel_san_mac = 0;
1074 1061
@@ -1083,7 +1070,8 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
1083 1070
1084 if ((ha->type == NETDEV_HW_ADDR_T_SAN) && 1071 if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
1085 (is_valid_ether_addr(ha->addr))) { 1072 (is_valid_ether_addr(ha->addr))) {
1086 memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN); 1073 memcpy(interface->ctlr.ctl_src_addr, ha->addr,
1074 ETH_ALEN);
1087 sel_san_mac = 1; 1075 sel_san_mac = 1;
1088 BNX2FC_MISC_DBG("Found SAN MAC\n"); 1076 BNX2FC_MISC_DBG("Found SAN MAC\n");
1089 } 1077 }
@@ -1093,15 +1081,15 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
1093 if (!sel_san_mac) 1081 if (!sel_san_mac)
1094 return -ENODEV; 1082 return -ENODEV;
1095 1083
1096 hba->fip_packet_type.func = bnx2fc_fip_recv; 1084 interface->fip_packet_type.func = bnx2fc_fip_recv;
1097 hba->fip_packet_type.type = htons(ETH_P_FIP); 1085 interface->fip_packet_type.type = htons(ETH_P_FIP);
1098 hba->fip_packet_type.dev = netdev; 1086 interface->fip_packet_type.dev = netdev;
1099 dev_add_pack(&hba->fip_packet_type); 1087 dev_add_pack(&interface->fip_packet_type);
1100 1088
1101 hba->fcoe_packet_type.func = bnx2fc_rcv; 1089 interface->fcoe_packet_type.func = bnx2fc_rcv;
1102 hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); 1090 interface->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
1103 hba->fcoe_packet_type.dev = netdev; 1091 interface->fcoe_packet_type.dev = netdev;
1104 dev_add_pack(&hba->fcoe_packet_type); 1092 dev_add_pack(&interface->fcoe_packet_type);
1105 1093
1106 return 0; 1094 return 0;
1107} 1095}
@@ -1137,53 +1125,54 @@ static void bnx2fc_release_transport(void)
1137 1125
1138static void bnx2fc_interface_release(struct kref *kref) 1126static void bnx2fc_interface_release(struct kref *kref)
1139{ 1127{
1140 struct bnx2fc_hba *hba; 1128 struct bnx2fc_interface *interface;
1141 struct net_device *netdev; 1129 struct net_device *netdev;
1142 struct net_device *phys_dev;
1143 1130
1144 hba = container_of(kref, struct bnx2fc_hba, kref); 1131 interface = container_of(kref, struct bnx2fc_interface, kref);
1145 BNX2FC_MISC_DBG("Interface is being released\n"); 1132 BNX2FC_MISC_DBG("Interface is being released\n");
1146 1133
1147 netdev = hba->netdev; 1134 netdev = interface->netdev;
1148 phys_dev = hba->phys_dev;
1149 1135
1150 /* tear-down FIP controller */ 1136 /* tear-down FIP controller */
1151 if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done)) 1137 if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
1152 fcoe_ctlr_destroy(&hba->ctlr); 1138 fcoe_ctlr_destroy(&interface->ctlr);
1139
1140 kfree(interface);
1153 1141
1154 /* Free the command manager */
1155 if (hba->cmd_mgr) {
1156 bnx2fc_cmd_mgr_free(hba->cmd_mgr);
1157 hba->cmd_mgr = NULL;
1158 }
1159 dev_put(netdev); 1142 dev_put(netdev);
1160 module_put(THIS_MODULE); 1143 module_put(THIS_MODULE);
1161} 1144}
1162 1145
1163static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba) 1146static inline void bnx2fc_interface_get(struct bnx2fc_interface *interface)
1164{ 1147{
1165 kref_get(&hba->kref); 1148 kref_get(&interface->kref);
1166} 1149}
1167 1150
1168static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba) 1151static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface)
1169{ 1152{
1170 kref_put(&hba->kref, bnx2fc_interface_release); 1153 kref_put(&interface->kref, bnx2fc_interface_release);
1171} 1154}
1172static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba) 1155static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
1173{ 1156{
1157 /* Free the command manager */
1158 if (hba->cmd_mgr) {
1159 bnx2fc_cmd_mgr_free(hba->cmd_mgr);
1160 hba->cmd_mgr = NULL;
1161 }
1162 kfree(hba->tgt_ofld_list);
1174 bnx2fc_unbind_pcidev(hba); 1163 bnx2fc_unbind_pcidev(hba);
1175 kfree(hba); 1164 kfree(hba);
1176} 1165}
1177 1166
1178/** 1167/**
1179 * bnx2fc_interface_create - create a new fcoe instance 1168 * bnx2fc_hba_create - create a new bnx2fc hba
1180 * 1169 *
1181 * @cnic: pointer to cnic device 1170 * @cnic: pointer to cnic device
1182 * 1171 *
1183 * Creates a new FCoE instance on the given device which include allocating 1172 * Creates a new FCoE hba on the given device.
1184 * hba structure, scsi_host and lport structures. 1173 *
1185 */ 1174 */
1186static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic) 1175static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1187{ 1176{
1188 struct bnx2fc_hba *hba; 1177 struct bnx2fc_hba *hba;
1189 int rc; 1178 int rc;
@@ -1198,65 +1187,83 @@ static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic)
1198 1187
1199 hba->cnic = cnic; 1188 hba->cnic = cnic;
1200 rc = bnx2fc_bind_pcidev(hba); 1189 rc = bnx2fc_bind_pcidev(hba);
1201 if (rc) 1190 if (rc) {
1191 printk(KERN_ERR PFX "create_adapter: bind error\n");
1202 goto bind_err; 1192 goto bind_err;
1193 }
1203 hba->phys_dev = cnic->netdev; 1194 hba->phys_dev = cnic->netdev;
1204 /* will get overwritten after we do vlan discovery */ 1195 hba->next_conn_id = 0;
1205 hba->netdev = hba->phys_dev; 1196
1197 hba->tgt_ofld_list =
1198 kzalloc(sizeof(struct bnx2fc_rport *) * BNX2FC_NUM_MAX_SESS,
1199 GFP_KERNEL);
1200 if (!hba->tgt_ofld_list) {
1201 printk(KERN_ERR PFX "Unable to allocate tgt offload list\n");
1202 goto tgtofld_err;
1203 }
1204
1205 hba->num_ofld_sess = 0;
1206
1207 hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
1208 BNX2FC_MAX_XID);
1209 if (!hba->cmd_mgr) {
1210 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
1211 goto cmgr_err;
1212 }
1206 1213
1207 init_waitqueue_head(&hba->shutdown_wait); 1214 init_waitqueue_head(&hba->shutdown_wait);
1208 init_waitqueue_head(&hba->destroy_wait); 1215 init_waitqueue_head(&hba->destroy_wait);
1216 INIT_LIST_HEAD(&hba->vports);
1209 1217
1210 return hba; 1218 return hba;
1219
1220cmgr_err:
1221 kfree(hba->tgt_ofld_list);
1222tgtofld_err:
1223 bnx2fc_unbind_pcidev(hba);
1211bind_err: 1224bind_err:
1212 printk(KERN_ERR PFX "create_interface: bind error\n");
1213 kfree(hba); 1225 kfree(hba);
1214 return NULL; 1226 return NULL;
1215} 1227}
1216 1228
1217static int bnx2fc_interface_setup(struct bnx2fc_hba *hba, 1229struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
1218 enum fip_state fip_mode) 1230 struct net_device *netdev,
1231 enum fip_state fip_mode)
1219{ 1232{
1233 struct bnx2fc_interface *interface;
1220 int rc = 0; 1234 int rc = 0;
1221 struct net_device *netdev = hba->netdev;
1222 struct fcoe_ctlr *fip = &hba->ctlr;
1223 1235
1236 interface = kzalloc(sizeof(*interface), GFP_KERNEL);
1237 if (!interface) {
1238 printk(KERN_ERR PFX "Unable to allocate interface structure\n");
1239 return NULL;
1240 }
1224 dev_hold(netdev); 1241 dev_hold(netdev);
1225 kref_init(&hba->kref); 1242 kref_init(&interface->kref);
1226 1243 interface->hba = hba;
1227 hba->flags = 0; 1244 interface->netdev = netdev;
1228 1245
1229 /* Initialize FIP */ 1246 /* Initialize FIP */
1230 memset(fip, 0, sizeof(*fip)); 1247 fcoe_ctlr_init(&interface->ctlr, fip_mode);
1231 fcoe_ctlr_init(fip, fip_mode); 1248 interface->ctlr.send = bnx2fc_fip_send;
1232 hba->ctlr.send = bnx2fc_fip_send; 1249 interface->ctlr.update_mac = bnx2fc_update_src_mac;
1233 hba->ctlr.update_mac = bnx2fc_update_src_mac; 1250 interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
1234 hba->ctlr.get_src_addr = bnx2fc_get_src_mac; 1251 set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
1235 set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
1236
1237 INIT_LIST_HEAD(&hba->vports);
1238 rc = bnx2fc_netdev_setup(hba);
1239 if (rc)
1240 goto setup_err;
1241 1252
1242 hba->next_conn_id = 0; 1253 rc = bnx2fc_netdev_setup(interface);
1254 if (!rc)
1255 return interface;
1243 1256
1244 memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list)); 1257 fcoe_ctlr_destroy(&interface->ctlr);
1245 hba->num_ofld_sess = 0;
1246
1247 return 0;
1248
1249setup_err:
1250 fcoe_ctlr_destroy(&hba->ctlr);
1251 dev_put(netdev); 1258 dev_put(netdev);
1252 bnx2fc_interface_put(hba); 1259 kfree(interface);
1253 return rc; 1260 return NULL;
1254} 1261}
1255 1262
1256/** 1263/**
1257 * bnx2fc_if_create - Create FCoE instance on a given interface 1264 * bnx2fc_if_create - Create FCoE instance on a given interface
1258 * 1265 *
1259 * @hba: FCoE interface to create a local port on 1266 * @interface: FCoE interface to create a local port on
1260 * @parent: Device pointer to be the parent in sysfs for the SCSI host 1267 * @parent: Device pointer to be the parent in sysfs for the SCSI host
1261 * @npiv: Indicates if the port is vport or not 1268 * @npiv: Indicates if the port is vport or not
1262 * 1269 *
@@ -1264,7 +1271,7 @@ setup_err:
1264 * 1271 *
1265 * Returns: Allocated fc_lport or an error pointer 1272 * Returns: Allocated fc_lport or an error pointer
1266 */ 1273 */
1267static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, 1274static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1268 struct device *parent, int npiv) 1275 struct device *parent, int npiv)
1269{ 1276{
1270 struct fc_lport *lport, *n_port; 1277 struct fc_lport *lport, *n_port;
@@ -1272,11 +1279,12 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1272 struct Scsi_Host *shost; 1279 struct Scsi_Host *shost;
1273 struct fc_vport *vport = dev_to_vport(parent); 1280 struct fc_vport *vport = dev_to_vport(parent);
1274 struct bnx2fc_lport *blport; 1281 struct bnx2fc_lport *blport;
1282 struct bnx2fc_hba *hba;
1275 int rc = 0; 1283 int rc = 0;
1276 1284
1277 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL); 1285 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
1278 if (!blport) { 1286 if (!blport) {
1279 BNX2FC_HBA_DBG(hba->ctlr.lp, "Unable to alloc bnx2fc_lport\n"); 1287 BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n");
1280 return NULL; 1288 return NULL;
1281 } 1289 }
1282 1290
@@ -1293,7 +1301,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1293 shost = lport->host; 1301 shost = lport->host;
1294 port = lport_priv(lport); 1302 port = lport_priv(lport);
1295 port->lport = lport; 1303 port->lport = lport;
1296 port->priv = hba; 1304 port->priv = interface;
1297 INIT_WORK(&port->destroy_work, bnx2fc_destroy_work); 1305 INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
1298 1306
1299 /* Configure fcoe_port */ 1307 /* Configure fcoe_port */
@@ -1317,7 +1325,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1317 rc = bnx2fc_shost_config(lport, parent); 1325 rc = bnx2fc_shost_config(lport, parent);
1318 if (rc) { 1326 if (rc) {
1319 printk(KERN_ERR PFX "Couldnt configure shost for %s\n", 1327 printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
1320 hba->netdev->name); 1328 interface->netdev->name);
1321 goto lp_config_err; 1329 goto lp_config_err;
1322 } 1330 }
1323 1331
@@ -1343,8 +1351,9 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1343 goto shost_err; 1351 goto shost_err;
1344 } 1352 }
1345 1353
1346 bnx2fc_interface_get(hba); 1354 bnx2fc_interface_get(interface);
1347 1355
1356 hba = interface->hba;
1348 spin_lock_bh(&hba->hba_lock); 1357 spin_lock_bh(&hba->hba_lock);
1349 blport->lport = lport; 1358 blport->lport = lport;
1350 list_add_tail(&blport->list, &hba->vports); 1359 list_add_tail(&blport->list, &hba->vports);
@@ -1361,21 +1370,19 @@ free_blport:
1361 return NULL; 1370 return NULL;
1362} 1371}
1363 1372
1364static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba) 1373static void bnx2fc_netdev_cleanup(struct bnx2fc_interface *interface)
1365{ 1374{
1366 /* Dont listen for Ethernet packets anymore */ 1375 /* Dont listen for Ethernet packets anymore */
1367 __dev_remove_pack(&hba->fcoe_packet_type); 1376 __dev_remove_pack(&interface->fcoe_packet_type);
1368 __dev_remove_pack(&hba->fip_packet_type); 1377 __dev_remove_pack(&interface->fip_packet_type);
1369 synchronize_net(); 1378 synchronize_net();
1370} 1379}
1371 1380
1372static void bnx2fc_if_destroy(struct fc_lport *lport) 1381static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba)
1373{ 1382{
1374 struct fcoe_port *port = lport_priv(lport); 1383 struct fcoe_port *port = lport_priv(lport);
1375 struct bnx2fc_hba *hba = port->priv;
1376 struct bnx2fc_lport *blport, *tmp; 1384 struct bnx2fc_lport *blport, *tmp;
1377 1385
1378 BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
1379 /* Stop the transmit retry timer */ 1386 /* Stop the transmit retry timer */
1380 del_timer_sync(&port->timer); 1387 del_timer_sync(&port->timer);
1381 1388
@@ -1409,8 +1416,6 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
1409 1416
1410 /* Release Scsi_Host */ 1417 /* Release Scsi_Host */
1411 scsi_host_put(lport->host); 1418 scsi_host_put(lport->host);
1412
1413 bnx2fc_interface_put(hba);
1414} 1419}
1415 1420
1416/** 1421/**
@@ -1425,46 +1430,31 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
1425 */ 1430 */
1426static int bnx2fc_destroy(struct net_device *netdev) 1431static int bnx2fc_destroy(struct net_device *netdev)
1427{ 1432{
1428 struct bnx2fc_hba *hba = NULL; 1433 struct bnx2fc_interface *interface = NULL;
1429 struct net_device *phys_dev; 1434 struct bnx2fc_hba *hba;
1435 struct fc_lport *lport;
1430 int rc = 0; 1436 int rc = 0;
1431 1437
1432 rtnl_lock(); 1438 rtnl_lock();
1433
1434 mutex_lock(&bnx2fc_dev_lock); 1439 mutex_lock(&bnx2fc_dev_lock);
1435 /* obtain physical netdev */
1436 if (netdev->priv_flags & IFF_802_1Q_VLAN)
1437 phys_dev = vlan_dev_real_dev(netdev);
1438 else {
1439 printk(KERN_ERR PFX "Not a vlan device\n");
1440 rc = -ENODEV;
1441 goto netdev_err;
1442 }
1443 1440
1444 hba = bnx2fc_hba_lookup(phys_dev); 1441 interface = bnx2fc_interface_lookup(netdev);
1445 if (!hba || !hba->ctlr.lp) { 1442 if (!interface || !interface->ctlr.lp) {
1446 rc = -ENODEV; 1443 rc = -ENODEV;
1447 printk(KERN_ERR PFX "bnx2fc_destroy: hba or lport not found\n"); 1444 printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
1448 goto netdev_err;
1449 }
1450
1451 if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
1452 printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n");
1453 goto netdev_err; 1445 goto netdev_err;
1454 } 1446 }
1455 1447
1456 bnx2fc_netdev_cleanup(hba); 1448 hba = interface->hba;
1457
1458 bnx2fc_stop(hba);
1459
1460 bnx2fc_if_destroy(hba->ctlr.lp);
1461 1449
1462 destroy_workqueue(hba->timer_work_queue); 1450 bnx2fc_netdev_cleanup(interface);
1451 lport = interface->ctlr.lp;
1452 bnx2fc_stop(interface);
1453 list_del(&interface->list);
1454 destroy_workqueue(interface->timer_work_queue);
1455 bnx2fc_interface_put(interface);
1456 bnx2fc_if_destroy(lport, hba);
1463 1457
1464 if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
1465 bnx2fc_fw_destroy(hba);
1466
1467 clear_bit(BNX2FC_CREATE_DONE, &hba->init_done);
1468netdev_err: 1458netdev_err:
1469 mutex_unlock(&bnx2fc_dev_lock); 1459 mutex_unlock(&bnx2fc_dev_lock);
1470 rtnl_unlock(); 1460 rtnl_unlock();
@@ -1475,16 +1465,20 @@ static void bnx2fc_destroy_work(struct work_struct *work)
1475{ 1465{
1476 struct fcoe_port *port; 1466 struct fcoe_port *port;
1477 struct fc_lport *lport; 1467 struct fc_lport *lport;
1468 struct bnx2fc_interface *interface;
1469 struct bnx2fc_hba *hba;
1478 1470
1479 port = container_of(work, struct fcoe_port, destroy_work); 1471 port = container_of(work, struct fcoe_port, destroy_work);
1480 lport = port->lport; 1472 lport = port->lport;
1473 interface = port->priv;
1474 hba = interface->hba;
1481 1475
1482 BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n"); 1476 BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
1483 1477
1484 bnx2fc_port_shutdown(lport); 1478 bnx2fc_port_shutdown(lport);
1485 rtnl_lock(); 1479 rtnl_lock();
1486 mutex_lock(&bnx2fc_dev_lock); 1480 mutex_lock(&bnx2fc_dev_lock);
1487 bnx2fc_if_destroy(lport); 1481 bnx2fc_if_destroy(lport, hba);
1488 mutex_unlock(&bnx2fc_dev_lock); 1482 mutex_unlock(&bnx2fc_dev_lock);
1489 rtnl_unlock(); 1483 rtnl_unlock();
1490} 1484}
@@ -1556,28 +1550,27 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
1556static void bnx2fc_ulp_start(void *handle) 1550static void bnx2fc_ulp_start(void *handle)
1557{ 1551{
1558 struct bnx2fc_hba *hba = handle; 1552 struct bnx2fc_hba *hba = handle;
1559 struct fc_lport *lport = hba->ctlr.lp; 1553 struct bnx2fc_interface *interface;
1554 struct fc_lport *lport;
1560 1555
1561 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1562 mutex_lock(&bnx2fc_dev_lock); 1556 mutex_lock(&bnx2fc_dev_lock);
1563 1557
1564 if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) 1558 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
1565 goto start_disc;
1566
1567 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done))
1568 bnx2fc_fw_init(hba); 1559 bnx2fc_fw_init(hba);
1569 1560
1570start_disc:
1571 mutex_unlock(&bnx2fc_dev_lock);
1572
1573 BNX2FC_MISC_DBG("bnx2fc started.\n"); 1561 BNX2FC_MISC_DBG("bnx2fc started.\n");
1574 1562
1575 /* Kick off Fabric discovery*/ 1563 list_for_each_entry(interface, &if_list, list) {
1576 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { 1564 if (interface->hba == hba) {
1577 printk(KERN_ERR PFX "ulp_init: start discovery\n"); 1565 lport = interface->ctlr.lp;
1578 lport->tt.frame_send = bnx2fc_xmit; 1566 /* Kick off Fabric discovery*/
1579 bnx2fc_start_disc(hba); 1567 printk(KERN_ERR PFX "ulp_init: start discovery\n");
1568 lport->tt.frame_send = bnx2fc_xmit;
1569 bnx2fc_start_disc(interface);
1570 }
1580 } 1571 }
1572
1573 mutex_unlock(&bnx2fc_dev_lock);
1581} 1574}
1582 1575
1583static void bnx2fc_port_shutdown(struct fc_lport *lport) 1576static void bnx2fc_port_shutdown(struct fc_lport *lport)
@@ -1587,37 +1580,25 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport)
1587 fc_lport_destroy(lport); 1580 fc_lport_destroy(lport);
1588} 1581}
1589 1582
1590static void bnx2fc_stop(struct bnx2fc_hba *hba) 1583static void bnx2fc_stop(struct bnx2fc_interface *interface)
1591{ 1584{
1592 struct fc_lport *lport; 1585 struct fc_lport *lport;
1593 struct fc_lport *vport; 1586 struct fc_lport *vport;
1594 1587
1595 BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__, 1588 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
1596 hba->init_done); 1589 return;
1597 if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) &&
1598 test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
1599 lport = hba->ctlr.lp;
1600 bnx2fc_port_shutdown(lport);
1601 BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d "
1602 "offloaded sessions\n",
1603 hba->num_ofld_sess);
1604 wait_event_interruptible(hba->shutdown_wait,
1605 (hba->num_ofld_sess == 0));
1606 mutex_lock(&lport->lp_mutex);
1607 list_for_each_entry(vport, &lport->vports, list)
1608 fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN;
1609 mutex_unlock(&lport->lp_mutex);
1610 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
1611 fcoe_ctlr_link_down(&hba->ctlr);
1612 fcoe_clean_pending_queue(lport);
1613
1614 mutex_lock(&hba->hba_mutex);
1615 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1616 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
1617 1590
1618 clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); 1591 lport = interface->ctlr.lp;
1619 mutex_unlock(&hba->hba_mutex); 1592 bnx2fc_port_shutdown(lport);
1620 } 1593
1594 mutex_lock(&lport->lp_mutex);
1595 list_for_each_entry(vport, &lport->vports, list)
1596 fc_host_port_type(vport->host) =
1597 FC_PORTTYPE_UNKNOWN;
1598 mutex_unlock(&lport->lp_mutex);
1599 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
1600 fcoe_ctlr_link_down(&interface->ctlr);
1601 fcoe_clean_pending_queue(lport);
1621} 1602}
1622 1603
1623static int bnx2fc_fw_init(struct bnx2fc_hba *hba) 1604static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
@@ -1656,8 +1637,7 @@ static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
1656 } 1637 }
1657 1638
1658 1639
1659 /* Mark HBA to indicate that the FW INIT is done */ 1640 set_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags);
1660 set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done);
1661 return 0; 1641 return 0;
1662 1642
1663err_unbind: 1643err_unbind:
@@ -1668,7 +1648,7 @@ err_out:
1668 1648
1669static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba) 1649static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
1670{ 1650{
1671 if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { 1651 if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) {
1672 if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) { 1652 if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
1673 init_timer(&hba->destroy_timer); 1653 init_timer(&hba->destroy_timer);
1674 hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT + 1654 hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
@@ -1677,8 +1657,8 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
1677 hba->destroy_timer.data = (unsigned long)hba; 1657 hba->destroy_timer.data = (unsigned long)hba;
1678 add_timer(&hba->destroy_timer); 1658 add_timer(&hba->destroy_timer);
1679 wait_event_interruptible(hba->destroy_wait, 1659 wait_event_interruptible(hba->destroy_wait,
1680 (hba->flags & 1660 test_bit(BNX2FC_FLAG_DESTROY_CMPL,
1681 BNX2FC_FLAG_DESTROY_CMPL)); 1661 &hba->flags));
1682 /* This should never happen */ 1662 /* This should never happen */
1683 if (signal_pending(current)) 1663 if (signal_pending(current))
1684 flush_signals(current); 1664 flush_signals(current);
@@ -1699,40 +1679,57 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
1699 */ 1679 */
1700static void bnx2fc_ulp_stop(void *handle) 1680static void bnx2fc_ulp_stop(void *handle)
1701{ 1681{
1702 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle; 1682 struct bnx2fc_hba *hba = handle;
1683 struct bnx2fc_interface *interface;
1703 1684
1704 printk(KERN_ERR "ULP_STOP\n"); 1685 printk(KERN_ERR "ULP_STOP\n");
1705 1686
1706 mutex_lock(&bnx2fc_dev_lock); 1687 mutex_lock(&bnx2fc_dev_lock);
1707 bnx2fc_stop(hba); 1688 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
1689 goto exit;
1690 list_for_each_entry(interface, &if_list, list) {
1691 if (interface->hba == hba)
1692 bnx2fc_stop(interface);
1693 }
1694 BUG_ON(hba->num_ofld_sess != 0);
1695
1696 mutex_lock(&hba->hba_mutex);
1697 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1698 clear_bit(ADAPTER_STATE_GOING_DOWN,
1699 &hba->adapter_state);
1700
1701 clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
1702 mutex_unlock(&hba->hba_mutex);
1703
1708 bnx2fc_fw_destroy(hba); 1704 bnx2fc_fw_destroy(hba);
1705exit:
1709 mutex_unlock(&bnx2fc_dev_lock); 1706 mutex_unlock(&bnx2fc_dev_lock);
1710} 1707}
1711 1708
1712static void bnx2fc_start_disc(struct bnx2fc_hba *hba) 1709static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
1713{ 1710{
1714 struct fc_lport *lport; 1711 struct fc_lport *lport;
1715 int wait_cnt = 0; 1712 int wait_cnt = 0;
1716 1713
1717 BNX2FC_MISC_DBG("Entered %s\n", __func__); 1714 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1718 /* Kick off FIP/FLOGI */ 1715 /* Kick off FIP/FLOGI */
1719 if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { 1716 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
1720 printk(KERN_ERR PFX "Init not done yet\n"); 1717 printk(KERN_ERR PFX "Init not done yet\n");
1721 return; 1718 return;
1722 } 1719 }
1723 1720
1724 lport = hba->ctlr.lp; 1721 lport = interface->ctlr.lp;
1725 BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n"); 1722 BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
1726 1723
1727 if (!bnx2fc_link_ok(lport)) { 1724 if (!bnx2fc_link_ok(lport)) {
1728 BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); 1725 BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
1729 fcoe_ctlr_link_up(&hba->ctlr); 1726 fcoe_ctlr_link_up(&interface->ctlr);
1730 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; 1727 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1731 set_bit(ADAPTER_STATE_READY, &hba->adapter_state); 1728 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
1732 } 1729 }
1733 1730
1734 /* wait for the FCF to be selected before issuing FLOGI */ 1731 /* wait for the FCF to be selected before issuing FLOGI */
1735 while (!hba->ctlr.sel_fcf) { 1732 while (!interface->ctlr.sel_fcf) {
1736 msleep(250); 1733 msleep(250);
1737 /* give up after 3 secs */ 1734 /* give up after 3 secs */
1738 if (++wait_cnt > 12) 1735 if (++wait_cnt > 12)
@@ -1758,15 +1755,15 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1758 1755
1759 BNX2FC_MISC_DBG("Entered %s\n", __func__); 1756 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1760 /* bnx2fc works only when bnx2x is loaded */ 1757 /* bnx2fc works only when bnx2x is loaded */
1761 if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 1758 if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) ||
1759 (dev->max_fcoe_conn == 0)) {
1762 printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s," 1760 printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s,"
1763 " flags: %lx\n", 1761 " flags: %lx fcoe_conn: %d\n",
1764 dev->netdev->name, dev->flags); 1762 dev->netdev->name, dev->flags, dev->max_fcoe_conn);
1765 return; 1763 return;
1766 } 1764 }
1767 1765
1768 /* Configure FCoE interface */ 1766 hba = bnx2fc_hba_create(dev);
1769 hba = bnx2fc_interface_create(dev);
1770 if (!hba) { 1767 if (!hba) {
1771 printk(KERN_ERR PFX "hba initialization failed\n"); 1768 printk(KERN_ERR PFX "hba initialization failed\n");
1772 return; 1769 return;
@@ -1774,7 +1771,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1774 1771
1775 /* Add HBA to the adapter list */ 1772 /* Add HBA to the adapter list */
1776 mutex_lock(&bnx2fc_dev_lock); 1773 mutex_lock(&bnx2fc_dev_lock);
1777 list_add_tail(&hba->link, &adapter_list); 1774 list_add_tail(&hba->list, &adapter_list);
1778 adapter_count++; 1775 adapter_count++;
1779 mutex_unlock(&bnx2fc_dev_lock); 1776 mutex_unlock(&bnx2fc_dev_lock);
1780 1777
@@ -1782,7 +1779,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1782 rc = dev->register_device(dev, CNIC_ULP_FCOE, 1779 rc = dev->register_device(dev, CNIC_ULP_FCOE,
1783 (void *) hba); 1780 (void *) hba);
1784 if (rc) 1781 if (rc)
1785 printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc); 1782 printk(KERN_ERR PFX "register_device failed, rc = %d\n", rc);
1786 else 1783 else
1787 set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); 1784 set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
1788} 1785}
@@ -1790,52 +1787,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1790 1787
1791static int bnx2fc_disable(struct net_device *netdev) 1788static int bnx2fc_disable(struct net_device *netdev)
1792{ 1789{
1793 struct bnx2fc_hba *hba; 1790 struct bnx2fc_interface *interface;
1794 struct net_device *phys_dev;
1795 struct ethtool_drvinfo drvinfo;
1796 int rc = 0; 1791 int rc = 0;
1797 1792
1798 rtnl_lock(); 1793 rtnl_lock();
1799
1800 mutex_lock(&bnx2fc_dev_lock); 1794 mutex_lock(&bnx2fc_dev_lock);
1801 1795
1802 /* obtain physical netdev */ 1796 interface = bnx2fc_interface_lookup(netdev);
1803 if (netdev->priv_flags & IFF_802_1Q_VLAN) 1797 if (!interface || !interface->ctlr.lp) {
1804 phys_dev = vlan_dev_real_dev(netdev);
1805 else {
1806 printk(KERN_ERR PFX "Not a vlan device\n");
1807 rc = -ENODEV;
1808 goto nodev;
1809 }
1810
1811 /* verify if the physical device is a netxtreme2 device */
1812 if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
1813 memset(&drvinfo, 0, sizeof(drvinfo));
1814 phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
1815 if (strcmp(drvinfo.driver, "bnx2x")) {
1816 printk(KERN_ERR PFX "Not a netxtreme2 device\n");
1817 rc = -ENODEV;
1818 goto nodev;
1819 }
1820 } else {
1821 printk(KERN_ERR PFX "unable to obtain drv_info\n");
1822 rc = -ENODEV;
1823 goto nodev;
1824 }
1825
1826 printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n");
1827
1828 /* obtain hba and initialize rest of the structure */
1829 hba = bnx2fc_hba_lookup(phys_dev);
1830 if (!hba || !hba->ctlr.lp) {
1831 rc = -ENODEV; 1798 rc = -ENODEV;
1832 printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n"); 1799 printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
1833 } else { 1800 } else {
1834 fcoe_ctlr_link_down(&hba->ctlr); 1801 fcoe_ctlr_link_down(&interface->ctlr);
1835 fcoe_clean_pending_queue(hba->ctlr.lp); 1802 fcoe_clean_pending_queue(interface->ctlr.lp);
1836 } 1803 }
1837 1804
1838nodev:
1839 mutex_unlock(&bnx2fc_dev_lock); 1805 mutex_unlock(&bnx2fc_dev_lock);
1840 rtnl_unlock(); 1806 rtnl_unlock();
1841 return rc; 1807 return rc;
@@ -1844,48 +1810,19 @@ nodev:
1844 1810
1845static int bnx2fc_enable(struct net_device *netdev) 1811static int bnx2fc_enable(struct net_device *netdev)
1846{ 1812{
1847 struct bnx2fc_hba *hba; 1813 struct bnx2fc_interface *interface;
1848 struct net_device *phys_dev;
1849 struct ethtool_drvinfo drvinfo;
1850 int rc = 0; 1814 int rc = 0;
1851 1815
1852 rtnl_lock(); 1816 rtnl_lock();
1853
1854 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1855 mutex_lock(&bnx2fc_dev_lock); 1817 mutex_lock(&bnx2fc_dev_lock);
1856 1818
1857 /* obtain physical netdev */ 1819 interface = bnx2fc_interface_lookup(netdev);
1858 if (netdev->priv_flags & IFF_802_1Q_VLAN) 1820 if (!interface || !interface->ctlr.lp) {
1859 phys_dev = vlan_dev_real_dev(netdev);
1860 else {
1861 printk(KERN_ERR PFX "Not a vlan device\n");
1862 rc = -ENODEV;
1863 goto nodev;
1864 }
1865 /* verify if the physical device is a netxtreme2 device */
1866 if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
1867 memset(&drvinfo, 0, sizeof(drvinfo));
1868 phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
1869 if (strcmp(drvinfo.driver, "bnx2x")) {
1870 printk(KERN_ERR PFX "Not a netxtreme2 device\n");
1871 rc = -ENODEV;
1872 goto nodev;
1873 }
1874 } else {
1875 printk(KERN_ERR PFX "unable to obtain drv_info\n");
1876 rc = -ENODEV; 1821 rc = -ENODEV;
1877 goto nodev; 1822 printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
1878 } 1823 } else if (!bnx2fc_link_ok(interface->ctlr.lp))
1879 1824 fcoe_ctlr_link_up(&interface->ctlr);
1880 /* obtain hba and initialize rest of the structure */
1881 hba = bnx2fc_hba_lookup(phys_dev);
1882 if (!hba || !hba->ctlr.lp) {
1883 rc = -ENODEV;
1884 printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n");
1885 } else if (!bnx2fc_link_ok(hba->ctlr.lp))
1886 fcoe_ctlr_link_up(&hba->ctlr);
1887 1825
1888nodev:
1889 mutex_unlock(&bnx2fc_dev_lock); 1826 mutex_unlock(&bnx2fc_dev_lock);
1890 rtnl_unlock(); 1827 rtnl_unlock();
1891 return rc; 1828 return rc;
@@ -1903,6 +1840,7 @@ nodev:
1903 */ 1840 */
1904static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) 1841static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1905{ 1842{
1843 struct bnx2fc_interface *interface;
1906 struct bnx2fc_hba *hba; 1844 struct bnx2fc_hba *hba;
1907 struct net_device *phys_dev; 1845 struct net_device *phys_dev;
1908 struct fc_lport *lport; 1846 struct fc_lport *lport;
@@ -1938,7 +1876,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1938 if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { 1876 if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
1939 memset(&drvinfo, 0, sizeof(drvinfo)); 1877 memset(&drvinfo, 0, sizeof(drvinfo));
1940 phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); 1878 phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
1941 if (strcmp(drvinfo.driver, "bnx2x")) { 1879 if (strncmp(drvinfo.driver, "bnx2x", strlen("bnx2x"))) {
1942 printk(KERN_ERR PFX "Not a netxtreme2 device\n"); 1880 printk(KERN_ERR PFX "Not a netxtreme2 device\n");
1943 rc = -EINVAL; 1881 rc = -EINVAL;
1944 goto netdev_err; 1882 goto netdev_err;
@@ -1949,7 +1887,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1949 goto netdev_err; 1887 goto netdev_err;
1950 } 1888 }
1951 1889
1952 /* obtain hba and initialize rest of the structure */ 1890 /* obtain interface and initialize rest of the structure */
1953 hba = bnx2fc_hba_lookup(phys_dev); 1891 hba = bnx2fc_hba_lookup(phys_dev);
1954 if (!hba) { 1892 if (!hba) {
1955 rc = -ENODEV; 1893 rc = -ENODEV;
@@ -1957,67 +1895,61 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1957 goto netdev_err; 1895 goto netdev_err;
1958 } 1896 }
1959 1897
1960 if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { 1898 if (bnx2fc_interface_lookup(netdev)) {
1961 rc = bnx2fc_fw_init(hba);
1962 if (rc)
1963 goto netdev_err;
1964 }
1965
1966 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
1967 rc = -EEXIST; 1899 rc = -EEXIST;
1968 goto netdev_err; 1900 goto netdev_err;
1969 } 1901 }
1970 1902
1971 /* update netdev with vlan netdev */ 1903 interface = bnx2fc_interface_create(hba, netdev, fip_mode);
1972 hba->netdev = netdev; 1904 if (!interface) {
1973 hba->vlan_id = vlan_id; 1905 printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
1974 hba->vlan_enabled = 1;
1975
1976 rc = bnx2fc_interface_setup(hba, fip_mode);
1977 if (rc) {
1978 printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n");
1979 goto ifput_err; 1906 goto ifput_err;
1980 } 1907 }
1981 1908
1982 hba->timer_work_queue = 1909 interface->vlan_id = vlan_id;
1910 interface->vlan_enabled = 1;
1911
1912 interface->timer_work_queue =
1983 create_singlethread_workqueue("bnx2fc_timer_wq"); 1913 create_singlethread_workqueue("bnx2fc_timer_wq");
1984 if (!hba->timer_work_queue) { 1914 if (!interface->timer_work_queue) {
1985 printk(KERN_ERR PFX "ulp_init could not create timer_wq\n"); 1915 printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
1986 rc = -EINVAL; 1916 rc = -EINVAL;
1987 goto ifput_err; 1917 goto ifput_err;
1988 } 1918 }
1989 1919
1990 lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0); 1920 lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0);
1991 if (!lport) { 1921 if (!lport) {
1992 printk(KERN_ERR PFX "Failed to create interface (%s)\n", 1922 printk(KERN_ERR PFX "Failed to create interface (%s)\n",
1993 netdev->name); 1923 netdev->name);
1994 bnx2fc_netdev_cleanup(hba); 1924 bnx2fc_netdev_cleanup(interface);
1995 rc = -EINVAL; 1925 rc = -EINVAL;
1996 goto if_create_err; 1926 goto if_create_err;
1997 } 1927 }
1998 1928
1929 /* Add interface to if_list */
1930 list_add_tail(&interface->list, &if_list);
1931
1999 lport->boot_time = jiffies; 1932 lport->boot_time = jiffies;
2000 1933
2001 /* Make this master N_port */ 1934 /* Make this master N_port */
2002 hba->ctlr.lp = lport; 1935 interface->ctlr.lp = lport;
2003 1936
2004 set_bit(BNX2FC_CREATE_DONE, &hba->init_done); 1937 BNX2FC_HBA_DBG(lport, "create: START DISC\n");
2005 printk(KERN_ERR PFX "create: START DISC\n"); 1938 bnx2fc_start_disc(interface);
2006 bnx2fc_start_disc(hba);
2007 /* 1939 /*
2008 * Release from kref_init in bnx2fc_interface_setup, on success 1940 * Release from kref_init in bnx2fc_interface_setup, on success
2009 * lport should be holding a reference taken in bnx2fc_if_create 1941 * lport should be holding a reference taken in bnx2fc_if_create
2010 */ 1942 */
2011 bnx2fc_interface_put(hba); 1943 bnx2fc_interface_put(interface);
2012 /* put netdev that was held while calling dev_get_by_name */ 1944 /* put netdev that was held while calling dev_get_by_name */
2013 mutex_unlock(&bnx2fc_dev_lock); 1945 mutex_unlock(&bnx2fc_dev_lock);
2014 rtnl_unlock(); 1946 rtnl_unlock();
2015 return 0; 1947 return 0;
2016 1948
2017if_create_err: 1949if_create_err:
2018 destroy_workqueue(hba->timer_work_queue); 1950 destroy_workqueue(interface->timer_work_queue);
2019ifput_err: 1951ifput_err:
2020 bnx2fc_interface_put(hba); 1952 bnx2fc_interface_put(interface);
2021netdev_err: 1953netdev_err:
2022 module_put(THIS_MODULE); 1954 module_put(THIS_MODULE);
2023mod_err: 1955mod_err:
@@ -2027,7 +1959,7 @@ mod_err:
2027} 1959}
2028 1960
2029/** 1961/**
2030 * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance 1962 * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance
2031 * 1963 *
2032 * @cnic: Pointer to cnic device instance 1964 * @cnic: Pointer to cnic device instance
2033 * 1965 *
@@ -2047,19 +1979,30 @@ static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
2047 return NULL; 1979 return NULL;
2048} 1980}
2049 1981
2050static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev) 1982static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
1983 *netdev)
1984{
1985 struct bnx2fc_interface *interface;
1986
1987 /* Called with bnx2fc_dev_lock held */
1988 list_for_each_entry(interface, &if_list, list) {
1989 if (interface->netdev == netdev)
1990 return interface;
1991 }
1992 return NULL;
1993}
1994
1995static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device
1996 *phys_dev)
2051{ 1997{
2052 struct list_head *list;
2053 struct list_head *temp;
2054 struct bnx2fc_hba *hba; 1998 struct bnx2fc_hba *hba;
2055 1999
2056 /* Called with bnx2fc_dev_lock held */ 2000 /* Called with bnx2fc_dev_lock held */
2057 list_for_each_safe(list, temp, &adapter_list) { 2001 list_for_each_entry(hba, &adapter_list, list) {
2058 hba = (struct bnx2fc_hba *)list;
2059 if (hba->phys_dev == phys_dev) 2002 if (hba->phys_dev == phys_dev)
2060 return hba; 2003 return hba;
2061 } 2004 }
2062 printk(KERN_ERR PFX "hba_lookup: hba NULL\n"); 2005 printk(KERN_ERR PFX "adapter_lookup: hba NULL\n");
2063 return NULL; 2006 return NULL;
2064} 2007}
2065 2008
@@ -2071,6 +2014,8 @@ static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev)
2071static void bnx2fc_ulp_exit(struct cnic_dev *dev) 2014static void bnx2fc_ulp_exit(struct cnic_dev *dev)
2072{ 2015{
2073 struct bnx2fc_hba *hba; 2016 struct bnx2fc_hba *hba;
2017 struct bnx2fc_interface *interface, *tmp;
2018 struct fc_lport *lport;
2074 2019
2075 BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n"); 2020 BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
2076 2021
@@ -2089,13 +2034,20 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
2089 return; 2034 return;
2090 } 2035 }
2091 2036
2092 list_del_init(&hba->link); 2037 list_del_init(&hba->list);
2093 adapter_count--; 2038 adapter_count--;
2094 2039
2095 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { 2040 list_for_each_entry_safe(interface, tmp, &if_list, list) {
2096 /* destroy not called yet, move to quiesced list */ 2041 /* destroy not called yet, move to quiesced list */
2097 bnx2fc_netdev_cleanup(hba); 2042 if (interface->hba == hba) {
2098 bnx2fc_if_destroy(hba->ctlr.lp); 2043 bnx2fc_netdev_cleanup(interface);
2044 bnx2fc_stop(interface);
2045
2046 list_del(&interface->list);
2047 lport = interface->ctlr.lp;
2048 bnx2fc_interface_put(interface);
2049 bnx2fc_if_destroy(lport, hba);
2050 }
2099 } 2051 }
2100 mutex_unlock(&bnx2fc_dev_lock); 2052 mutex_unlock(&bnx2fc_dev_lock);
2101 2053
@@ -2103,7 +2055,7 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
2103 /* unregister cnic device */ 2055 /* unregister cnic device */
2104 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) 2056 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
2105 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); 2057 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
2106 bnx2fc_interface_destroy(hba); 2058 bnx2fc_hba_destroy(hba);
2107} 2059}
2108 2060
2109/** 2061/**
@@ -2259,6 +2211,7 @@ static int __init bnx2fc_mod_init(void)
2259 } 2211 }
2260 2212
2261 INIT_LIST_HEAD(&adapter_list); 2213 INIT_LIST_HEAD(&adapter_list);
2214 INIT_LIST_HEAD(&if_list);
2262 mutex_init(&bnx2fc_dev_lock); 2215 mutex_init(&bnx2fc_dev_lock);
2263 adapter_count = 0; 2216 adapter_count = 0;
2264 2217
@@ -2336,16 +2289,17 @@ static void __exit bnx2fc_mod_exit(void)
2336 mutex_unlock(&bnx2fc_dev_lock); 2289 mutex_unlock(&bnx2fc_dev_lock);
2337 2290
2338 /* Unregister with cnic */ 2291 /* Unregister with cnic */
2339 list_for_each_entry_safe(hba, next, &to_be_deleted, link) { 2292 list_for_each_entry_safe(hba, next, &to_be_deleted, list) {
2340 list_del_init(&hba->link); 2293 list_del_init(&hba->list);
2341 printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n", 2294 printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p\n",
2342 hba, atomic_read(&hba->kref.refcount)); 2295 hba);
2343 bnx2fc_ulp_stop(hba); 2296 bnx2fc_ulp_stop(hba);
2344 /* unregister cnic device */ 2297 /* unregister cnic device */
2345 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, 2298 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED,
2346 &hba->reg_with_cnic)) 2299 &hba->reg_with_cnic))
2347 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); 2300 hba->cnic->unregister_device(hba->cnic,
2348 bnx2fc_interface_destroy(hba); 2301 CNIC_ULP_FCOE);
2302 bnx2fc_hba_destroy(hba);
2349 } 2303 }
2350 cnic_unregister_driver(CNIC_ULP_FCOE); 2304 cnic_unregister_driver(CNIC_ULP_FCOE);
2351 2305
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 09bdd9b88d1a..72cfb14acd3a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -2,7 +2,7 @@
2 * This file contains the code that low level functions that interact 2 * This file contains the code that low level functions that interact
3 * with 57712 FCoE firmware. 3 * with 57712 FCoE firmware.
4 * 4 *
5 * Copyright (c) 2008 - 2010 Broadcom Corporation 5 * Copyright (c) 2008 - 2011 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -23,7 +23,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
23 struct fcoe_kcqe *ofld_kcqe); 23 struct fcoe_kcqe *ofld_kcqe);
24static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code); 24static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
25static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, 25static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
26 struct fcoe_kcqe *conn_destroy); 26 struct fcoe_kcqe *destroy_kcqe);
27 27
28int bnx2fc_send_stat_req(struct bnx2fc_hba *hba) 28int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
29{ 29{
@@ -67,7 +67,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
67 int rc = 0; 67 int rc = 0;
68 68
69 if (!hba->cnic) { 69 if (!hba->cnic) {
70 printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n"); 70 printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
71 return -ENODEV; 71 return -ENODEV;
72 } 72 }
73 73
@@ -103,6 +103,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
103 fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION; 103 fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
104 fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION; 104 fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
105 105
106
106 fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; 107 fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
107 fcoe_init2.hash_tbl_pbl_addr_hi = (u32) 108 fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
108 ((u64) hba->hash_tbl_pbl_dma >> 32); 109 ((u64) hba->hash_tbl_pbl_dma >> 32);
@@ -165,7 +166,8 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
165 struct bnx2fc_rport *tgt) 166 struct bnx2fc_rport *tgt)
166{ 167{
167 struct fc_lport *lport = port->lport; 168 struct fc_lport *lport = port->lport;
168 struct bnx2fc_hba *hba = port->priv; 169 struct bnx2fc_interface *interface = port->priv;
170 struct bnx2fc_hba *hba = interface->hba;
169 struct kwqe *kwqe_arr[4]; 171 struct kwqe *kwqe_arr[4];
170 struct fcoe_kwqe_conn_offload1 ofld_req1; 172 struct fcoe_kwqe_conn_offload1 ofld_req1;
171 struct fcoe_kwqe_conn_offload2 ofld_req2; 173 struct fcoe_kwqe_conn_offload2 ofld_req2;
@@ -227,7 +229,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
227 ofld_req3.hdr.flags = 229 ofld_req3.hdr.flags =
228 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); 230 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
229 231
230 ofld_req3.vlan_tag = hba->vlan_id << 232 ofld_req3.vlan_tag = interface->vlan_id <<
231 FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT; 233 FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
232 ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT; 234 ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
233 235
@@ -277,8 +279,20 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
277 ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << 279 ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
278 FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT); 280 FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
279 281
282 /*
283 * Info from PRLI response, this info is used for sequence level error
284 * recovery support
285 */
286 if (tgt->dev_type == TYPE_TAPE) {
287 ofld_req3.flags |= 1 <<
288 FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
289 ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
290 ? 1 : 0) <<
291 FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
292 }
293
280 /* vlan flag */ 294 /* vlan flag */
281 ofld_req3.flags |= (hba->vlan_enabled << 295 ofld_req3.flags |= (interface->vlan_enabled <<
282 FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT); 296 FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
283 297
284 /* C2_VALID and ACK flags are not set as they are not suppported */ 298 /* C2_VALID and ACK flags are not set as they are not suppported */
@@ -300,12 +314,13 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
300 ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2]; 314 ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
301 ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1]; 315 ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
302 ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0]; 316 ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
303 ofld_req4.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */ 317 ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
304 ofld_req4.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4]; 318 /* fcf mac */
305 ofld_req4.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3]; 319 ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
306 ofld_req4.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2]; 320 ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
307 ofld_req4.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1]; 321 ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
308 ofld_req4.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0]; 322 ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
323 ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
309 324
310 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; 325 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
311 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); 326 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -335,7 +350,8 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
335 struct bnx2fc_rport *tgt) 350 struct bnx2fc_rport *tgt)
336{ 351{
337 struct kwqe *kwqe_arr[2]; 352 struct kwqe *kwqe_arr[2];
338 struct bnx2fc_hba *hba = port->priv; 353 struct bnx2fc_interface *interface = port->priv;
354 struct bnx2fc_hba *hba = interface->hba;
339 struct fcoe_kwqe_conn_enable_disable enbl_req; 355 struct fcoe_kwqe_conn_enable_disable enbl_req;
340 struct fc_lport *lport = port->lport; 356 struct fc_lport *lport = port->lport;
341 struct fc_rport *rport = tgt->rport; 357 struct fc_rport *rport = tgt->rport;
@@ -358,12 +374,12 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
358 enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0]; 374 enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
359 memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN); 375 memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
360 376
361 enbl_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */ 377 enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
362 enbl_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4]; 378 enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
363 enbl_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3]; 379 enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
364 enbl_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2]; 380 enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
365 enbl_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1]; 381 enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
366 enbl_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0]; 382 enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
367 383
368 port_id = fc_host_port_id(lport->host); 384 port_id = fc_host_port_id(lport->host);
369 if (port_id != tgt->sid) { 385 if (port_id != tgt->sid) {
@@ -379,10 +395,10 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
379 enbl_req.d_id[0] = (port_id & 0x000000FF); 395 enbl_req.d_id[0] = (port_id & 0x000000FF);
380 enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8; 396 enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
381 enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16; 397 enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
382 enbl_req.vlan_tag = hba->vlan_id << 398 enbl_req.vlan_tag = interface->vlan_id <<
383 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; 399 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
384 enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; 400 enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
385 enbl_req.vlan_flag = hba->vlan_enabled; 401 enbl_req.vlan_flag = interface->vlan_enabled;
386 enbl_req.context_id = tgt->context_id; 402 enbl_req.context_id = tgt->context_id;
387 enbl_req.conn_id = tgt->fcoe_conn_id; 403 enbl_req.conn_id = tgt->fcoe_conn_id;
388 404
@@ -402,7 +418,8 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
402int bnx2fc_send_session_disable_req(struct fcoe_port *port, 418int bnx2fc_send_session_disable_req(struct fcoe_port *port,
403 struct bnx2fc_rport *tgt) 419 struct bnx2fc_rport *tgt)
404{ 420{
405 struct bnx2fc_hba *hba = port->priv; 421 struct bnx2fc_interface *interface = port->priv;
422 struct bnx2fc_hba *hba = interface->hba;
406 struct fcoe_kwqe_conn_enable_disable disable_req; 423 struct fcoe_kwqe_conn_enable_disable disable_req;
407 struct kwqe *kwqe_arr[2]; 424 struct kwqe *kwqe_arr[2];
408 struct fc_rport *rport = tgt->rport; 425 struct fc_rport *rport = tgt->rport;
@@ -423,12 +440,12 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
423 disable_req.src_mac_addr_hi[0] = tgt->src_addr[1]; 440 disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
424 disable_req.src_mac_addr_hi[1] = tgt->src_addr[0]; 441 disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
425 442
426 disable_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */ 443 disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
427 disable_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4]; 444 disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
428 disable_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3]; 445 disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
429 disable_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2]; 446 disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
430 disable_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1]; 447 disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
431 disable_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0]; 448 disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
432 449
433 port_id = tgt->sid; 450 port_id = tgt->sid;
434 disable_req.s_id[0] = (port_id & 0x000000FF); 451 disable_req.s_id[0] = (port_id & 0x000000FF);
@@ -442,11 +459,11 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
442 disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16; 459 disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
443 disable_req.context_id = tgt->context_id; 460 disable_req.context_id = tgt->context_id;
444 disable_req.conn_id = tgt->fcoe_conn_id; 461 disable_req.conn_id = tgt->fcoe_conn_id;
445 disable_req.vlan_tag = hba->vlan_id << 462 disable_req.vlan_tag = interface->vlan_id <<
446 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; 463 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
447 disable_req.vlan_tag |= 464 disable_req.vlan_tag |=
448 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; 465 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
449 disable_req.vlan_flag = hba->vlan_enabled; 466 disable_req.vlan_flag = interface->vlan_enabled;
450 467
451 kwqe_arr[0] = (struct kwqe *) &disable_req; 468 kwqe_arr[0] = (struct kwqe *) &disable_req;
452 469
@@ -525,7 +542,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
525{ 542{
526 struct fcoe_port *port = tgt->port; 543 struct fcoe_port *port = tgt->port;
527 struct fc_lport *lport = port->lport; 544 struct fc_lport *lport = port->lport;
528 struct bnx2fc_hba *hba = port->priv; 545 struct bnx2fc_interface *interface = port->priv;
529 struct bnx2fc_unsol_els *unsol_els; 546 struct bnx2fc_unsol_els *unsol_els;
530 struct fc_frame_header *fh; 547 struct fc_frame_header *fh;
531 struct fc_frame *fp; 548 struct fc_frame *fp;
@@ -586,7 +603,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
586 fr_eof(fp) = FC_EOF_T; 603 fr_eof(fp) = FC_EOF_T;
587 fr_crc(fp) = cpu_to_le32(~crc); 604 fr_crc(fp) = cpu_to_le32(~crc);
588 unsol_els->lport = lport; 605 unsol_els->lport = lport;
589 unsol_els->hba = hba; 606 unsol_els->hba = interface->hba;
590 unsol_els->fp = fp; 607 unsol_els->fp = fp;
591 INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work); 608 INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
592 queue_work(bnx2fc_wq, &unsol_els->unsol_els_work); 609 queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
@@ -608,9 +625,12 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
608 u32 frame_len, len; 625 u32 frame_len, len;
609 struct bnx2fc_cmd *io_req = NULL; 626 struct bnx2fc_cmd *io_req = NULL;
610 struct fcoe_task_ctx_entry *task, *task_page; 627 struct fcoe_task_ctx_entry *task, *task_page;
611 struct bnx2fc_hba *hba = tgt->port->priv; 628 struct bnx2fc_interface *interface = tgt->port->priv;
629 struct bnx2fc_hba *hba = interface->hba;
612 int task_idx, index; 630 int task_idx, index;
613 int rc = 0; 631 int rc = 0;
632 u64 err_warn_bit_map;
633 u8 err_warn = 0xff;
614 634
615 635
616 BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe); 636 BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
@@ -673,39 +693,43 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
673 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n", 693 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
674 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); 694 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
675 695
676 bnx2fc_return_rqe(tgt, 1);
677 696
678 if (xid > BNX2FC_MAX_XID) { 697 if (xid > BNX2FC_MAX_XID) {
679 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", 698 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
680 xid); 699 xid);
681 spin_unlock_bh(&tgt->tgt_lock); 700 goto ret_err_rqe;
682 break;
683 } 701 }
684 702
685 task_idx = xid / BNX2FC_TASKS_PER_PAGE; 703 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
686 index = xid % BNX2FC_TASKS_PER_PAGE; 704 index = xid % BNX2FC_TASKS_PER_PAGE;
687 task_page = (struct fcoe_task_ctx_entry *) 705 task_page = (struct fcoe_task_ctx_entry *)
688 hba->task_ctx[task_idx]; 706 hba->task_ctx[task_idx];
689 task = &(task_page[index]); 707 task = &(task_page[index]);
690 708
691 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; 709 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
692 if (!io_req) { 710 if (!io_req)
693 spin_unlock_bh(&tgt->tgt_lock); 711 goto ret_err_rqe;
694 break;
695 }
696 712
697 if (io_req->cmd_type != BNX2FC_SCSI_CMD) { 713 if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
698 printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); 714 printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
699 spin_unlock_bh(&tgt->tgt_lock); 715 goto ret_err_rqe;
700 break;
701 } 716 }
702 717
703 if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, 718 if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
704 &io_req->req_flags)) { 719 &io_req->req_flags)) {
705 BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in " 720 BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
706 "progress.. ignore unsol err\n"); 721 "progress.. ignore unsol err\n");
707 spin_unlock_bh(&tgt->tgt_lock); 722 goto ret_err_rqe;
708 break; 723 }
724
725 err_warn_bit_map = (u64)
726 ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
727 (u64)err_entry->data.err_warn_bitmap_lo;
728 for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
729 if (err_warn_bit_map & (u64)((u64)1 << i)) {
730 err_warn = i;
731 break;
732 }
709 } 733 }
710 734
711 /* 735 /*
@@ -715,26 +739,61 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
715 * logging out the target, when the ABTS eventually 739 * logging out the target, when the ABTS eventually
716 * times out. 740 * times out.
717 */ 741 */
718 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, 742 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
719 &io_req->req_flags)) {
720 /*
721 * Cancel the timeout_work, as we received IO
722 * completion with FW error.
723 */
724 if (cancel_delayed_work(&io_req->timeout_work))
725 kref_put(&io_req->refcount,
726 bnx2fc_cmd_release); /* timer hold */
727
728 rc = bnx2fc_initiate_abts(io_req);
729 if (rc != SUCCESS) {
730 BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
731 "failed. issue cleanup\n");
732 rc = bnx2fc_initiate_cleanup(io_req);
733 BUG_ON(rc);
734 }
735 } else
736 printk(KERN_ERR PFX "err_warn: io_req (0x%x) already " 743 printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
737 "in ABTS processing\n", xid); 744 "in ABTS processing\n", xid);
745 goto ret_err_rqe;
746 }
747 BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
748 if (tgt->dev_type != TYPE_TAPE)
749 goto skip_rec;
750 switch (err_warn) {
751 case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
752 case FCOE_ERROR_CODE_DATA_OOO_RO:
753 case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
754 case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
755 case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
756 case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
757 BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
758 xid);
759 memset(&io_req->err_entry, 0,
760 sizeof(struct fcoe_err_report_entry));
761 memcpy(&io_req->err_entry, err_entry,
762 sizeof(struct fcoe_err_report_entry));
763 if (!test_bit(BNX2FC_FLAG_SRR_SENT,
764 &io_req->req_flags)) {
765 spin_unlock_bh(&tgt->tgt_lock);
766 rc = bnx2fc_send_rec(io_req);
767 spin_lock_bh(&tgt->tgt_lock);
768
769 if (rc)
770 goto skip_rec;
771 } else
772 printk(KERN_ERR PFX "SRR in progress\n");
773 goto ret_err_rqe;
774 break;
775 default:
776 break;
777 }
778
779skip_rec:
780 set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
781 /*
782 * Cancel the timeout_work, as we received IO
783 * completion with FW error.
784 */
785 if (cancel_delayed_work(&io_req->timeout_work))
786 kref_put(&io_req->refcount, bnx2fc_cmd_release);
787
788 rc = bnx2fc_initiate_abts(io_req);
789 if (rc != SUCCESS) {
790 printk(KERN_ERR PFX "err_warn: initiate_abts "
791 "failed xid = 0x%x. issue cleanup\n",
792 io_req->xid);
793 bnx2fc_initiate_cleanup(io_req);
794 }
795ret_err_rqe:
796 bnx2fc_return_rqe(tgt, 1);
738 spin_unlock_bh(&tgt->tgt_lock); 797 spin_unlock_bh(&tgt->tgt_lock);
739 break; 798 break;
740 799
@@ -755,6 +814,47 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
755 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", 814 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
756 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); 815 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
757 816
817 if (xid > BNX2FC_MAX_XID) {
818 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
819 goto ret_warn_rqe;
820 }
821
822 err_warn_bit_map = (u64)
823 ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
824 (u64)err_entry->data.err_warn_bitmap_lo;
825 for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
826 if (err_warn_bit_map & (u64) (1 << i)) {
827 err_warn = i;
828 break;
829 }
830 }
831 BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
832
833 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
834 index = xid % BNX2FC_TASKS_PER_PAGE;
835 task_page = (struct fcoe_task_ctx_entry *)
836 interface->hba->task_ctx[task_idx];
837 task = &(task_page[index]);
838 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
839 if (!io_req)
840 goto ret_warn_rqe;
841
842 if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
843 printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
844 goto ret_warn_rqe;
845 }
846
847 memset(&io_req->err_entry, 0,
848 sizeof(struct fcoe_err_report_entry));
849 memcpy(&io_req->err_entry, err_entry,
850 sizeof(struct fcoe_err_report_entry));
851
852 if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
853 /* REC_TOV is not a warning code */
854 BUG_ON(1);
855 else
856 BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
857ret_warn_rqe:
758 bnx2fc_return_rqe(tgt, 1); 858 bnx2fc_return_rqe(tgt, 1);
759 spin_unlock_bh(&tgt->tgt_lock); 859 spin_unlock_bh(&tgt->tgt_lock);
760 break; 860 break;
@@ -770,7 +870,8 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
770 struct fcoe_task_ctx_entry *task; 870 struct fcoe_task_ctx_entry *task;
771 struct fcoe_task_ctx_entry *task_page; 871 struct fcoe_task_ctx_entry *task_page;
772 struct fcoe_port *port = tgt->port; 872 struct fcoe_port *port = tgt->port;
773 struct bnx2fc_hba *hba = port->priv; 873 struct bnx2fc_interface *interface = port->priv;
874 struct bnx2fc_hba *hba = interface->hba;
774 struct bnx2fc_cmd *io_req; 875 struct bnx2fc_cmd *io_req;
775 int task_idx, index; 876 int task_idx, index;
776 u16 xid; 877 u16 xid;
@@ -781,7 +882,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
781 spin_lock_bh(&tgt->tgt_lock); 882 spin_lock_bh(&tgt->tgt_lock);
782 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; 883 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
783 if (xid >= BNX2FC_MAX_TASKS) { 884 if (xid >= BNX2FC_MAX_TASKS) {
784 printk(KERN_ALERT PFX "ERROR:xid out of range\n"); 885 printk(KERN_ERR PFX "ERROR:xid out of range\n");
785 spin_unlock_bh(&tgt->tgt_lock); 886 spin_unlock_bh(&tgt->tgt_lock);
786 return; 887 return;
787 } 888 }
@@ -861,6 +962,13 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
861 kref_put(&io_req->refcount, bnx2fc_cmd_release); 962 kref_put(&io_req->refcount, bnx2fc_cmd_release);
862 break; 963 break;
863 964
965 case BNX2FC_SEQ_CLEANUP:
966 BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
967 io_req->xid);
968 bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
969 kref_put(&io_req->refcount, bnx2fc_cmd_release);
970 break;
971
864 default: 972 default:
865 printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type); 973 printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
866 break; 974 break;
@@ -962,8 +1070,10 @@ unlock:
962 1 - tgt->cq_curr_toggle_bit; 1070 1 - tgt->cq_curr_toggle_bit;
963 } 1071 }
964 } 1072 }
965 bnx2fc_arm_cq(tgt); 1073 if (num_free_sqes) {
966 atomic_add(num_free_sqes, &tgt->free_sqes); 1074 bnx2fc_arm_cq(tgt);
1075 atomic_add(num_free_sqes, &tgt->free_sqes);
1076 }
967 spin_unlock_bh(&tgt->cq_lock); 1077 spin_unlock_bh(&tgt->cq_lock);
968 return 0; 1078 return 0;
969} 1079}
@@ -983,7 +1093,7 @@ static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
983 struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id]; 1093 struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
984 1094
985 if (!tgt) { 1095 if (!tgt) {
986 printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id); 1096 printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
987 return; 1097 return;
988 } 1098 }
989 1099
@@ -1004,6 +1114,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1004{ 1114{
1005 struct bnx2fc_rport *tgt; 1115 struct bnx2fc_rport *tgt;
1006 struct fcoe_port *port; 1116 struct fcoe_port *port;
1117 struct bnx2fc_interface *interface;
1007 u32 conn_id; 1118 u32 conn_id;
1008 u32 context_id; 1119 u32 context_id;
1009 int rc; 1120 int rc;
@@ -1018,8 +1129,9 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1018 BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n", 1129 BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
1019 ofld_kcqe->fcoe_conn_context_id); 1130 ofld_kcqe->fcoe_conn_context_id);
1020 port = tgt->port; 1131 port = tgt->port;
1021 if (hba != tgt->port->priv) { 1132 interface = tgt->port->priv;
1022 printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n"); 1133 if (hba != interface->hba) {
1134 printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
1023 goto ofld_cmpl_err; 1135 goto ofld_cmpl_err;
1024 } 1136 }
1025 /* 1137 /*
@@ -1040,7 +1152,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1040 /* now enable the session */ 1152 /* now enable the session */
1041 rc = bnx2fc_send_session_enable_req(port, tgt); 1153 rc = bnx2fc_send_session_enable_req(port, tgt);
1042 if (rc) { 1154 if (rc) {
1043 printk(KERN_ALERT PFX "enable session failed\n"); 1155 printk(KERN_ERR PFX "enable session failed\n");
1044 goto ofld_cmpl_err; 1156 goto ofld_cmpl_err;
1045 } 1157 }
1046 } 1158 }
@@ -1063,6 +1175,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1063 struct fcoe_kcqe *ofld_kcqe) 1175 struct fcoe_kcqe *ofld_kcqe)
1064{ 1176{
1065 struct bnx2fc_rport *tgt; 1177 struct bnx2fc_rport *tgt;
1178 struct bnx2fc_interface *interface;
1066 u32 conn_id; 1179 u32 conn_id;
1067 u32 context_id; 1180 u32 context_id;
1068 1181
@@ -1070,7 +1183,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1070 conn_id = ofld_kcqe->fcoe_conn_id; 1183 conn_id = ofld_kcqe->fcoe_conn_id;
1071 tgt = hba->tgt_ofld_list[conn_id]; 1184 tgt = hba->tgt_ofld_list[conn_id];
1072 if (!tgt) { 1185 if (!tgt) {
1073 printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n"); 1186 printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
1074 return; 1187 return;
1075 } 1188 }
1076 1189
@@ -1082,16 +1195,17 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1082 * and enable 1195 * and enable
1083 */ 1196 */
1084 if (tgt->context_id != context_id) { 1197 if (tgt->context_id != context_id) {
1085 printk(KERN_ALERT PFX "context id mis-match\n"); 1198 printk(KERN_ERR PFX "context id mis-match\n");
1086 return; 1199 return;
1087 } 1200 }
1088 if (hba != tgt->port->priv) { 1201 interface = tgt->port->priv;
1089 printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n"); 1202 if (hba != interface->hba) {
1203 printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
1090 goto enbl_cmpl_err; 1204 goto enbl_cmpl_err;
1091 } 1205 }
1092 if (ofld_kcqe->completion_status) { 1206 if (ofld_kcqe->completion_status)
1093 goto enbl_cmpl_err; 1207 goto enbl_cmpl_err;
1094 } else { 1208 else {
1095 /* enable successful - rport ready for issuing IOs */ 1209 /* enable successful - rport ready for issuing IOs */
1096 set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); 1210 set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1097 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); 1211 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
@@ -1114,14 +1228,14 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
1114 conn_id = disable_kcqe->fcoe_conn_id; 1228 conn_id = disable_kcqe->fcoe_conn_id;
1115 tgt = hba->tgt_ofld_list[conn_id]; 1229 tgt = hba->tgt_ofld_list[conn_id];
1116 if (!tgt) { 1230 if (!tgt) {
1117 printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n"); 1231 printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
1118 return; 1232 return;
1119 } 1233 }
1120 1234
1121 BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id); 1235 BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
1122 1236
1123 if (disable_kcqe->completion_status) { 1237 if (disable_kcqe->completion_status) {
1124 printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n", 1238 printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
1125 disable_kcqe->completion_status); 1239 disable_kcqe->completion_status);
1126 return; 1240 return;
1127 } else { 1241 } else {
@@ -1143,14 +1257,14 @@ static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
1143 conn_id = destroy_kcqe->fcoe_conn_id; 1257 conn_id = destroy_kcqe->fcoe_conn_id;
1144 tgt = hba->tgt_ofld_list[conn_id]; 1258 tgt = hba->tgt_ofld_list[conn_id];
1145 if (!tgt) { 1259 if (!tgt) {
1146 printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n"); 1260 printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
1147 return; 1261 return;
1148 } 1262 }
1149 1263
1150 BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id); 1264 BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
1151 1265
1152 if (destroy_kcqe->completion_status) { 1266 if (destroy_kcqe->completion_status) {
1153 printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n", 1267 printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
1154 destroy_kcqe->completion_status); 1268 destroy_kcqe->completion_status);
1155 return; 1269 return;
1156 } else { 1270 } else {
@@ -1182,6 +1296,7 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
1182 break; 1296 break;
1183 case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION: 1297 case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
1184 printk(KERN_ERR PFX "init failure due to HSI mismatch\n"); 1298 printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
1299 break;
1185 default: 1300 default:
1186 printk(KERN_ERR PFX "Unknown Error code %d\n", err_code); 1301 printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
1187 } 1302 }
@@ -1240,7 +1355,7 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1240 } else { 1355 } else {
1241 printk(KERN_ERR PFX "DESTROY success\n"); 1356 printk(KERN_ERR PFX "DESTROY success\n");
1242 } 1357 }
1243 hba->flags |= BNX2FC_FLAG_DESTROY_CMPL; 1358 set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
1244 wake_up_interruptible(&hba->destroy_wait); 1359 wake_up_interruptible(&hba->destroy_wait);
1245 break; 1360 break;
1246 1361
@@ -1262,7 +1377,7 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1262 case FCOE_KCQE_OPCODE_FCOE_ERROR: 1377 case FCOE_KCQE_OPCODE_FCOE_ERROR:
1263 /* fall thru */ 1378 /* fall thru */
1264 default: 1379 default:
1265 printk(KERN_ALERT PFX "unknown opcode 0x%x\n", 1380 printk(KERN_ERR PFX "unknown opcode 0x%x\n",
1266 kcqe->op_code); 1381 kcqe->op_code);
1267 } 1382 }
1268 } 1383 }
@@ -1305,7 +1420,8 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1305 struct fcoe_port *port = tgt->port; 1420 struct fcoe_port *port = tgt->port;
1306 u32 reg_off; 1421 u32 reg_off;
1307 resource_size_t reg_base; 1422 resource_size_t reg_base;
1308 struct bnx2fc_hba *hba = port->priv; 1423 struct bnx2fc_interface *interface = port->priv;
1424 struct bnx2fc_hba *hba = interface->hba;
1309 1425
1310 reg_base = pci_resource_start(hba->pcidev, 1426 reg_base = pci_resource_start(hba->pcidev,
1311 BNX2X_DOORBELL_PCI_BAR); 1427 BNX2X_DOORBELL_PCI_BAR);
@@ -1344,6 +1460,96 @@ void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1344 tgt->conn_db->rq_prod = tgt->rq_prod_idx; 1460 tgt->conn_db->rq_prod = tgt->rq_prod_idx;
1345} 1461}
1346 1462
1463void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
1464 struct fcoe_task_ctx_entry *task,
1465 struct bnx2fc_cmd *orig_io_req,
1466 u32 offset)
1467{
1468 struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
1469 struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
1470 struct bnx2fc_interface *interface = tgt->port->priv;
1471 struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
1472 struct fcoe_task_ctx_entry *orig_task;
1473 struct fcoe_task_ctx_entry *task_page;
1474 struct fcoe_ext_mul_sges_ctx *sgl;
1475 u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
1476 u8 orig_task_type;
1477 u16 orig_xid = orig_io_req->xid;
1478 u32 context_id = tgt->context_id;
1479 u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
1480 u32 orig_offset = offset;
1481 int bd_count;
1482 int orig_task_idx, index;
1483 int i;
1484
1485 memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1486
1487 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1488 orig_task_type = FCOE_TASK_TYPE_WRITE;
1489 else
1490 orig_task_type = FCOE_TASK_TYPE_READ;
1491
1492 /* Tx flags */
1493 task->txwr_rxrd.const_ctx.tx_flags =
1494 FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
1495 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1496 /* init flags */
1497 task->txwr_rxrd.const_ctx.init_flags = task_type <<
1498 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1499 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1500 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1501 task->rxwr_txrd.const_ctx.init_flags = context_id <<
1502 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1503 task->rxwr_txrd.const_ctx.init_flags = context_id <<
1504 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1505
1506 task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1507
1508 task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
1509 task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
1510
1511 bd_count = orig_io_req->bd_tbl->bd_valid;
1512
1513 /* obtain the appropriate bd entry from relative offset */
1514 for (i = 0; i < bd_count; i++) {
1515 if (offset < bd[i].buf_len)
1516 break;
1517 offset -= bd[i].buf_len;
1518 }
1519 phys_addr += (i * sizeof(struct fcoe_bd_ctx));
1520
1521 if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
1522 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1523 (u32)phys_addr;
1524 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1525 (u32)((u64)phys_addr >> 32);
1526 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1527 bd_count;
1528 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
1529 offset; /* adjusted offset */
1530 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
1531 } else {
1532 orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
1533 index = orig_xid % BNX2FC_TASKS_PER_PAGE;
1534
1535 task_page = (struct fcoe_task_ctx_entry *)
1536 interface->hba->task_ctx[orig_task_idx];
1537 orig_task = &(task_page[index]);
1538
1539 /* Multiple SGEs were used for this IO */
1540 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1541 sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
1542 sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
1543 sgl->mul_sgl.sgl_size = bd_count;
1544 sgl->mul_sgl.cur_sge_off = offset; /*adjusted offset */
1545 sgl->mul_sgl.cur_sge_idx = i;
1546
1547 memset(&task->rxwr_only.rx_seq_ctx, 0,
1548 sizeof(struct fcoe_rx_seq_ctx));
1549 task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
1550 task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
1551 }
1552}
1347void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, 1553void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1348 struct fcoe_task_ctx_entry *task, 1554 struct fcoe_task_ctx_entry *task,
1349 u16 orig_xid) 1555 u16 orig_xid)
@@ -1360,7 +1566,12 @@ void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1360 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; 1566 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1361 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << 1567 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1362 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; 1568 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1363 task->txwr_rxrd.const_ctx.init_flags |= 1569 if (tgt->dev_type == TYPE_TAPE)
1570 task->txwr_rxrd.const_ctx.init_flags |=
1571 FCOE_TASK_DEV_TYPE_TAPE <<
1572 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1573 else
1574 task->txwr_rxrd.const_ctx.init_flags |=
1364 FCOE_TASK_DEV_TYPE_DISK << 1575 FCOE_TASK_DEV_TYPE_DISK <<
1365 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; 1576 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1366 task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; 1577 task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
@@ -1420,7 +1631,12 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1420 /* init flags */ 1631 /* init flags */
1421 task->txwr_rxrd.const_ctx.init_flags = task_type << 1632 task->txwr_rxrd.const_ctx.init_flags = task_type <<
1422 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; 1633 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1423 task->txwr_rxrd.const_ctx.init_flags |= 1634 if (tgt->dev_type == TYPE_TAPE)
1635 task->txwr_rxrd.const_ctx.init_flags |=
1636 FCOE_TASK_DEV_TYPE_TAPE <<
1637 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1638 else
1639 task->txwr_rxrd.const_ctx.init_flags |=
1424 FCOE_TASK_DEV_TYPE_DISK << 1640 FCOE_TASK_DEV_TYPE_DISK <<
1425 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; 1641 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1426 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << 1642 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
@@ -1477,6 +1693,7 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1477 struct bnx2fc_rport *tgt = io_req->tgt; 1693 struct bnx2fc_rport *tgt = io_req->tgt;
1478 struct fcoe_cached_sge_ctx *cached_sge; 1694 struct fcoe_cached_sge_ctx *cached_sge;
1479 struct fcoe_ext_mul_sges_ctx *sgl; 1695 struct fcoe_ext_mul_sges_ctx *sgl;
1696 int dev_type = tgt->dev_type;
1480 u64 *fcp_cmnd; 1697 u64 *fcp_cmnd;
1481 u64 tmp_fcp_cmnd[4]; 1698 u64 tmp_fcp_cmnd[4];
1482 u32 context_id; 1699 u32 context_id;
@@ -1494,20 +1711,40 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1494 task_type = FCOE_TASK_TYPE_READ; 1711 task_type = FCOE_TASK_TYPE_READ;
1495 1712
1496 /* Tx only */ 1713 /* Tx only */
1714 bd_count = bd_tbl->bd_valid;
1497 if (task_type == FCOE_TASK_TYPE_WRITE) { 1715 if (task_type == FCOE_TASK_TYPE_WRITE) {
1498 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = 1716 if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
1499 (u32)bd_tbl->bd_tbl_dma; 1717 struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1500 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = 1718
1501 (u32)((u64)bd_tbl->bd_tbl_dma >> 32); 1719 task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
1502 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1720 fcoe_bd_tbl->buf_addr_lo;
1503 bd_tbl->bd_valid; 1721 task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
1722 fcoe_bd_tbl->buf_addr_hi;
1723 task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
1724 fcoe_bd_tbl->buf_len;
1725
1726 task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1727 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1728 } else {
1729 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1730 (u32)bd_tbl->bd_tbl_dma;
1731 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1732 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1733 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1734 bd_tbl->bd_valid;
1735 }
1504 } 1736 }
1505 1737
1506 /*Tx Write Rx Read */ 1738 /*Tx Write Rx Read */
1507 /* Init state to NORMAL */ 1739 /* Init state to NORMAL */
1508 task->txwr_rxrd.const_ctx.init_flags = task_type << 1740 task->txwr_rxrd.const_ctx.init_flags |= task_type <<
1509 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; 1741 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1510 task->txwr_rxrd.const_ctx.init_flags |= 1742 if (dev_type == TYPE_TAPE)
1743 task->txwr_rxrd.const_ctx.init_flags |=
1744 FCOE_TASK_DEV_TYPE_TAPE <<
1745 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1746 else
1747 task->txwr_rxrd.const_ctx.init_flags |=
1511 FCOE_TASK_DEV_TYPE_DISK << 1748 FCOE_TASK_DEV_TYPE_DISK <<
1512 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; 1749 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1513 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << 1750 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
@@ -1550,7 +1787,8 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1550 cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge; 1787 cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
1551 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; 1788 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1552 bd_count = bd_tbl->bd_valid; 1789 bd_count = bd_tbl->bd_valid;
1553 if (task_type == FCOE_TASK_TYPE_READ) { 1790 if (task_type == FCOE_TASK_TYPE_READ &&
1791 dev_type == TYPE_DISK) {
1554 if (bd_count == 1) { 1792 if (bd_count == 1) {
1555 1793
1556 struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; 1794 struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
@@ -1582,6 +1820,11 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1582 (u32)((u64)bd_tbl->bd_tbl_dma >> 32); 1820 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1583 sgl->mul_sgl.sgl_size = bd_count; 1821 sgl->mul_sgl.sgl_size = bd_count;
1584 } 1822 }
1823 } else {
1824 sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1825 sgl->mul_sgl.cur_sge_addr.hi =
1826 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1827 sgl->mul_sgl.sgl_size = bd_count;
1585 } 1828 }
1586} 1829}
1587 1830
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 45eba6d609c9..6cc3789075bc 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1,7 +1,7 @@
1/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver. 1/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
2 * IO manager and SCSI IO processing. 2 * IO manager and SCSI IO processing.
3 * 3 *
4 * Copyright (c) 2008 - 2010 Broadcom Corporation 4 * Copyright (c) 2008 - 2011 Broadcom Corporation
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -18,8 +18,6 @@ static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
18 int bd_index); 18 int bd_index);
19static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); 19static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
20static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req); 20static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
21static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
22 struct bnx2fc_cmd *io_req);
23static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req); 21static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
24static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req); 22static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
25static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, 23static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
@@ -29,10 +27,11 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
29void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, 27void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
30 unsigned int timer_msec) 28 unsigned int timer_msec)
31{ 29{
32 struct bnx2fc_hba *hba = io_req->port->priv; 30 struct bnx2fc_interface *interface = io_req->port->priv;
33 31
34 if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work, 32 if (queue_delayed_work(interface->timer_work_queue,
35 msecs_to_jiffies(timer_msec))) 33 &io_req->timeout_work,
34 msecs_to_jiffies(timer_msec)))
36 kref_get(&io_req->refcount); 35 kref_get(&io_req->refcount);
37} 36}
38 37
@@ -217,6 +216,11 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
217 return; 216 return;
218 217
219 BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code); 218 BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
219 if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
220 /* Do not call scsi done for this IO */
221 return;
222 }
223
220 bnx2fc_unmap_sg_list(io_req); 224 bnx2fc_unmap_sg_list(io_req);
221 io_req->sc_cmd = NULL; 225 io_req->sc_cmd = NULL;
222 if (!sc_cmd) { 226 if (!sc_cmd) {
@@ -419,8 +423,8 @@ free_cmgr:
419struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) 423struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
420{ 424{
421 struct fcoe_port *port = tgt->port; 425 struct fcoe_port *port = tgt->port;
422 struct bnx2fc_hba *hba = port->priv; 426 struct bnx2fc_interface *interface = port->priv;
423 struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr; 427 struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
424 struct bnx2fc_cmd *io_req; 428 struct bnx2fc_cmd *io_req;
425 struct list_head *listp; 429 struct list_head *listp;
426 struct io_bdt *bd_tbl; 430 struct io_bdt *bd_tbl;
@@ -485,11 +489,12 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
485 kref_init(&io_req->refcount); 489 kref_init(&io_req->refcount);
486 return io_req; 490 return io_req;
487} 491}
488static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) 492
493struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
489{ 494{
490 struct fcoe_port *port = tgt->port; 495 struct fcoe_port *port = tgt->port;
491 struct bnx2fc_hba *hba = port->priv; 496 struct bnx2fc_interface *interface = port->priv;
492 struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr; 497 struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
493 struct bnx2fc_cmd *io_req; 498 struct bnx2fc_cmd *io_req;
494 struct list_head *listp; 499 struct list_head *listp;
495 struct io_bdt *bd_tbl; 500 struct io_bdt *bd_tbl;
@@ -570,7 +575,8 @@ void bnx2fc_cmd_release(struct kref *ref)
570static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) 575static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
571{ 576{
572 struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); 577 struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
573 struct bnx2fc_hba *hba = io_req->port->priv; 578 struct bnx2fc_interface *interface = io_req->port->priv;
579 struct bnx2fc_hba *hba = interface->hba;
574 size_t sz = sizeof(struct fcoe_bd_ctx); 580 size_t sz = sizeof(struct fcoe_bd_ctx);
575 581
576 /* clear tm flags */ 582 /* clear tm flags */
@@ -606,7 +612,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
606 struct bnx2fc_mp_req *mp_req; 612 struct bnx2fc_mp_req *mp_req;
607 struct fcoe_bd_ctx *mp_req_bd; 613 struct fcoe_bd_ctx *mp_req_bd;
608 struct fcoe_bd_ctx *mp_resp_bd; 614 struct fcoe_bd_ctx *mp_resp_bd;
609 struct bnx2fc_hba *hba = io_req->port->priv; 615 struct bnx2fc_interface *interface = io_req->port->priv;
616 struct bnx2fc_hba *hba = interface->hba;
610 dma_addr_t addr; 617 dma_addr_t addr;
611 size_t sz; 618 size_t sz;
612 619
@@ -682,7 +689,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
682 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 689 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
683 struct fc_rport_libfc_priv *rp = rport->dd_data; 690 struct fc_rport_libfc_priv *rp = rport->dd_data;
684 struct fcoe_port *port; 691 struct fcoe_port *port;
685 struct bnx2fc_hba *hba; 692 struct bnx2fc_interface *interface;
686 struct bnx2fc_rport *tgt; 693 struct bnx2fc_rport *tgt;
687 struct bnx2fc_cmd *io_req; 694 struct bnx2fc_cmd *io_req;
688 struct bnx2fc_mp_req *tm_req; 695 struct bnx2fc_mp_req *tm_req;
@@ -699,10 +706,10 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
699 706
700 lport = shost_priv(host); 707 lport = shost_priv(host);
701 port = lport_priv(lport); 708 port = lport_priv(lport);
702 hba = port->priv; 709 interface = port->priv;
703 710
704 if (rport == NULL) { 711 if (rport == NULL) {
705 printk(KERN_ALERT PFX "device_reset: rport is NULL\n"); 712 printk(KERN_ERR PFX "device_reset: rport is NULL\n");
706 rc = FAILED; 713 rc = FAILED;
707 goto tmf_err; 714 goto tmf_err;
708 } 715 }
@@ -745,7 +752,9 @@ retry_tmf:
745 rc = bnx2fc_init_mp_req(io_req); 752 rc = bnx2fc_init_mp_req(io_req);
746 if (rc == FAILED) { 753 if (rc == FAILED) {
747 printk(KERN_ERR PFX "Task mgmt MP request init failed\n"); 754 printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
755 spin_lock_bh(&tgt->tgt_lock);
748 kref_put(&io_req->refcount, bnx2fc_cmd_release); 756 kref_put(&io_req->refcount, bnx2fc_cmd_release);
757 spin_unlock_bh(&tgt->tgt_lock);
749 goto tmf_err; 758 goto tmf_err;
750 } 759 }
751 760
@@ -774,7 +783,8 @@ retry_tmf:
774 index = xid % BNX2FC_TASKS_PER_PAGE; 783 index = xid % BNX2FC_TASKS_PER_PAGE;
775 784
776 /* Initialize task context for this IO request */ 785 /* Initialize task context for this IO request */
777 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 786 task_page = (struct fcoe_task_ctx_entry *)
787 interface->hba->task_ctx[task_idx];
778 task = &(task_page[index]); 788 task = &(task_page[index]);
779 bnx2fc_init_mp_task(io_req, task); 789 bnx2fc_init_mp_task(io_req, task);
780 790
@@ -806,10 +816,10 @@ retry_tmf:
806 spin_unlock_bh(&tgt->tgt_lock); 816 spin_unlock_bh(&tgt->tgt_lock);
807 817
808 if (!rc) { 818 if (!rc) {
809 printk(KERN_ERR PFX "task mgmt command failed...\n"); 819 BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
810 rc = FAILED; 820 rc = FAILED;
811 } else { 821 } else {
812 printk(KERN_ERR PFX "task mgmt command success...\n"); 822 BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
813 rc = SUCCESS; 823 rc = SUCCESS;
814 } 824 }
815tmf_err: 825tmf_err:
@@ -822,7 +832,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
822 struct bnx2fc_rport *tgt = io_req->tgt; 832 struct bnx2fc_rport *tgt = io_req->tgt;
823 struct fc_rport *rport = tgt->rport; 833 struct fc_rport *rport = tgt->rport;
824 struct fc_rport_priv *rdata = tgt->rdata; 834 struct fc_rport_priv *rdata = tgt->rdata;
825 struct bnx2fc_hba *hba; 835 struct bnx2fc_interface *interface;
826 struct fcoe_port *port; 836 struct fcoe_port *port;
827 struct bnx2fc_cmd *abts_io_req; 837 struct bnx2fc_cmd *abts_io_req;
828 struct fcoe_task_ctx_entry *task; 838 struct fcoe_task_ctx_entry *task;
@@ -839,7 +849,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
839 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n"); 849 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
840 850
841 port = io_req->port; 851 port = io_req->port;
842 hba = port->priv; 852 interface = port->priv;
843 lport = port->lport; 853 lport = port->lport;
844 854
845 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { 855 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
@@ -849,7 +859,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
849 } 859 }
850 860
851 if (rport == NULL) { 861 if (rport == NULL) {
852 printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n"); 862 printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
853 rc = FAILED; 863 rc = FAILED;
854 goto abts_err; 864 goto abts_err;
855 } 865 }
@@ -896,7 +906,8 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
896 index = xid % BNX2FC_TASKS_PER_PAGE; 906 index = xid % BNX2FC_TASKS_PER_PAGE;
897 907
898 /* Initialize task context for this IO request */ 908 /* Initialize task context for this IO request */
899 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 909 task_page = (struct fcoe_task_ctx_entry *)
910 interface->hba->task_ctx[task_idx];
900 task = &(task_page[index]); 911 task = &(task_page[index]);
901 bnx2fc_init_mp_task(abts_io_req, task); 912 bnx2fc_init_mp_task(abts_io_req, task);
902 913
@@ -924,11 +935,81 @@ abts_err:
924 return rc; 935 return rc;
925} 936}
926 937
938int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
939 enum fc_rctl r_ctl)
940{
941 struct fc_lport *lport;
942 struct bnx2fc_rport *tgt = orig_io_req->tgt;
943 struct bnx2fc_interface *interface;
944 struct fcoe_port *port;
945 struct bnx2fc_cmd *seq_clnp_req;
946 struct fcoe_task_ctx_entry *task;
947 struct fcoe_task_ctx_entry *task_page;
948 struct bnx2fc_els_cb_arg *cb_arg = NULL;
949 int task_idx, index;
950 u16 xid;
951 int rc = 0;
952
953 BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
954 orig_io_req->xid);
955 kref_get(&orig_io_req->refcount);
956
957 port = orig_io_req->port;
958 interface = port->priv;
959 lport = port->lport;
960
961 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
962 if (!cb_arg) {
963 printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
964 rc = -ENOMEM;
965 goto cleanup_err;
966 }
967
968 seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
969 if (!seq_clnp_req) {
970 printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
971 rc = -ENOMEM;
972 kfree(cb_arg);
973 goto cleanup_err;
974 }
975 /* Initialize rest of io_req fields */
976 seq_clnp_req->sc_cmd = NULL;
977 seq_clnp_req->port = port;
978 seq_clnp_req->tgt = tgt;
979 seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */
980
981 xid = seq_clnp_req->xid;
982
983 task_idx = xid/BNX2FC_TASKS_PER_PAGE;
984 index = xid % BNX2FC_TASKS_PER_PAGE;
985
986 /* Initialize task context for this IO request */
987 task_page = (struct fcoe_task_ctx_entry *)
988 interface->hba->task_ctx[task_idx];
989 task = &(task_page[index]);
990 cb_arg->aborted_io_req = orig_io_req;
991 cb_arg->io_req = seq_clnp_req;
992 cb_arg->r_ctl = r_ctl;
993 cb_arg->offset = offset;
994 seq_clnp_req->cb_arg = cb_arg;
995
996 printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
997 bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);
998
999 /* Obtain free SQ entry */
1000 bnx2fc_add_2_sq(tgt, xid);
1001
1002 /* Ring doorbell */
1003 bnx2fc_ring_doorbell(tgt);
1004cleanup_err:
1005 return rc;
1006}
1007
927int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req) 1008int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
928{ 1009{
929 struct fc_lport *lport; 1010 struct fc_lport *lport;
930 struct bnx2fc_rport *tgt = io_req->tgt; 1011 struct bnx2fc_rport *tgt = io_req->tgt;
931 struct bnx2fc_hba *hba; 1012 struct bnx2fc_interface *interface;
932 struct fcoe_port *port; 1013 struct fcoe_port *port;
933 struct bnx2fc_cmd *cleanup_io_req; 1014 struct bnx2fc_cmd *cleanup_io_req;
934 struct fcoe_task_ctx_entry *task; 1015 struct fcoe_task_ctx_entry *task;
@@ -941,7 +1022,7 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
941 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n"); 1022 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
942 1023
943 port = io_req->port; 1024 port = io_req->port;
944 hba = port->priv; 1025 interface = port->priv;
945 lport = port->lport; 1026 lport = port->lport;
946 1027
947 cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP); 1028 cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
@@ -963,7 +1044,8 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
963 index = xid % BNX2FC_TASKS_PER_PAGE; 1044 index = xid % BNX2FC_TASKS_PER_PAGE;
964 1045
965 /* Initialize task context for this IO request */ 1046 /* Initialize task context for this IO request */
966 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 1047 task_page = (struct fcoe_task_ctx_entry *)
1048 interface->hba->task_ctx[task_idx];
967 task = &(task_page[index]); 1049 task = &(task_page[index]);
968 orig_xid = io_req->xid; 1050 orig_xid = io_req->xid;
969 1051
@@ -1031,7 +1113,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1031 1113
1032 lport = shost_priv(sc_cmd->device->host); 1114 lport = shost_priv(sc_cmd->device->host);
1033 if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { 1115 if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
1034 printk(KERN_ALERT PFX "eh_abort: link not ready\n"); 1116 printk(KERN_ERR PFX "eh_abort: link not ready\n");
1035 return rc; 1117 return rc;
1036 } 1118 }
1037 1119
@@ -1062,7 +1144,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1062 * io_req is no longer in the active_q. 1144 * io_req is no longer in the active_q.
1063 */ 1145 */
1064 if (tgt->flush_in_prog) { 1146 if (tgt->flush_in_prog) {
1065 printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " 1147 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1066 "flush in progress\n", io_req->xid); 1148 "flush in progress\n", io_req->xid);
1067 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1149 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1068 spin_unlock_bh(&tgt->tgt_lock); 1150 spin_unlock_bh(&tgt->tgt_lock);
@@ -1070,7 +1152,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1070 } 1152 }
1071 1153
1072 if (io_req->on_active_queue == 0) { 1154 if (io_req->on_active_queue == 0) {
1073 printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " 1155 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1074 "not on active_q\n", io_req->xid); 1156 "not on active_q\n", io_req->xid);
1075 /* 1157 /*
1076 * This condition can happen only due to the FW bug, 1158 * This condition can happen only due to the FW bug,
@@ -1108,7 +1190,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1108 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags); 1190 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
1109 rc = bnx2fc_initiate_abts(io_req); 1191 rc = bnx2fc_initiate_abts(io_req);
1110 } else { 1192 } else {
1111 printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " 1193 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1112 "already in abts processing\n", io_req->xid); 1194 "already in abts processing\n", io_req->xid);
1113 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1195 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1114 spin_unlock_bh(&tgt->tgt_lock); 1196 spin_unlock_bh(&tgt->tgt_lock);
@@ -1149,6 +1231,42 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1149 return rc; 1231 return rc;
1150} 1232}
1151 1233
1234void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
1235 struct fcoe_task_ctx_entry *task,
1236 u8 rx_state)
1237{
1238 struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
1239 struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
1240 u32 offset = cb_arg->offset;
1241 enum fc_rctl r_ctl = cb_arg->r_ctl;
1242 int rc = 0;
1243 struct bnx2fc_rport *tgt = orig_io_req->tgt;
1244
1245 BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x"
1246 "cmd_type = %d\n",
1247 seq_clnp_req->xid, seq_clnp_req->cmd_type);
1248
1249 if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
1250 printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
1251 seq_clnp_req->xid);
1252 goto free_cb_arg;
1253 }
1254 kref_get(&orig_io_req->refcount);
1255
1256 spin_unlock_bh(&tgt->tgt_lock);
1257 rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
1258 spin_lock_bh(&tgt->tgt_lock);
1259
1260 if (rc)
1261 printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
1262 " IO will abort\n");
1263 seq_clnp_req->cb_arg = NULL;
1264 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
1265free_cb_arg:
1266 kfree(cb_arg);
1267 return;
1268}
1269
1152void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req, 1270void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
1153 struct fcoe_task_ctx_entry *task, 1271 struct fcoe_task_ctx_entry *task,
1154 u8 num_rq) 1272 u8 num_rq)
@@ -1378,7 +1496,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
1378 fc_hdr->fh_r_ctl); 1496 fc_hdr->fh_r_ctl);
1379 } 1497 }
1380 if (!sc_cmd->SCp.ptr) { 1498 if (!sc_cmd->SCp.ptr) {
1381 printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n"); 1499 printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
1382 return; 1500 return;
1383 } 1501 }
1384 switch (io_req->fcp_status) { 1502 switch (io_req->fcp_status) {
@@ -1410,7 +1528,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
1410 io_req->on_tmf_queue = 0; 1528 io_req->on_tmf_queue = 0;
1411 } else { 1529 } else {
1412 1530
1413 printk(KERN_ALERT PFX "Command not on active_cmd_queue!\n"); 1531 printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
1414 return; 1532 return;
1415 } 1533 }
1416 1534
@@ -1597,7 +1715,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
1597 1715
1598 if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) { 1716 if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
1599 /* Invalid sense sense length. */ 1717 /* Invalid sense sense length. */
1600 printk(KERN_ALERT PFX "invalid sns length %d\n", 1718 printk(KERN_ERR PFX "invalid sns length %d\n",
1601 rq_buff_len); 1719 rq_buff_len);
1602 /* reset rq_buff_len */ 1720 /* reset rq_buff_len */
1603 rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ; 1721 rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
@@ -1780,7 +1898,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
1780 scsi_set_resid(sc_cmd, io_req->fcp_resid); 1898 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1781 break; 1899 break;
1782 default: 1900 default:
1783 printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n", 1901 printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
1784 io_req->fcp_status); 1902 io_req->fcp_status);
1785 break; 1903 break;
1786 } 1904 }
@@ -1789,14 +1907,15 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
1789 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1907 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1790} 1908}
1791 1909
1792static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, 1910int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
1793 struct bnx2fc_cmd *io_req) 1911 struct bnx2fc_cmd *io_req)
1794{ 1912{
1795 struct fcoe_task_ctx_entry *task; 1913 struct fcoe_task_ctx_entry *task;
1796 struct fcoe_task_ctx_entry *task_page; 1914 struct fcoe_task_ctx_entry *task_page;
1797 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1915 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1798 struct fcoe_port *port = tgt->port; 1916 struct fcoe_port *port = tgt->port;
1799 struct bnx2fc_hba *hba = port->priv; 1917 struct bnx2fc_interface *interface = port->priv;
1918 struct bnx2fc_hba *hba = interface->hba;
1800 struct fc_lport *lport = port->lport; 1919 struct fc_lport *lport = port->lport;
1801 struct fcoe_dev_stats *stats; 1920 struct fcoe_dev_stats *stats;
1802 int task_idx, index; 1921 int task_idx, index;
@@ -1854,7 +1973,8 @@ static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
1854 } 1973 }
1855 1974
1856 /* Time IO req */ 1975 /* Time IO req */
1857 bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT); 1976 if (tgt->io_timeout)
1977 bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
1858 /* Obtain free SQ entry */ 1978 /* Obtain free SQ entry */
1859 bnx2fc_add_2_sq(tgt, xid); 1979 bnx2fc_add_2_sq(tgt, xid);
1860 1980
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 3e892bd66fbe..d5311b577cca 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -2,7 +2,7 @@
2 * Handles operations such as session offload/upload etc, and manages 2 * Handles operations such as session offload/upload etc, and manages
3 * session resources such as connection id and qp resources. 3 * session resources such as connection id and qp resources.
4 * 4 *
5 * Copyright (c) 2008 - 2010 Broadcom Corporation 5 * Copyright (c) 2008 - 2011 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -65,7 +65,8 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
65{ 65{
66 struct fc_lport *lport = rdata->local_port; 66 struct fc_lport *lport = rdata->local_port;
67 struct fc_rport *rport = rdata->rport; 67 struct fc_rport *rport = rdata->rport;
68 struct bnx2fc_hba *hba = port->priv; 68 struct bnx2fc_interface *interface = port->priv;
69 struct bnx2fc_hba *hba = interface->hba;
69 int rval; 70 int rval;
70 int i = 0; 71 int i = 0;
71 72
@@ -237,7 +238,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
237static void bnx2fc_upload_session(struct fcoe_port *port, 238static void bnx2fc_upload_session(struct fcoe_port *port,
238 struct bnx2fc_rport *tgt) 239 struct bnx2fc_rport *tgt)
239{ 240{
240 struct bnx2fc_hba *hba = port->priv; 241 struct bnx2fc_interface *interface = port->priv;
242 struct bnx2fc_hba *hba = interface->hba;
241 243
242 BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n", 244 BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
243 tgt->num_active_ios.counter); 245 tgt->num_active_ios.counter);
@@ -316,7 +318,8 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
316{ 318{
317 319
318 struct fc_rport *rport = rdata->rport; 320 struct fc_rport *rport = rdata->rport;
319 struct bnx2fc_hba *hba = port->priv; 321 struct bnx2fc_interface *interface = port->priv;
322 struct bnx2fc_hba *hba = interface->hba;
320 struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; 323 struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
321 struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; 324 struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
322 325
@@ -350,6 +353,14 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
350 tgt->rq_cons_idx = 0; 353 tgt->rq_cons_idx = 0;
351 atomic_set(&tgt->num_active_ios, 0); 354 atomic_set(&tgt->num_active_ios, 0);
352 355
356 if (rdata->flags & FC_RP_FLAGS_RETRY) {
357 tgt->dev_type = TYPE_TAPE;
358 tgt->io_timeout = 0; /* use default ULP timeout */
359 } else {
360 tgt->dev_type = TYPE_DISK;
361 tgt->io_timeout = BNX2FC_IO_TIMEOUT;
362 }
363
353 /* initialize sq doorbell */ 364 /* initialize sq doorbell */
354 sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE; 365 sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE;
355 sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE << 366 sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE <<
@@ -392,7 +403,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
392 enum fc_rport_event event) 403 enum fc_rport_event event)
393{ 404{
394 struct fcoe_port *port = lport_priv(lport); 405 struct fcoe_port *port = lport_priv(lport);
395 struct bnx2fc_hba *hba = port->priv; 406 struct bnx2fc_interface *interface = port->priv;
407 struct bnx2fc_hba *hba = interface->hba;
396 struct fc_rport *rport = rdata->rport; 408 struct fc_rport *rport = rdata->rport;
397 struct fc_rport_libfc_priv *rp; 409 struct fc_rport_libfc_priv *rp;
398 struct bnx2fc_rport *tgt; 410 struct bnx2fc_rport *tgt;
@@ -403,7 +415,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
403 switch (event) { 415 switch (event) {
404 case RPORT_EV_READY: 416 case RPORT_EV_READY:
405 if (!rport) { 417 if (!rport) {
406 printk(KERN_ALERT PFX "rport is NULL: ERROR!\n"); 418 printk(KERN_ERR PFX "rport is NULL: ERROR!\n");
407 break; 419 break;
408 } 420 }
409 421
@@ -415,7 +427,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
415 * We should not come here, as lport will 427 * We should not come here, as lport will
416 * take care of fabric login 428 * take care of fabric login
417 */ 429 */
418 printk(KERN_ALERT PFX "%x - rport_event_handler ERROR\n", 430 printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n",
419 rdata->ids.port_id); 431 rdata->ids.port_id);
420 break; 432 break;
421 } 433 }
@@ -483,7 +495,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
483 break; 495 break;
484 496
485 if (!rport) { 497 if (!rport) {
486 printk(KERN_ALERT PFX "%x - rport not created Yet!!\n", 498 printk(KERN_INFO PFX "%x - rport not created Yet!!\n",
487 port_id); 499 port_id);
488 break; 500 break;
489 } 501 }
@@ -537,7 +549,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
537struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, 549struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
538 u32 port_id) 550 u32 port_id)
539{ 551{
540 struct bnx2fc_hba *hba = port->priv; 552 struct bnx2fc_interface *interface = port->priv;
553 struct bnx2fc_hba *hba = interface->hba;
541 struct bnx2fc_rport *tgt; 554 struct bnx2fc_rport *tgt;
542 struct fc_rport_priv *rdata; 555 struct fc_rport_priv *rdata;
543 int i; 556 int i;
@@ -552,7 +565,7 @@ struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
552 "obtained\n"); 565 "obtained\n");
553 return tgt; 566 return tgt;
554 } else { 567 } else {
555 printk(KERN_ERR PFX "rport 0x%x " 568 BNX2FC_TGT_DBG(tgt, "rport 0x%x "
556 "is in DELETED state\n", 569 "is in DELETED state\n",
557 rdata->ids.port_id); 570 rdata->ids.port_id);
558 return NULL; 571 return NULL;
@@ -633,7 +646,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
633 tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, 646 tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
634 &tgt->sq_dma, GFP_KERNEL); 647 &tgt->sq_dma, GFP_KERNEL);
635 if (!tgt->sq) { 648 if (!tgt->sq) {
636 printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n", 649 printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
637 tgt->sq_mem_size); 650 tgt->sq_mem_size);
638 goto mem_alloc_failure; 651 goto mem_alloc_failure;
639 } 652 }
@@ -646,7 +659,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
646 tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, 659 tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
647 &tgt->cq_dma, GFP_KERNEL); 660 &tgt->cq_dma, GFP_KERNEL);
648 if (!tgt->cq) { 661 if (!tgt->cq) {
649 printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n", 662 printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
650 tgt->cq_mem_size); 663 tgt->cq_mem_size);
651 goto mem_alloc_failure; 664 goto mem_alloc_failure;
652 } 665 }
@@ -659,7 +672,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
659 tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, 672 tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
660 &tgt->rq_dma, GFP_KERNEL); 673 &tgt->rq_dma, GFP_KERNEL);
661 if (!tgt->rq) { 674 if (!tgt->rq) {
662 printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n", 675 printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
663 tgt->rq_mem_size); 676 tgt->rq_mem_size);
664 goto mem_alloc_failure; 677 goto mem_alloc_failure;
665 } 678 }
@@ -671,7 +684,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
671 tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, 684 tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
672 &tgt->rq_pbl_dma, GFP_KERNEL); 685 &tgt->rq_pbl_dma, GFP_KERNEL);
673 if (!tgt->rq_pbl) { 686 if (!tgt->rq_pbl) {
674 printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n", 687 printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
675 tgt->rq_pbl_size); 688 tgt->rq_pbl_size);
676 goto mem_alloc_failure; 689 goto mem_alloc_failure;
677 } 690 }
@@ -697,7 +710,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
697 tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, 710 tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
698 &tgt->xferq_dma, GFP_KERNEL); 711 &tgt->xferq_dma, GFP_KERNEL);
699 if (!tgt->xferq) { 712 if (!tgt->xferq) {
700 printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n", 713 printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
701 tgt->xferq_mem_size); 714 tgt->xferq_mem_size);
702 goto mem_alloc_failure; 715 goto mem_alloc_failure;
703 } 716 }
@@ -711,7 +724,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
711 tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, 724 tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
712 &tgt->confq_dma, GFP_KERNEL); 725 &tgt->confq_dma, GFP_KERNEL);
713 if (!tgt->confq) { 726 if (!tgt->confq) {
714 printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n", 727 printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
715 tgt->confq_mem_size); 728 tgt->confq_mem_size);
716 goto mem_alloc_failure; 729 goto mem_alloc_failure;
717 } 730 }
@@ -726,7 +739,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
726 tgt->confq_pbl_size, 739 tgt->confq_pbl_size,
727 &tgt->confq_pbl_dma, GFP_KERNEL); 740 &tgt->confq_pbl_dma, GFP_KERNEL);
728 if (!tgt->confq_pbl) { 741 if (!tgt->confq_pbl) {
729 printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n", 742 printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
730 tgt->confq_pbl_size); 743 tgt->confq_pbl_size);
731 goto mem_alloc_failure; 744 goto mem_alloc_failure;
732 } 745 }
@@ -751,7 +764,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
751 tgt->conn_db_mem_size, 764 tgt->conn_db_mem_size,
752 &tgt->conn_db_dma, GFP_KERNEL); 765 &tgt->conn_db_dma, GFP_KERNEL);
753 if (!tgt->conn_db) { 766 if (!tgt->conn_db) {
754 printk(KERN_ALERT PFX "unable to allocate conn_db %d\n", 767 printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
755 tgt->conn_db_mem_size); 768 tgt->conn_db_mem_size);
756 goto mem_alloc_failure; 769 goto mem_alloc_failure;
757 } 770 }
@@ -767,7 +780,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
767 &tgt->lcq_dma, GFP_KERNEL); 780 &tgt->lcq_dma, GFP_KERNEL);
768 781
769 if (!tgt->lcq) { 782 if (!tgt->lcq) {
770 printk(KERN_ALERT PFX "unable to allocate lcq %d\n", 783 printk(KERN_ERR PFX "unable to allocate lcq %d\n",
771 tgt->lcq_mem_size); 784 tgt->lcq_mem_size);
772 goto mem_alloc_failure; 785 goto mem_alloc_failure;
773 } 786 }
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 030a96c646c3..9ae80cd5953b 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -332,11 +332,11 @@ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
332{ 332{
333 struct bnx2i_cmd *bnx2i_cmd; 333 struct bnx2i_cmd *bnx2i_cmd;
334 struct bnx2i_login_request *login_wqe; 334 struct bnx2i_login_request *login_wqe;
335 struct iscsi_login *login_hdr; 335 struct iscsi_login_req *login_hdr;
336 u32 dword; 336 u32 dword;
337 337
338 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; 338 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
339 login_hdr = (struct iscsi_login *)task->hdr; 339 login_hdr = (struct iscsi_login_req *)task->hdr;
340 login_wqe = (struct bnx2i_login_request *) 340 login_wqe = (struct bnx2i_login_request *)
341 bnx2i_conn->ep->qp.sq_prod_qe; 341 bnx2i_conn->ep->qp.sq_prod_qe;
342 342
@@ -1349,7 +1349,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
1349 struct bnx2i_cmd_response *resp_cqe; 1349 struct bnx2i_cmd_response *resp_cqe;
1350 struct bnx2i_cmd *bnx2i_cmd; 1350 struct bnx2i_cmd *bnx2i_cmd;
1351 struct iscsi_task *task; 1351 struct iscsi_task *task;
1352 struct iscsi_cmd_rsp *hdr; 1352 struct iscsi_scsi_rsp *hdr;
1353 u32 datalen = 0; 1353 u32 datalen = 0;
1354 1354
1355 resp_cqe = (struct bnx2i_cmd_response *)cqe; 1355 resp_cqe = (struct bnx2i_cmd_response *)cqe;
@@ -1376,7 +1376,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
1376 } 1376 }
1377 bnx2i_iscsi_unmap_sg_list(bnx2i_cmd); 1377 bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
1378 1378
1379 hdr = (struct iscsi_cmd_rsp *)task->hdr; 1379 hdr = (struct iscsi_scsi_rsp *)task->hdr;
1380 resp_cqe = (struct bnx2i_cmd_response *)cqe; 1380 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1381 hdr->opcode = resp_cqe->op_code; 1381 hdr->opcode = resp_cqe->op_code;
1382 hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn); 1382 hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 5c55a75ae597..cffd4d75df56 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1213,7 +1213,7 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
1213 struct bnx2i_conn *bnx2i_conn = conn->dd_data; 1213 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1214 struct scsi_cmnd *sc = task->sc; 1214 struct scsi_cmnd *sc = task->sc;
1215 struct bnx2i_cmd *cmd = task->dd_data; 1215 struct bnx2i_cmd *cmd = task->dd_data;
1216 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr; 1216 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
1217 1217
1218 if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 > 1218 if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 >
1219 hba->max_sqes) 1219 hba->max_sqes)
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 2e7c136bb805..27c9d65d54a9 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -128,25 +128,7 @@ struct c4_inquiry {
128 u8 reserved[2]; 128 u8 reserved[2];
129}; 129};
130 130
131struct rdac_controller { 131#define UNIQUE_ID_LEN 16
132 u8 subsys_id[SUBSYS_ID_LEN];
133 u8 slot_id[SLOT_ID_LEN];
134 int use_ms10;
135 struct kref kref;
136 struct list_head node; /* list of all controllers */
137 union {
138 struct rdac_pg_legacy legacy;
139 struct rdac_pg_expanded expanded;
140 } mode_select;
141 u8 index;
142 u8 array_name[ARRAY_LABEL_LEN];
143 spinlock_t ms_lock;
144 int ms_queued;
145 struct work_struct ms_work;
146 struct scsi_device *ms_sdev;
147 struct list_head ms_head;
148};
149
150struct c8_inquiry { 132struct c8_inquiry {
151 u8 peripheral_info; 133 u8 peripheral_info;
152 u8 page_code; /* 0xC8 */ 134 u8 page_code; /* 0xC8 */
@@ -159,12 +141,31 @@ struct c8_inquiry {
159 u8 vol_user_label_len; 141 u8 vol_user_label_len;
160 u8 vol_user_label[60]; 142 u8 vol_user_label[60];
161 u8 array_uniq_id_len; 143 u8 array_uniq_id_len;
162 u8 array_unique_id[16]; 144 u8 array_unique_id[UNIQUE_ID_LEN];
163 u8 array_user_label_len; 145 u8 array_user_label_len;
164 u8 array_user_label[60]; 146 u8 array_user_label[60];
165 u8 lun[8]; 147 u8 lun[8];
166}; 148};
167 149
150struct rdac_controller {
151 u8 array_id[UNIQUE_ID_LEN];
152 int use_ms10;
153 struct kref kref;
154 struct list_head node; /* list of all controllers */
155 union {
156 struct rdac_pg_legacy legacy;
157 struct rdac_pg_expanded expanded;
158 } mode_select;
159 u8 index;
160 u8 array_name[ARRAY_LABEL_LEN];
161 struct Scsi_Host *host;
162 spinlock_t ms_lock;
163 int ms_queued;
164 struct work_struct ms_work;
165 struct scsi_device *ms_sdev;
166 struct list_head ms_head;
167};
168
168struct c2_inquiry { 169struct c2_inquiry {
169 u8 peripheral_info; 170 u8 peripheral_info;
170 u8 page_code; /* 0xC2 */ 171 u8 page_code; /* 0xC2 */
@@ -369,16 +370,17 @@ static void release_controller(struct kref *kref)
369 kfree(ctlr); 370 kfree(ctlr);
370} 371}
371 372
372static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id, 373static struct rdac_controller *get_controller(int index, char *array_name,
373 char *array_name) 374 u8 *array_id, struct scsi_device *sdev)
374{ 375{
375 struct rdac_controller *ctlr, *tmp; 376 struct rdac_controller *ctlr, *tmp;
376 377
377 spin_lock(&list_lock); 378 spin_lock(&list_lock);
378 379
379 list_for_each_entry(tmp, &ctlr_list, node) { 380 list_for_each_entry(tmp, &ctlr_list, node) {
380 if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) && 381 if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
381 (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) { 382 (tmp->index == index) &&
383 (tmp->host == sdev->host)) {
382 kref_get(&tmp->kref); 384 kref_get(&tmp->kref);
383 spin_unlock(&list_lock); 385 spin_unlock(&list_lock);
384 return tmp; 386 return tmp;
@@ -389,16 +391,11 @@ static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
389 goto done; 391 goto done;
390 392
391 /* initialize fields of controller */ 393 /* initialize fields of controller */
392 memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN); 394 memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
393 memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN); 395 ctlr->index = index;
396 ctlr->host = sdev->host;
394 memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN); 397 memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);
395 398
396 /* update the controller index */
397 if (slot_id[1] == 0x31)
398 ctlr->index = 0;
399 else
400 ctlr->index = 1;
401
402 kref_init(&ctlr->kref); 399 kref_init(&ctlr->kref);
403 ctlr->use_ms10 = -1; 400 ctlr->use_ms10 = -1;
404 ctlr->ms_queued = 0; 401 ctlr->ms_queued = 0;
@@ -444,7 +441,7 @@ done:
444} 441}
445 442
446static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h, 443static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
447 char *array_name) 444 char *array_name, u8 *array_id)
448{ 445{
449 int err, i; 446 int err, i;
450 struct c8_inquiry *inqp; 447 struct c8_inquiry *inqp;
@@ -463,6 +460,8 @@ static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
463 *(array_name+i) = inqp->array_user_label[(2*i)+1]; 460 *(array_name+i) = inqp->array_user_label[(2*i)+1];
464 461
465 *(array_name+ARRAY_LABEL_LEN-1) = '\0'; 462 *(array_name+ARRAY_LABEL_LEN-1) = '\0';
463 memset(array_id, 0, UNIQUE_ID_LEN);
464 memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
466 } 465 }
467 return err; 466 return err;
468} 467}
@@ -504,16 +503,20 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
504} 503}
505 504
506static int initialize_controller(struct scsi_device *sdev, 505static int initialize_controller(struct scsi_device *sdev,
507 struct rdac_dh_data *h, char *array_name) 506 struct rdac_dh_data *h, char *array_name, u8 *array_id)
508{ 507{
509 int err; 508 int err, index;
510 struct c4_inquiry *inqp; 509 struct c4_inquiry *inqp;
511 510
512 err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h); 511 err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
513 if (err == SCSI_DH_OK) { 512 if (err == SCSI_DH_OK) {
514 inqp = &h->inq.c4; 513 inqp = &h->inq.c4;
515 h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id, 514 /* get the controller index */
516 array_name); 515 if (inqp->slot_id[1] == 0x31)
516 index = 0;
517 else
518 index = 1;
519 h->ctlr = get_controller(index, array_name, array_id, sdev);
517 if (!h->ctlr) 520 if (!h->ctlr)
518 err = SCSI_DH_RES_TEMP_UNAVAIL; 521 err = SCSI_DH_RES_TEMP_UNAVAIL;
519 } 522 }
@@ -835,6 +838,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
835 unsigned long flags; 838 unsigned long flags;
836 int err; 839 int err;
837 char array_name[ARRAY_LABEL_LEN]; 840 char array_name[ARRAY_LABEL_LEN];
841 char array_id[UNIQUE_ID_LEN];
838 842
839 scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) 843 scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
840 + sizeof(*h) , GFP_KERNEL); 844 + sizeof(*h) , GFP_KERNEL);
@@ -849,11 +853,11 @@ static int rdac_bus_attach(struct scsi_device *sdev)
849 h->lun = UNINITIALIZED_LUN; 853 h->lun = UNINITIALIZED_LUN;
850 h->state = RDAC_STATE_ACTIVE; 854 h->state = RDAC_STATE_ACTIVE;
851 855
852 err = get_lun_info(sdev, h, array_name); 856 err = get_lun_info(sdev, h, array_name, array_id);
853 if (err != SCSI_DH_OK) 857 if (err != SCSI_DH_OK)
854 goto failed; 858 goto failed;
855 859
856 err = initialize_controller(sdev, h, array_name); 860 err = initialize_controller(sdev, h, array_name, array_id);
857 if (err != SCSI_DH_OK) 861 if (err != SCSI_DH_OK)
858 goto failed; 862 goto failed;
859 863
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
index 179ad77f6cc9..bd9e31e16249 100644
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ b/drivers/scsi/dpt/dpti_i2o.h
@@ -22,7 +22,7 @@
22#include <linux/i2o-dev.h> 22#include <linux/i2o-dev.h>
23 23
24#include <linux/notifier.h> 24#include <linux/notifier.h>
25#include <asm/atomic.h> 25#include <linux/atomic.h>
26 26
27 27
28/* 28/*
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 204fa8d4b4ab..ba710e350ac5 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -487,6 +487,19 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
487} 487}
488 488
489/** 489/**
490 * fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame
491 * @port: The FCoE port
492 * @skb: The FIP/FCoE packet to be sent
493 */
494static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb)
495{
496 if (port->fcoe_pending_queue.qlen)
497 fcoe_check_wait_queue(port->lport, skb);
498 else if (fcoe_start_io(skb))
499 fcoe_check_wait_queue(port->lport, skb);
500}
501
502/**
490 * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame 503 * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
491 * @fip: The FCoE controller 504 * @fip: The FCoE controller
492 * @skb: The FIP packet to be sent 505 * @skb: The FIP packet to be sent
@@ -494,7 +507,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
494static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) 507static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
495{ 508{
496 skb->dev = fcoe_from_ctlr(fip)->netdev; 509 skb->dev = fcoe_from_ctlr(fip)->netdev;
497 dev_queue_xmit(skb); 510 fcoe_port_send(lport_priv(fip->lp), skb);
498} 511}
499 512
500/** 513/**
@@ -1257,30 +1270,20 @@ static int fcoe_cpu_callback(struct notifier_block *nfb,
1257/** 1270/**
1258 * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming 1271 * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming
1259 * command. 1272 * command.
1260 * @curr_cpu: CPU which received request
1261 * 1273 *
1262 * This routine selects next CPU based on cpumask. 1274 * This routine selects next CPU based on cpumask to distribute
1275 * incoming requests in round robin.
1263 * 1276 *
1264 * Returns: int (CPU number). Caller to verify if returned CPU is online or not. 1277 * Returns: int CPU number
1265 */ 1278 */
1266static unsigned int fcoe_select_cpu(unsigned int curr_cpu) 1279static inline unsigned int fcoe_select_cpu(void)
1267{ 1280{
1268 static unsigned int selected_cpu; 1281 static unsigned int selected_cpu;
1269 1282
1270 if (num_online_cpus() == 1) 1283 selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
1271 return curr_cpu; 1284 if (selected_cpu >= nr_cpu_ids)
1272 /* 1285 selected_cpu = cpumask_first(cpu_online_mask);
1273 * Doing following check, to skip "curr_cpu (smp_processor_id)" 1286
1274 * from selection of CPU is intentional. This is to avoid same CPU
1275 * doing post-processing of command. "curr_cpu" to just receive
1276 * incoming request in case where rx_id is UNKNOWN and all other
1277 * CPU to actually process the command(s)
1278 */
1279 do {
1280 selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
1281 if (selected_cpu >= nr_cpu_ids)
1282 selected_cpu = cpumask_first(cpu_online_mask);
1283 } while (selected_cpu == curr_cpu);
1284 return selected_cpu; 1287 return selected_cpu;
1285} 1288}
1286 1289
@@ -1350,30 +1353,26 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1350 1353
1351 fr = fcoe_dev_from_skb(skb); 1354 fr = fcoe_dev_from_skb(skb);
1352 fr->fr_dev = lport; 1355 fr->fr_dev = lport;
1353 fr->ptype = ptype;
1354 1356
1355 /* 1357 /*
1356 * In case the incoming frame's exchange is originated from 1358 * In case the incoming frame's exchange is originated from
1357 * the initiator, then received frame's exchange id is ANDed 1359 * the initiator, then received frame's exchange id is ANDed
1358 * with fc_cpu_mask bits to get the same cpu on which exchange 1360 * with fc_cpu_mask bits to get the same cpu on which exchange
1359 * was originated, otherwise just use the current cpu. 1361 * was originated, otherwise select cpu using rx exchange id
1362 * or fcoe_select_cpu().
1360 */ 1363 */
1361 if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX) 1364 if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
1362 cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask; 1365 cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
1363 else { 1366 else {
1364 cpu = smp_processor_id(); 1367 if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)
1365 1368 cpu = fcoe_select_cpu();
1366 if ((fh->fh_type == FC_TYPE_FCP) && 1369 else
1367 (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
1368 do {
1369 cpu = fcoe_select_cpu(cpu);
1370 } while (!cpu_online(cpu));
1371 } else if ((fh->fh_type == FC_TYPE_FCP) &&
1372 (ntohs(fh->fh_rx_id) != FC_XID_UNKNOWN)) {
1373 cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask; 1370 cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
1374 } else
1375 cpu = smp_processor_id();
1376 } 1371 }
1372
1373 if (cpu >= nr_cpu_ids)
1374 goto err;
1375
1377 fps = &per_cpu(fcoe_percpu, cpu); 1376 fps = &per_cpu(fcoe_percpu, cpu);
1378 spin_lock_bh(&fps->fcoe_rx_list.lock); 1377 spin_lock_bh(&fps->fcoe_rx_list.lock);
1379 if (unlikely(!fps->thread)) { 1378 if (unlikely(!fps->thread)) {
@@ -1572,11 +1571,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1572 1571
1573 /* send down to lld */ 1572 /* send down to lld */
1574 fr_dev(fp) = lport; 1573 fr_dev(fp) = lport;
1575 if (port->fcoe_pending_queue.qlen) 1574 fcoe_port_send(port, skb);
1576 fcoe_check_wait_queue(lport, skb);
1577 else if (fcoe_start_io(skb))
1578 fcoe_check_wait_queue(lport, skb);
1579
1580 return 0; 1575 return 0;
1581} 1576}
1582 1577
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 6bba23a26303..ec61bdb833ac 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -46,7 +46,7 @@
46#include <linux/cciss_ioctl.h> 46#include <linux/cciss_ioctl.h>
47#include <linux/string.h> 47#include <linux/string.h>
48#include <linux/bitmap.h> 48#include <linux/bitmap.h>
49#include <asm/atomic.h> 49#include <linux/atomic.h>
50#include <linux/kthread.h> 50#include <linux/kthread.h>
51#include "hpsa_cmd.h" 51#include "hpsa_cmd.h"
52#include "hpsa.h" 52#include "hpsa.h"
@@ -1219,8 +1219,8 @@ static void complete_scsi_command(struct CommandList *cp)
1219 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); 1219 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1220 break; 1220 break;
1221 case CMD_UNSOLICITED_ABORT: 1221 case CMD_UNSOLICITED_ABORT:
1222 cmd->result = DID_RESET << 16; 1222 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
1223 dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited " 1223 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1224 "abort\n", cp); 1224 "abort\n", cp);
1225 break; 1225 break;
1226 case CMD_TIMEOUT: 1226 case CMD_TIMEOUT:
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 6d8dcd4dd06b..7f53ceaa7239 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -214,7 +214,7 @@ static void SA5_submit_command(struct ctlr_info *h,
214 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr, 214 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
215 c->Header.Tag.lower); 215 c->Header.Tag.lower);
216 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 216 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
217 (void) readl(h->vaddr + SA5_REQUEST_PORT_OFFSET); 217 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
218 h->commands_outstanding++; 218 h->commands_outstanding++;
219 if (h->commands_outstanding > h->max_outstanding) 219 if (h->commands_outstanding > h->max_outstanding)
220 h->max_outstanding = h->commands_outstanding; 220 h->max_outstanding = h->commands_outstanding;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 888086c4e709..8d636301e32c 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -8778,14 +8778,14 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8778 if (rc != PCIBIOS_SUCCESSFUL) { 8778 if (rc != PCIBIOS_SUCCESSFUL) {
8779 dev_err(&pdev->dev, "Failed to save PCI config space\n"); 8779 dev_err(&pdev->dev, "Failed to save PCI config space\n");
8780 rc = -EIO; 8780 rc = -EIO;
8781 goto cleanup_nomem; 8781 goto out_msi_disable;
8782 } 8782 }
8783 8783
8784 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) 8784 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8785 goto cleanup_nomem; 8785 goto out_msi_disable;
8786 8786
8787 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) 8787 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8788 goto cleanup_nomem; 8788 goto out_msi_disable;
8789 8789
8790 if (ioa_cfg->sis64) 8790 if (ioa_cfg->sis64)
8791 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) 8791 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
@@ -8800,7 +8800,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8800 if (rc < 0) { 8800 if (rc < 0) {
8801 dev_err(&pdev->dev, 8801 dev_err(&pdev->dev,
8802 "Couldn't allocate enough memory for device driver!\n"); 8802 "Couldn't allocate enough memory for device driver!\n");
8803 goto cleanup_nomem; 8803 goto out_msi_disable;
8804 } 8804 }
8805 8805
8806 /* 8806 /*
@@ -8845,10 +8845,10 @@ out:
8845 8845
8846cleanup_nolog: 8846cleanup_nolog:
8847 ipr_free_mem(ioa_cfg); 8847 ipr_free_mem(ioa_cfg);
8848cleanup_nomem:
8849 iounmap(ipr_regs);
8850out_msi_disable: 8848out_msi_disable:
8851 pci_disable_msi(pdev); 8849 pci_disable_msi(pdev);
8850cleanup_nomem:
8851 iounmap(ipr_regs);
8852out_release_regions: 8852out_release_regions:
8853 pci_release_regions(pdev); 8853 pci_release_regions(pdev);
8854out_scsi_host_put: 8854out_scsi_host_put:
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index f5a0665b6773..01ff082dc34c 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -802,10 +802,8 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
802 pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); 802 pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
803 spin_lock_bh(&pool->lock); 803 spin_lock_bh(&pool->lock);
804 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); 804 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
805 if (ep) { 805 if (ep && ep->xid == xid)
806 fc_exch_hold(ep); 806 fc_exch_hold(ep);
807 WARN_ON(ep->xid != xid);
808 }
809 spin_unlock_bh(&pool->lock); 807 spin_unlock_bh(&pool->lock);
810 } 808 }
811 return ep; 809 return ep;
@@ -2465,8 +2463,11 @@ int fc_setup_exch_mgr(void)
2465 2463
2466 fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue"); 2464 fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
2467 if (!fc_exch_workqueue) 2465 if (!fc_exch_workqueue)
2468 return -ENOMEM; 2466 goto err;
2469 return 0; 2467 return 0;
2468err:
2469 kmem_cache_destroy(fc_em_cachep);
2470 return -ENOMEM;
2470} 2471}
2471 2472
2472/** 2473/**
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 9cd2149519ac..afb63c843144 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -498,7 +498,7 @@ crc_err:
498 stats = per_cpu_ptr(lport->dev_stats, get_cpu()); 498 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
499 stats->ErrorFrames++; 499 stats->ErrorFrames++;
500 /* per cpu count, not total count, but OK for limit */ 500 /* per cpu count, not total count, but OK for limit */
501 if (stats->InvalidCRCCount++ < 5) 501 if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT)
502 printk(KERN_WARNING "libfc: CRC error on data " 502 printk(KERN_WARNING "libfc: CRC error on data "
503 "frame for port (%6.6x)\n", 503 "frame for port (%6.6x)\n",
504 lport->port_id); 504 lport->port_id);
@@ -690,7 +690,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
690} 690}
691 691
692/** 692/**
693 * fc_fcp_abts_resp() - Send an ABTS response 693 * fc_fcp_abts_resp() - Receive an ABTS response
694 * @fsp: The FCP packet that is being aborted 694 * @fsp: The FCP packet that is being aborted
695 * @fp: The response frame 695 * @fp: The response frame
696 */ 696 */
@@ -730,7 +730,7 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
730} 730}
731 731
732/** 732/**
733 * fc_fcp_recv() - Reveive an FCP frame 733 * fc_fcp_recv() - Receive an FCP frame
734 * @seq: The sequence the frame is on 734 * @seq: The sequence the frame is on
735 * @fp: The received frame 735 * @fp: The received frame
736 * @arg: The related FCP packet 736 * @arg: The related FCP packet
@@ -1084,6 +1084,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
1084 rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv); 1084 rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
1085 if (unlikely(rc)) { 1085 if (unlikely(rc)) {
1086 spin_lock_irqsave(&si->scsi_queue_lock, flags); 1086 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1087 fsp->cmd->SCp.ptr = NULL;
1087 list_del(&fsp->list); 1088 list_del(&fsp->list);
1088 spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 1089 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1089 } 1090 }
@@ -1645,12 +1646,10 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1645 struct fc_seq *seq; 1646 struct fc_seq *seq;
1646 struct fcp_srr *srr; 1647 struct fcp_srr *srr;
1647 struct fc_frame *fp; 1648 struct fc_frame *fp;
1648 u8 cdb_op;
1649 unsigned int rec_tov; 1649 unsigned int rec_tov;
1650 1650
1651 rport = fsp->rport; 1651 rport = fsp->rport;
1652 rpriv = rport->dd_data; 1652 rpriv = rport->dd_data;
1653 cdb_op = fsp->cdb_cmd.fc_cdb[0];
1654 1653
1655 if (!(rpriv->flags & FC_RP_FLAGS_RETRY) || 1654 if (!(rpriv->flags & FC_RP_FLAGS_RETRY) ||
1656 rpriv->rp_state != RPORT_ST_READY) 1655 rpriv->rp_state != RPORT_ST_READY)
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e008b1673507..e55ed9cf23fb 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1352,7 +1352,6 @@ static void fc_lport_timeout(struct work_struct *work)
1352 WARN_ON(1); 1352 WARN_ON(1);
1353 break; 1353 break;
1354 case LPORT_ST_READY: 1354 case LPORT_ST_READY:
1355 WARN_ON(1);
1356 break; 1355 break;
1357 case LPORT_ST_RESET: 1356 case LPORT_ST_RESET:
1358 break; 1357 break;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index d7a4120034a2..256a999d010b 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -84,22 +84,6 @@ MODULE_PARM_DESC(debug_libiscsi_eh,
84 __func__, ##arg); \ 84 __func__, ##arg); \
85 } while (0); 85 } while (0);
86 86
87/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
88#define SNA32_CHECK 2147483648UL
89
90static int iscsi_sna_lt(u32 n1, u32 n2)
91{
92 return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
93 (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
94}
95
96/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
97static int iscsi_sna_lte(u32 n1, u32 n2)
98{
99 return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
100 (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
101}
102
103inline void iscsi_conn_queue_work(struct iscsi_conn *conn) 87inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
104{ 88{
105 struct Scsi_Host *shost = conn->session->host; 89 struct Scsi_Host *shost = conn->session->host;
@@ -360,7 +344,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
360 struct iscsi_conn *conn = task->conn; 344 struct iscsi_conn *conn = task->conn;
361 struct iscsi_session *session = conn->session; 345 struct iscsi_session *session = conn->session;
362 struct scsi_cmnd *sc = task->sc; 346 struct scsi_cmnd *sc = task->sc;
363 struct iscsi_cmd *hdr; 347 struct iscsi_scsi_req *hdr;
364 unsigned hdrlength, cmd_len; 348 unsigned hdrlength, cmd_len;
365 itt_t itt; 349 itt_t itt;
366 int rc; 350 int rc;
@@ -374,7 +358,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
374 if (rc) 358 if (rc)
375 return rc; 359 return rc;
376 } 360 }
377 hdr = (struct iscsi_cmd *) task->hdr; 361 hdr = (struct iscsi_scsi_req *)task->hdr;
378 itt = hdr->itt; 362 itt = hdr->itt;
379 memset(hdr, 0, sizeof(*hdr)); 363 memset(hdr, 0, sizeof(*hdr));
380 364
@@ -830,7 +814,7 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
830 struct iscsi_task *task, char *data, 814 struct iscsi_task *task, char *data,
831 int datalen) 815 int datalen)
832{ 816{
833 struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr; 817 struct iscsi_scsi_rsp *rhdr = (struct iscsi_scsi_rsp *)hdr;
834 struct iscsi_session *session = conn->session; 818 struct iscsi_session *session = conn->session;
835 struct scsi_cmnd *sc = task->sc; 819 struct scsi_cmnd *sc = task->sc;
836 820
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 874e29d9533f..f84084bba2f0 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -849,6 +849,9 @@ static struct domain_device *sas_ex_discover_expander(
849 849
850 res = sas_discover_expander(child); 850 res = sas_discover_expander(child);
851 if (res) { 851 if (res) {
852 spin_lock_irq(&parent->port->dev_list_lock);
853 list_del(&child->dev_list_node);
854 spin_unlock_irq(&parent->port->dev_list_lock);
852 kfree(child); 855 kfree(child);
853 return NULL; 856 return NULL;
854 } 857 }
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 8ec2c86a49d4..c088a36d1f33 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -20,6 +20,11 @@
20 *******************************************************************/ 20 *******************************************************************/
21 21
22#include <scsi/scsi_host.h> 22#include <scsi/scsi_host.h>
23
24#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
25#define CONFIG_SCSI_LPFC_DEBUG_FS
26#endif
27
23struct lpfc_sli2_slim; 28struct lpfc_sli2_slim;
24 29
25#define LPFC_PCI_DEV_LP 0x1 30#define LPFC_PCI_DEV_LP 0x1
@@ -465,9 +470,10 @@ enum intr_type_t {
465struct unsol_rcv_ct_ctx { 470struct unsol_rcv_ct_ctx {
466 uint32_t ctxt_id; 471 uint32_t ctxt_id;
467 uint32_t SID; 472 uint32_t SID;
468 uint32_t oxid;
469 uint32_t flags; 473 uint32_t flags;
470#define UNSOL_VALID 0x00000001 474#define UNSOL_VALID 0x00000001
475 uint16_t oxid;
476 uint16_t rxid;
471}; 477};
472 478
473#define LPFC_USER_LINK_SPEED_AUTO 0 /* auto select (default)*/ 479#define LPFC_USER_LINK_SPEED_AUTO 0 /* auto select (default)*/
@@ -674,6 +680,9 @@ struct lpfc_hba {
674 uint32_t cfg_enable_rrq; 680 uint32_t cfg_enable_rrq;
675 uint32_t cfg_topology; 681 uint32_t cfg_topology;
676 uint32_t cfg_link_speed; 682 uint32_t cfg_link_speed;
683#define LPFC_FCF_FOV 1 /* Fast fcf failover */
684#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
685 uint32_t cfg_fcf_failover_policy;
677 uint32_t cfg_cr_delay; 686 uint32_t cfg_cr_delay;
678 uint32_t cfg_cr_count; 687 uint32_t cfg_cr_count;
679 uint32_t cfg_multi_ring_support; 688 uint32_t cfg_multi_ring_support;
@@ -845,9 +854,13 @@ struct lpfc_hba {
845 /* iDiag debugfs sub-directory */ 854 /* iDiag debugfs sub-directory */
846 struct dentry *idiag_root; 855 struct dentry *idiag_root;
847 struct dentry *idiag_pci_cfg; 856 struct dentry *idiag_pci_cfg;
857 struct dentry *idiag_bar_acc;
848 struct dentry *idiag_que_info; 858 struct dentry *idiag_que_info;
849 struct dentry *idiag_que_acc; 859 struct dentry *idiag_que_acc;
850 struct dentry *idiag_drb_acc; 860 struct dentry *idiag_drb_acc;
861 struct dentry *idiag_ctl_acc;
862 struct dentry *idiag_mbx_acc;
863 struct dentry *idiag_ext_acc;
851#endif 864#endif
852 865
853 /* Used for deferred freeing of ELS data buffers */ 866 /* Used for deferred freeing of ELS data buffers */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 135a53baa735..2542f1f8bf86 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -755,6 +755,47 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
755} 755}
756 756
757/** 757/**
758 * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readyness
759 * @phba: lpfc_hba pointer.
760 *
761 * Description:
762 * SLI4 interface type-2 device to wait on the sliport status register for
763 * the readyness after performing a firmware reset.
764 *
765 * Returns:
766 * zero for success
767 **/
768static int
769lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
770{
771 struct lpfc_register portstat_reg;
772 int i;
773
774
775 lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
776 &portstat_reg.word0);
777
778 /* wait for the SLI port firmware ready after firmware reset */
779 for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
780 msleep(10);
781 lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
782 &portstat_reg.word0);
783 if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
784 continue;
785 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
786 continue;
787 if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
788 continue;
789 break;
790 }
791
792 if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
793 return 0;
794 else
795 return -EIO;
796}
797
798/**
758 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc 799 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
759 * @phba: lpfc_hba pointer. 800 * @phba: lpfc_hba pointer.
760 * 801 *
@@ -769,6 +810,7 @@ static ssize_t
769lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) 810lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
770{ 811{
771 struct completion online_compl; 812 struct completion online_compl;
813 struct pci_dev *pdev = phba->pcidev;
772 uint32_t reg_val; 814 uint32_t reg_val;
773 int status = 0; 815 int status = 0;
774 int rc; 816 int rc;
@@ -781,6 +823,14 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
781 LPFC_SLI_INTF_IF_TYPE_2)) 823 LPFC_SLI_INTF_IF_TYPE_2))
782 return -EPERM; 824 return -EPERM;
783 825
826 if (!pdev->is_physfn)
827 return -EPERM;
828
829 /* Disable SR-IOV virtual functions if enabled */
830 if (phba->cfg_sriov_nr_virtfn) {
831 pci_disable_sriov(pdev);
832 phba->cfg_sriov_nr_virtfn = 0;
833 }
784 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 834 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
785 835
786 if (status != 0) 836 if (status != 0)
@@ -805,7 +855,10 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
805 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); 855 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
806 856
807 /* delay driver action following IF_TYPE_2 reset */ 857 /* delay driver action following IF_TYPE_2 reset */
808 msleep(100); 858 rc = lpfc_sli4_pdev_status_reg_wait(phba);
859
860 if (rc)
861 return -EIO;
809 862
810 init_completion(&online_compl); 863 init_completion(&online_compl);
811 rc = lpfc_workq_post_event(phba, &status, &online_compl, 864 rc = lpfc_workq_post_event(phba, &status, &online_compl,
@@ -895,6 +948,10 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
895 948
896 if (!phba->cfg_enable_hba_reset) 949 if (!phba->cfg_enable_hba_reset)
897 return -EACCES; 950 return -EACCES;
951
952 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
953 "3050 lpfc_board_mode set to %s\n", buf);
954
898 init_completion(&online_compl); 955 init_completion(&online_compl);
899 956
900 if(strncmp(buf, "online", sizeof("online") - 1) == 0) { 957 if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
@@ -1290,6 +1347,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
1290 if (phba->sli_rev == LPFC_SLI_REV4) 1347 if (phba->sli_rev == LPFC_SLI_REV4)
1291 val = 0; 1348 val = 0;
1292 1349
1350 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1351 "3051 lpfc_poll changed from %d to %d\n",
1352 phba->cfg_poll, val);
1353
1293 spin_lock_irq(&phba->hbalock); 1354 spin_lock_irq(&phba->hbalock);
1294 1355
1295 old_val = phba->cfg_poll; 1356 old_val = phba->cfg_poll;
@@ -1414,80 +1475,10 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
1414 struct Scsi_Host *shost = class_to_shost(dev); 1475 struct Scsi_Host *shost = class_to_shost(dev);
1415 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1476 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1416 struct lpfc_hba *phba = vport->phba; 1477 struct lpfc_hba *phba = vport->phba;
1417 struct pci_dev *pdev = phba->pcidev; 1478 uint16_t max_nr_virtfn;
1418 union lpfc_sli4_cfg_shdr *shdr;
1419 uint32_t shdr_status, shdr_add_status;
1420 LPFC_MBOXQ_t *mboxq;
1421 struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
1422 struct lpfc_rsrc_desc_pcie *desc;
1423 uint32_t max_nr_virtfn;
1424 uint32_t desc_count;
1425 int length, rc, i;
1426
1427 if ((phba->sli_rev < LPFC_SLI_REV4) ||
1428 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
1429 LPFC_SLI_INTF_IF_TYPE_2))
1430 return -EPERM;
1431
1432 if (!pdev->is_physfn)
1433 return snprintf(buf, PAGE_SIZE, "%d\n", 0);
1434
1435 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1436 if (!mboxq)
1437 return -ENOMEM;
1438
1439 /* get the maximum number of virtfn support by physfn */
1440 length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
1441 sizeof(struct lpfc_sli4_cfg_mhdr));
1442 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
1443 LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
1444 length, LPFC_SLI4_MBX_EMBED);
1445 shdr = (union lpfc_sli4_cfg_shdr *)
1446 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
1447 bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
1448 phba->sli4_hba.iov.pf_number + 1);
1449
1450 get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
1451 bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
1452 LPFC_CFG_TYPE_CURRENT_ACTIVE);
1453
1454 rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
1455 lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
1456
1457 if (rc != MBX_TIMEOUT) {
1458 /* check return status */
1459 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1460 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
1461 &shdr->response);
1462 if (shdr_status || shdr_add_status || rc)
1463 goto error_out;
1464
1465 } else
1466 goto error_out;
1467
1468 desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
1469
1470 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
1471 desc = (struct lpfc_rsrc_desc_pcie *)
1472 &get_prof_cfg->u.response.prof_cfg.desc[i];
1473 if (LPFC_RSRC_DESC_TYPE_PCIE ==
1474 bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
1475 max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
1476 desc);
1477 break;
1478 }
1479 }
1480
1481 if (i < LPFC_RSRC_DESC_MAX_NUM) {
1482 if (rc != MBX_TIMEOUT)
1483 mempool_free(mboxq, phba->mbox_mem_pool);
1484 return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
1485 }
1486 1479
1487error_out: 1480 max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
1488 if (rc != MBX_TIMEOUT) 1481 return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
1489 mempool_free(mboxq, phba->mbox_mem_pool);
1490 return -EIO;
1491} 1482}
1492 1483
1493/** 1484/**
@@ -1605,6 +1596,9 @@ static int \
1605lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \ 1596lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
1606{ \ 1597{ \
1607 if (val >= minval && val <= maxval) {\ 1598 if (val >= minval && val <= maxval) {\
1599 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
1600 "3052 lpfc_" #attr " changed from %d to %d\n", \
1601 phba->cfg_##attr, val); \
1608 phba->cfg_##attr = val;\ 1602 phba->cfg_##attr = val;\
1609 return 0;\ 1603 return 0;\
1610 }\ 1604 }\
@@ -1762,6 +1756,9 @@ static int \
1762lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ 1756lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
1763{ \ 1757{ \
1764 if (val >= minval && val <= maxval) {\ 1758 if (val >= minval && val <= maxval) {\
1759 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
1760 "3053 lpfc_" #attr " changed from %d to %d\n", \
1761 vport->cfg_##attr, val); \
1765 vport->cfg_##attr = val;\ 1762 vport->cfg_##attr = val;\
1766 return 0;\ 1763 return 0;\
1767 }\ 1764 }\
@@ -2196,6 +2193,9 @@ lpfc_param_show(enable_npiv);
2196lpfc_param_init(enable_npiv, 1, 0, 1); 2193lpfc_param_init(enable_npiv, 1, 0, 1);
2197static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); 2194static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
2198 2195
2196LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
2197 "FCF Fast failover=1 Priority failover=2");
2198
2199int lpfc_enable_rrq; 2199int lpfc_enable_rrq;
2200module_param(lpfc_enable_rrq, int, S_IRUGO); 2200module_param(lpfc_enable_rrq, int, S_IRUGO);
2201MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality"); 2201MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
@@ -2678,6 +2678,9 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
2678 if (nolip) 2678 if (nolip)
2679 return strlen(buf); 2679 return strlen(buf);
2680 2680
2681 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2682 "3054 lpfc_topology changed from %d to %d\n",
2683 prev_val, val);
2681 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); 2684 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
2682 if (err) { 2685 if (err) {
2683 phba->cfg_topology = prev_val; 2686 phba->cfg_topology = prev_val;
@@ -3101,6 +3104,10 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
3101 if (sscanf(val_buf, "%i", &val) != 1) 3104 if (sscanf(val_buf, "%i", &val) != 1)
3102 return -EINVAL; 3105 return -EINVAL;
3103 3106
3107 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3108 "3055 lpfc_link_speed changed from %d to %d %s\n",
3109 phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
3110
3104 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || 3111 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
3105 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || 3112 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
3106 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || 3113 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
@@ -3678,7 +3685,9 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
3678# - Default will result in registering capabilities for all profiles. 3685# - Default will result in registering capabilities for all profiles.
3679# 3686#
3680*/ 3687*/
3681unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION; 3688unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
3689 SHOST_DIX_TYPE0_PROTECTION |
3690 SHOST_DIX_TYPE1_PROTECTION;
3682 3691
3683module_param(lpfc_prot_mask, uint, S_IRUGO); 3692module_param(lpfc_prot_mask, uint, S_IRUGO);
3684MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); 3693MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
@@ -3769,6 +3778,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
3769 &dev_attr_lpfc_fdmi_on, 3778 &dev_attr_lpfc_fdmi_on,
3770 &dev_attr_lpfc_max_luns, 3779 &dev_attr_lpfc_max_luns,
3771 &dev_attr_lpfc_enable_npiv, 3780 &dev_attr_lpfc_enable_npiv,
3781 &dev_attr_lpfc_fcf_failover_policy,
3772 &dev_attr_lpfc_enable_rrq, 3782 &dev_attr_lpfc_enable_rrq,
3773 &dev_attr_nport_evt_cnt, 3783 &dev_attr_nport_evt_cnt,
3774 &dev_attr_board_mode, 3784 &dev_attr_board_mode,
@@ -4989,6 +4999,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4989 lpfc_link_speed_init(phba, lpfc_link_speed); 4999 lpfc_link_speed_init(phba, lpfc_link_speed);
4990 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 5000 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
4991 lpfc_enable_npiv_init(phba, lpfc_enable_npiv); 5001 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
5002 lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
4992 lpfc_enable_rrq_init(phba, lpfc_enable_rrq); 5003 lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
4993 lpfc_use_msi_init(phba, lpfc_use_msi); 5004 lpfc_use_msi_init(phba, lpfc_use_msi);
4994 lpfc_fcp_imax_init(phba, lpfc_fcp_imax); 5005 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 7fb0ba4cbfa7..6760c69f5253 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -42,6 +42,7 @@
42#include "lpfc.h" 42#include "lpfc.h"
43#include "lpfc_logmsg.h" 43#include "lpfc_logmsg.h"
44#include "lpfc_crtn.h" 44#include "lpfc_crtn.h"
45#include "lpfc_debugfs.h"
45#include "lpfc_vport.h" 46#include "lpfc_vport.h"
46#include "lpfc_version.h" 47#include "lpfc_version.h"
47 48
@@ -960,8 +961,10 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
960 evt_dat->immed_dat].oxid, 961 evt_dat->immed_dat].oxid,
961 phba->ct_ctx[ 962 phba->ct_ctx[
962 evt_dat->immed_dat].SID); 963 evt_dat->immed_dat].SID);
964 phba->ct_ctx[evt_dat->immed_dat].rxid =
965 piocbq->iocb.ulpContext;
963 phba->ct_ctx[evt_dat->immed_dat].oxid = 966 phba->ct_ctx[evt_dat->immed_dat].oxid =
964 piocbq->iocb.ulpContext; 967 piocbq->iocb.unsli3.rcvsli3.ox_id;
965 phba->ct_ctx[evt_dat->immed_dat].SID = 968 phba->ct_ctx[evt_dat->immed_dat].SID =
966 piocbq->iocb.un.rcvels.remoteID; 969 piocbq->iocb.un.rcvels.remoteID;
967 phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID; 970 phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
@@ -1312,7 +1315,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1312 rc = IOCB_ERROR; 1315 rc = IOCB_ERROR;
1313 goto issue_ct_rsp_exit; 1316 goto issue_ct_rsp_exit;
1314 } 1317 }
1315 icmd->ulpContext = phba->ct_ctx[tag].oxid; 1318 icmd->ulpContext = phba->ct_ctx[tag].rxid;
1319 icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
1316 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID); 1320 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1317 if (!ndlp) { 1321 if (!ndlp) {
1318 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 1322 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
@@ -1337,9 +1341,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1337 goto issue_ct_rsp_exit; 1341 goto issue_ct_rsp_exit;
1338 } 1342 }
1339 1343
1340 icmd->un.ulpWord[3] = ndlp->nlp_rpi; 1344 icmd->un.ulpWord[3] =
1341 if (phba->sli_rev == LPFC_SLI_REV4)
1342 icmd->ulpContext =
1343 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 1345 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1344 1346
1345 /* The exchange is done, mark the entry as invalid */ 1347 /* The exchange is done, mark the entry as invalid */
@@ -1351,8 +1353,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1351 1353
1352 /* Xmit CT response on exchange <xid> */ 1354 /* Xmit CT response on exchange <xid> */
1353 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1355 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1354 "2722 Xmit CT response on exchange x%x Data: x%x x%x\n", 1356 "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
1355 icmd->ulpContext, icmd->ulpIoTag, phba->link_state); 1357 icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
1356 1358
1357 ctiocb->iocb_cmpl = NULL; 1359 ctiocb->iocb_cmpl = NULL;
1358 ctiocb->iocb_flag |= LPFC_IO_LIBDFC; 1360 ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
@@ -1471,13 +1473,12 @@ send_mgmt_rsp_exit:
1471/** 1473/**
1472 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode 1474 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
1473 * @phba: Pointer to HBA context object. 1475 * @phba: Pointer to HBA context object.
1474 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1475 * 1476 *
1476 * This function is responsible for preparing driver for diag loopback 1477 * This function is responsible for preparing driver for diag loopback
1477 * on device. 1478 * on device.
1478 */ 1479 */
1479static int 1480static int
1480lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job) 1481lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1481{ 1482{
1482 struct lpfc_vport **vports; 1483 struct lpfc_vport **vports;
1483 struct Scsi_Host *shost; 1484 struct Scsi_Host *shost;
@@ -1521,7 +1522,6 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
1521/** 1522/**
1522 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode 1523 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1523 * @phba: Pointer to HBA context object. 1524 * @phba: Pointer to HBA context object.
1524 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1525 * 1525 *
1526 * This function is responsible for driver exit processing of setting up 1526 * This function is responsible for driver exit processing of setting up
1527 * diag loopback mode on device. 1527 * diag loopback mode on device.
@@ -1567,7 +1567,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1567 uint32_t link_flags; 1567 uint32_t link_flags;
1568 uint32_t timeout; 1568 uint32_t timeout;
1569 LPFC_MBOXQ_t *pmboxq; 1569 LPFC_MBOXQ_t *pmboxq;
1570 int mbxstatus; 1570 int mbxstatus = MBX_SUCCESS;
1571 int i = 0; 1571 int i = 0;
1572 int rc = 0; 1572 int rc = 0;
1573 1573
@@ -1586,7 +1586,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1586 goto job_error; 1586 goto job_error;
1587 } 1587 }
1588 1588
1589 rc = lpfc_bsg_diag_mode_enter(phba, job); 1589 rc = lpfc_bsg_diag_mode_enter(phba);
1590 if (rc) 1590 if (rc)
1591 goto job_error; 1591 goto job_error;
1592 1592
@@ -1741,7 +1741,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1741 uint32_t link_flags, timeout, req_len, alloc_len; 1741 uint32_t link_flags, timeout, req_len, alloc_len;
1742 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback; 1742 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1743 LPFC_MBOXQ_t *pmboxq = NULL; 1743 LPFC_MBOXQ_t *pmboxq = NULL;
1744 int mbxstatus, i, rc = 0; 1744 int mbxstatus = MBX_SUCCESS, i, rc = 0;
1745 1745
1746 /* no data to return just the return code */ 1746 /* no data to return just the return code */
1747 job->reply->reply_payload_rcv_len = 0; 1747 job->reply->reply_payload_rcv_len = 0;
@@ -1758,7 +1758,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1758 goto job_error; 1758 goto job_error;
1759 } 1759 }
1760 1760
1761 rc = lpfc_bsg_diag_mode_enter(phba, job); 1761 rc = lpfc_bsg_diag_mode_enter(phba);
1762 if (rc) 1762 if (rc)
1763 goto job_error; 1763 goto job_error;
1764 1764
@@ -1982,7 +1982,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
1982 goto job_error; 1982 goto job_error;
1983 } 1983 }
1984 1984
1985 rc = lpfc_bsg_diag_mode_enter(phba, job); 1985 rc = lpfc_bsg_diag_mode_enter(phba);
1986 if (rc) 1986 if (rc)
1987 goto job_error; 1987 goto job_error;
1988 1988
@@ -3178,6 +3178,11 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3178 "(x%x/x%x) complete bsg job done, bsize:%d\n", 3178 "(x%x/x%x) complete bsg job done, bsize:%d\n",
3179 phba->mbox_ext_buf_ctx.nembType, 3179 phba->mbox_ext_buf_ctx.nembType,
3180 phba->mbox_ext_buf_ctx.mboxType, size); 3180 phba->mbox_ext_buf_ctx.mboxType, size);
3181 lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3182 phba->mbox_ext_buf_ctx.nembType,
3183 phba->mbox_ext_buf_ctx.mboxType,
3184 dma_ebuf, sta_pos_addr,
3185 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3181 } else 3186 } else
3182 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3187 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3183 3188
@@ -3430,6 +3435,10 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3430 "ext_buf_cnt:%d\n", ext_buf_cnt); 3435 "ext_buf_cnt:%d\n", ext_buf_cnt);
3431 } 3436 }
3432 3437
3438 /* before dma descriptor setup */
3439 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3440 sta_pre_addr, dmabuf, ext_buf_cnt);
3441
3433 /* reject non-embedded mailbox command with none external buffer */ 3442 /* reject non-embedded mailbox command with none external buffer */
3434 if (ext_buf_cnt == 0) { 3443 if (ext_buf_cnt == 0) {
3435 rc = -EPERM; 3444 rc = -EPERM;
@@ -3477,6 +3486,10 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3477 } 3486 }
3478 } 3487 }
3479 3488
3489 /* after dma descriptor setup */
3490 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3491 sta_pos_addr, dmabuf, ext_buf_cnt);
3492
3480 /* construct base driver mbox command */ 3493 /* construct base driver mbox command */
3481 pmb = &pmboxq->u.mb; 3494 pmb = &pmboxq->u.mb;
3482 pmbx = (uint8_t *)dmabuf->virt; 3495 pmbx = (uint8_t *)dmabuf->virt;
@@ -3511,7 +3524,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3511 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3524 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3512 "2947 Issued SLI_CONFIG ext-buffer " 3525 "2947 Issued SLI_CONFIG ext-buffer "
3513 "maibox command, rc:x%x\n", rc); 3526 "maibox command, rc:x%x\n", rc);
3514 return 1; 3527 return SLI_CONFIG_HANDLED;
3515 } 3528 }
3516 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3529 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3517 "2948 Failed to issue SLI_CONFIG ext-buffer " 3530 "2948 Failed to issue SLI_CONFIG ext-buffer "
@@ -3549,7 +3562,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3549 LPFC_MBOXQ_t *pmboxq = NULL; 3562 LPFC_MBOXQ_t *pmboxq = NULL;
3550 MAILBOX_t *pmb; 3563 MAILBOX_t *pmb;
3551 uint8_t *mbx; 3564 uint8_t *mbx;
3552 int rc = 0, i; 3565 int rc = SLI_CONFIG_NOT_HANDLED, i;
3553 3566
3554 mbox_req = 3567 mbox_req =
3555 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; 3568 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
@@ -3591,12 +3604,20 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3591 "ext_buf_cnt:%d\n", ext_buf_cnt); 3604 "ext_buf_cnt:%d\n", ext_buf_cnt);
3592 } 3605 }
3593 3606
3607 /* before dma buffer descriptor setup */
3608 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
3609 sta_pre_addr, dmabuf, ext_buf_cnt);
3610
3594 if (ext_buf_cnt == 0) 3611 if (ext_buf_cnt == 0)
3595 return -EPERM; 3612 return -EPERM;
3596 3613
3597 /* for the first external buffer */ 3614 /* for the first external buffer */
3598 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf); 3615 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3599 3616
3617 /* after dma descriptor setup */
3618 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
3619 sta_pos_addr, dmabuf, ext_buf_cnt);
3620
3600 /* log for looking forward */ 3621 /* log for looking forward */
3601 for (i = 1; i < ext_buf_cnt; i++) { 3622 for (i = 1; i < ext_buf_cnt; i++) {
3602 if (nemb_tp == nemb_mse) 3623 if (nemb_tp == nemb_mse)
@@ -3660,7 +3681,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3660 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3681 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3661 "2955 Issued SLI_CONFIG ext-buffer " 3682 "2955 Issued SLI_CONFIG ext-buffer "
3662 "maibox command, rc:x%x\n", rc); 3683 "maibox command, rc:x%x\n", rc);
3663 return 1; 3684 return SLI_CONFIG_HANDLED;
3664 } 3685 }
3665 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3686 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3666 "2956 Failed to issue SLI_CONFIG ext-buffer " 3687 "2956 Failed to issue SLI_CONFIG ext-buffer "
@@ -3668,6 +3689,11 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3668 rc = -EPIPE; 3689 rc = -EPIPE;
3669 } 3690 }
3670 3691
3692 /* wait for additoinal external buffers */
3693 job->reply->result = 0;
3694 job->job_done(job);
3695 return SLI_CONFIG_HANDLED;
3696
3671job_error: 3697job_error:
3672 if (pmboxq) 3698 if (pmboxq)
3673 mempool_free(pmboxq, phba->mbox_mem_pool); 3699 mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -3840,6 +3866,12 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
3840 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list, 3866 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
3841 struct lpfc_dmabuf, list); 3867 struct lpfc_dmabuf, list);
3842 list_del_init(&dmabuf->list); 3868 list_del_init(&dmabuf->list);
3869
3870 /* after dma buffer descriptor setup */
3871 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
3872 mbox_rd, dma_ebuf, sta_pos_addr,
3873 dmabuf, index);
3874
3843 pbuf = (uint8_t *)dmabuf->virt; 3875 pbuf = (uint8_t *)dmabuf->virt;
3844 job->reply->reply_payload_rcv_len = 3876 job->reply->reply_payload_rcv_len =
3845 sg_copy_from_buffer(job->reply_payload.sg_list, 3877 sg_copy_from_buffer(job->reply_payload.sg_list,
@@ -3922,6 +3954,11 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
3922 dmabuf); 3954 dmabuf);
3923 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list); 3955 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3924 3956
3957 /* after write dma buffer */
3958 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
3959 mbox_wr, dma_ebuf, sta_pos_addr,
3960 dmabuf, index);
3961
3925 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) { 3962 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
3926 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3963 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3927 "2968 SLI_CONFIG ext-buffer wr all %d " 3964 "2968 SLI_CONFIG ext-buffer wr all %d "
@@ -3959,7 +3996,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
3959 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3996 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3960 "2969 Issued SLI_CONFIG ext-buffer " 3997 "2969 Issued SLI_CONFIG ext-buffer "
3961 "maibox command, rc:x%x\n", rc); 3998 "maibox command, rc:x%x\n", rc);
3962 return 1; 3999 return SLI_CONFIG_HANDLED;
3963 } 4000 }
3964 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 4001 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3965 "2970 Failed to issue SLI_CONFIG ext-buffer " 4002 "2970 Failed to issue SLI_CONFIG ext-buffer "
@@ -4039,14 +4076,14 @@ lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
4039 struct lpfc_dmabuf *dmabuf) 4076 struct lpfc_dmabuf *dmabuf)
4040{ 4077{
4041 struct dfc_mbox_req *mbox_req; 4078 struct dfc_mbox_req *mbox_req;
4042 int rc; 4079 int rc = SLI_CONFIG_NOT_HANDLED;
4043 4080
4044 mbox_req = 4081 mbox_req =
4045 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; 4082 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4046 4083
4047 /* mbox command with/without single external buffer */ 4084 /* mbox command with/without single external buffer */
4048 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0) 4085 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4049 return SLI_CONFIG_NOT_HANDLED; 4086 return rc;
4050 4087
4051 /* mbox command and first external buffer */ 4088 /* mbox command and first external buffer */
4052 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) { 4089 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
@@ -4249,7 +4286,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4249 * mailbox extension size 4286 * mailbox extension size
4250 */ 4287 */
4251 if ((transmit_length > receive_length) || 4288 if ((transmit_length > receive_length) ||
4252 (transmit_length > MAILBOX_EXT_SIZE)) { 4289 (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4253 rc = -ERANGE; 4290 rc = -ERANGE;
4254 goto job_done; 4291 goto job_done;
4255 } 4292 }
@@ -4272,7 +4309,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4272 /* receive length cannot be greater than mailbox 4309 /* receive length cannot be greater than mailbox
4273 * extension size 4310 * extension size
4274 */ 4311 */
4275 if (receive_length > MAILBOX_EXT_SIZE) { 4312 if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4276 rc = -ERANGE; 4313 rc = -ERANGE;
4277 goto job_done; 4314 goto job_done;
4278 } 4315 }
@@ -4306,7 +4343,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4306 bde = (struct ulp_bde64 *)&pmb->un.varWords[4]; 4343 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4307 4344
4308 /* bde size cannot be greater than mailbox ext size */ 4345 /* bde size cannot be greater than mailbox ext size */
4309 if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) { 4346 if (bde->tus.f.bdeSize >
4347 BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4310 rc = -ERANGE; 4348 rc = -ERANGE;
4311 goto job_done; 4349 goto job_done;
4312 } 4350 }
@@ -4332,7 +4370,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4332 * mailbox extension size 4370 * mailbox extension size
4333 */ 4371 */
4334 if ((receive_length == 0) || 4372 if ((receive_length == 0) ||
4335 (receive_length > MAILBOX_EXT_SIZE)) { 4373 (receive_length >
4374 BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4336 rc = -ERANGE; 4375 rc = -ERANGE;
4337 goto job_done; 4376 goto job_done;
4338 } 4377 }
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index fc20c247f36b..a6db6aef1331 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -235,9 +235,11 @@ int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
235void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *); 235void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
236void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *); 236void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
237uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); 237uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
238void lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *, uint16_t);
238int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); 239int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
239void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); 240void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
240int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t); 241int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
242void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
241 243
242int lpfc_mem_alloc(struct lpfc_hba *, int align); 244int lpfc_mem_alloc(struct lpfc_hba *, int align);
243void lpfc_mem_free(struct lpfc_hba *); 245void lpfc_mem_free(struct lpfc_hba *);
@@ -371,6 +373,10 @@ extern struct lpfc_hbq_init *lpfc_hbq_defs[];
371/* SLI4 if_type 2 externs. */ 373/* SLI4 if_type 2 externs. */
372int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *); 374int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
373int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *); 375int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
376int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t,
377 uint16_t *, uint16_t *);
378int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t,
379 uint16_t *, uint16_t *);
374 380
375/* externs BlockGuard */ 381/* externs BlockGuard */
376extern char *_dump_buf_data; 382extern char *_dump_buf_data;
@@ -432,10 +438,16 @@ void lpfc_handle_rrq_active(struct lpfc_hba *);
432int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *); 438int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
433int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, 439int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
434 uint16_t, uint16_t, uint16_t); 440 uint16_t, uint16_t, uint16_t);
441uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t);
435void lpfc_cleanup_wt_rrqs(struct lpfc_hba *); 442void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
436void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *); 443void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
437struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t, 444struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
438 uint32_t); 445 uint32_t);
446void lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *, enum nemb_type,
447 enum mbox_type, enum dma_type, enum sta_type,
448 struct lpfc_dmabuf *, uint32_t);
449void lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *, MAILBOX_t *);
439int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *); 450int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
440/* functions to support SR-IOV */ 451/* functions to support SR-IOV */
441int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int); 452int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
453uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 30b25c5fdd7e..a0424dd90e40 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -48,6 +48,7 @@
48#include "lpfc_version.h" 48#include "lpfc_version.h"
49#include "lpfc_compat.h" 49#include "lpfc_compat.h"
50#include "lpfc_debugfs.h" 50#include "lpfc_debugfs.h"
51#include "lpfc_bsg.h"
51 52
52#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 53#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
53/* 54/*
@@ -135,7 +136,11 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
135 int i, index, len, enable; 136 int i, index, len, enable;
136 uint32_t ms; 137 uint32_t ms;
137 struct lpfc_debugfs_trc *dtp; 138 struct lpfc_debugfs_trc *dtp;
138 char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE]; 139 char *buffer;
140
141 buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
142 if (!buffer)
143 return 0;
139 144
140 enable = lpfc_debugfs_enable; 145 enable = lpfc_debugfs_enable;
141 lpfc_debugfs_enable = 0; 146 lpfc_debugfs_enable = 0;
@@ -167,6 +172,8 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
167 } 172 }
168 173
169 lpfc_debugfs_enable = enable; 174 lpfc_debugfs_enable = enable;
175 kfree(buffer);
176
170 return len; 177 return len;
171} 178}
172 179
@@ -195,8 +202,11 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
195 int i, index, len, enable; 202 int i, index, len, enable;
196 uint32_t ms; 203 uint32_t ms;
197 struct lpfc_debugfs_trc *dtp; 204 struct lpfc_debugfs_trc *dtp;
198 char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE]; 205 char *buffer;
199 206
207 buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
208 if (!buffer)
209 return 0;
200 210
201 enable = lpfc_debugfs_enable; 211 enable = lpfc_debugfs_enable;
202 lpfc_debugfs_enable = 0; 212 lpfc_debugfs_enable = 0;
@@ -228,6 +238,8 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
228 } 238 }
229 239
230 lpfc_debugfs_enable = enable; 240 lpfc_debugfs_enable = enable;
241 kfree(buffer);
242
231 return len; 243 return len;
232} 244}
233 245
@@ -378,7 +390,11 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
378 int len = 0; 390 int len = 0;
379 int i, off; 391 int i, off;
380 uint32_t *ptr; 392 uint32_t *ptr;
381 char buffer[1024]; 393 char *buffer;
394
395 buffer = kmalloc(1024, GFP_KERNEL);
396 if (!buffer)
397 return 0;
382 398
383 off = 0; 399 off = 0;
384 spin_lock_irq(&phba->hbalock); 400 spin_lock_irq(&phba->hbalock);
@@ -407,6 +423,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
407 } 423 }
408 424
409 spin_unlock_irq(&phba->hbalock); 425 spin_unlock_irq(&phba->hbalock);
426 kfree(buffer);
427
410 return len; 428 return len;
411} 429}
412 430
@@ -1327,8 +1345,8 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
1327 return 0; 1345 return 0;
1328 1346
1329 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { 1347 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
1330 where = idiag.cmd.data[0]; 1348 where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
1331 count = idiag.cmd.data[1]; 1349 count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
1332 } else 1350 } else
1333 return 0; 1351 return 0;
1334 1352
@@ -1373,6 +1391,11 @@ pcicfg_browse:
1373 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, 1391 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
1374 "%08x ", u32val); 1392 "%08x ", u32val);
1375 offset += sizeof(uint32_t); 1393 offset += sizeof(uint32_t);
1394 if (offset >= LPFC_PCI_CFG_SIZE) {
1395 len += snprintf(pbuffer+len,
1396 LPFC_PCI_CFG_SIZE-len, "\n");
1397 break;
1398 }
1376 index -= sizeof(uint32_t); 1399 index -= sizeof(uint32_t);
1377 if (!index) 1400 if (!index)
1378 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, 1401 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
@@ -1385,8 +1408,11 @@ pcicfg_browse:
1385 } 1408 }
1386 1409
1387 /* Set up the offset for next portion of pci cfg read */ 1410 /* Set up the offset for next portion of pci cfg read */
1388 idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE; 1411 if (index == 0) {
1389 if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE) 1412 idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE;
1413 if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE)
1414 idiag.offset.last_rd = 0;
1415 } else
1390 idiag.offset.last_rd = 0; 1416 idiag.offset.last_rd = 0;
1391 1417
1392 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); 1418 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
@@ -1439,8 +1465,8 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
1439 if (rc != LPFC_PCI_CFG_RD_CMD_ARG) 1465 if (rc != LPFC_PCI_CFG_RD_CMD_ARG)
1440 goto error_out; 1466 goto error_out;
1441 /* Read command from PCI config space, set up command fields */ 1467 /* Read command from PCI config space, set up command fields */
1442 where = idiag.cmd.data[0]; 1468 where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
1443 count = idiag.cmd.data[1]; 1469 count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
1444 if (count == LPFC_PCI_CFG_BROWSE) { 1470 if (count == LPFC_PCI_CFG_BROWSE) {
1445 if (where % sizeof(uint32_t)) 1471 if (where % sizeof(uint32_t))
1446 goto error_out; 1472 goto error_out;
@@ -1475,9 +1501,9 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
1475 if (rc != LPFC_PCI_CFG_WR_CMD_ARG) 1501 if (rc != LPFC_PCI_CFG_WR_CMD_ARG)
1476 goto error_out; 1502 goto error_out;
1477 /* Write command to PCI config space, read-modify-write */ 1503 /* Write command to PCI config space, read-modify-write */
1478 where = idiag.cmd.data[0]; 1504 where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
1479 count = idiag.cmd.data[1]; 1505 count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
1480 value = idiag.cmd.data[2]; 1506 value = idiag.cmd.data[IDIAG_PCICFG_VALUE_INDX];
1481 /* Sanity checks */ 1507 /* Sanity checks */
1482 if ((count != sizeof(uint8_t)) && 1508 if ((count != sizeof(uint8_t)) &&
1483 (count != sizeof(uint16_t)) && 1509 (count != sizeof(uint16_t)) &&
@@ -1570,6 +1596,292 @@ error_out:
1570} 1596}
1571 1597
1572/** 1598/**
1599 * lpfc_idiag_baracc_read - idiag debugfs pci bar access read
1600 * @file: The file pointer to read from.
1601 * @buf: The buffer to copy the data to.
1602 * @nbytes: The number of bytes to read.
1603 * @ppos: The position in the file to start reading from.
1604 *
1605 * Description:
1606 * This routine reads data from the @phba pci bar memory mapped space
1607 * according to the idiag command, and copies to user @buf.
1608 *
1609 * Returns:
1610 * This function returns the amount of data that was read (this could be less
1611 * than @nbytes if the end of the file was reached) or a negative error value.
1612 **/
1613static ssize_t
1614lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes,
1615 loff_t *ppos)
1616{
1617 struct lpfc_debug *debug = file->private_data;
1618 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
1619 int offset_label, offset, offset_run, len = 0, index;
1620 int bar_num, acc_range, bar_size;
1621 char *pbuffer;
1622 void __iomem *mem_mapped_bar;
1623 uint32_t if_type;
1624 struct pci_dev *pdev;
1625 uint32_t u32val;
1626
1627 pdev = phba->pcidev;
1628 if (!pdev)
1629 return 0;
1630
1631 /* This is a user read operation */
1632 debug->op = LPFC_IDIAG_OP_RD;
1633
1634 if (!debug->buffer)
1635 debug->buffer = kmalloc(LPFC_PCI_BAR_RD_BUF_SIZE, GFP_KERNEL);
1636 if (!debug->buffer)
1637 return 0;
1638 pbuffer = debug->buffer;
1639
1640 if (*ppos)
1641 return 0;
1642
1643 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
1644 bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
1645 offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
1646 acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
1647 bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
1648 } else
1649 return 0;
1650
1651 if (acc_range == 0)
1652 return 0;
1653
1654 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1655 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
1656 if (bar_num == IDIAG_BARACC_BAR_0)
1657 mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
1658 else if (bar_num == IDIAG_BARACC_BAR_1)
1659 mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
1660 else if (bar_num == IDIAG_BARACC_BAR_2)
1661 mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
1662 else
1663 return 0;
1664 } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
1665 if (bar_num == IDIAG_BARACC_BAR_0)
1666 mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
1667 else
1668 return 0;
1669 } else
1670 return 0;
1671
1672 /* Read single PCI bar space register */
1673 if (acc_range == SINGLE_WORD) {
1674 offset_run = offset;
1675 u32val = readl(mem_mapped_bar + offset_run);
1676 len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
1677 "%05x: %08x\n", offset_run, u32val);
1678 } else
1679 goto baracc_browse;
1680
1681 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
1682
1683baracc_browse:
1684
1685 /* Browse all PCI bar space registers */
1686 offset_label = idiag.offset.last_rd;
1687 offset_run = offset_label;
1688
1689 /* Read PCI bar memory mapped space */
1690 len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
1691 "%05x: ", offset_label);
1692 index = LPFC_PCI_BAR_RD_SIZE;
1693 while (index > 0) {
1694 u32val = readl(mem_mapped_bar + offset_run);
1695 len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
1696 "%08x ", u32val);
1697 offset_run += sizeof(uint32_t);
1698 if (acc_range == LPFC_PCI_BAR_BROWSE) {
1699 if (offset_run >= bar_size) {
1700 len += snprintf(pbuffer+len,
1701 LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
1702 break;
1703 }
1704 } else {
1705 if (offset_run >= offset +
1706 (acc_range * sizeof(uint32_t))) {
1707 len += snprintf(pbuffer+len,
1708 LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
1709 break;
1710 }
1711 }
1712 index -= sizeof(uint32_t);
1713 if (!index)
1714 len += snprintf(pbuffer+len,
1715 LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
1716 else if (!(index % (8 * sizeof(uint32_t)))) {
1717 offset_label += (8 * sizeof(uint32_t));
1718 len += snprintf(pbuffer+len,
1719 LPFC_PCI_BAR_RD_BUF_SIZE-len,
1720 "\n%05x: ", offset_label);
1721 }
1722 }
1723
1724 /* Set up the offset for next portion of pci bar read */
1725 if (index == 0) {
1726 idiag.offset.last_rd += LPFC_PCI_BAR_RD_SIZE;
1727 if (acc_range == LPFC_PCI_BAR_BROWSE) {
1728 if (idiag.offset.last_rd >= bar_size)
1729 idiag.offset.last_rd = 0;
1730 } else {
1731 if (offset_run >= offset +
1732 (acc_range * sizeof(uint32_t)))
1733 idiag.offset.last_rd = offset;
1734 }
1735 } else {
1736 if (acc_range == LPFC_PCI_BAR_BROWSE)
1737 idiag.offset.last_rd = 0;
1738 else
1739 idiag.offset.last_rd = offset;
1740 }
1741
1742 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
1743}
1744
1745/**
1746 * lpfc_idiag_baracc_write - Syntax check and set up idiag bar access commands
1747 * @file: The file pointer to read from.
1748 * @buf: The buffer to copy the user data from.
1749 * @nbytes: The number of bytes to get.
1750 * @ppos: The position in the file to start reading from.
1751 *
1752 * This routine get the debugfs idiag command struct from user space and
1753 * then perform the syntax check for PCI bar memory mapped space read or
1754 * write command accordingly. In the case of PCI bar memory mapped space
1755 * read command, it sets up the command in the idiag command struct for
1756 * the debugfs read operation. In the case of PCI bar memorpy mapped space
1757 * write operation, it executes the write operation into the PCI bar memory
1758 * mapped space accordingly.
1759 *
1760 * It returns the @nbytges passing in from debugfs user space when successful.
1761 * In case of error conditions, it returns proper error code back to the user
1762 * space.
1763 */
1764static ssize_t
1765lpfc_idiag_baracc_write(struct file *file, const char __user *buf,
1766 size_t nbytes, loff_t *ppos)
1767{
1768 struct lpfc_debug *debug = file->private_data;
1769 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
1770 uint32_t bar_num, bar_size, offset, value, acc_range;
1771 struct pci_dev *pdev;
1772 void __iomem *mem_mapped_bar;
1773 uint32_t if_type;
1774 uint32_t u32val;
1775 int rc;
1776
1777 pdev = phba->pcidev;
1778 if (!pdev)
1779 return -EFAULT;
1780
1781 /* This is a user write operation */
1782 debug->op = LPFC_IDIAG_OP_WR;
1783
1784 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
1785 if (rc < 0)
1786 return rc;
1787
1788 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1789 bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
1790
1791 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
1792 if ((bar_num != IDIAG_BARACC_BAR_0) &&
1793 (bar_num != IDIAG_BARACC_BAR_1) &&
1794 (bar_num != IDIAG_BARACC_BAR_2))
1795 goto error_out;
1796 } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
1797 if (bar_num != IDIAG_BARACC_BAR_0)
1798 goto error_out;
1799 } else
1800 goto error_out;
1801
1802 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
1803 if (bar_num == IDIAG_BARACC_BAR_0) {
1804 idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
1805 LPFC_PCI_IF0_BAR0_SIZE;
1806 mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
1807 } else if (bar_num == IDIAG_BARACC_BAR_1) {
1808 idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
1809 LPFC_PCI_IF0_BAR1_SIZE;
1810 mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
1811 } else if (bar_num == IDIAG_BARACC_BAR_2) {
1812 idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
1813 LPFC_PCI_IF0_BAR2_SIZE;
1814 mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
1815 } else
1816 goto error_out;
1817 } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
1818 if (bar_num == IDIAG_BARACC_BAR_0) {
1819 idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
1820 LPFC_PCI_IF2_BAR0_SIZE;
1821 mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
1822 } else
1823 goto error_out;
1824 } else
1825 goto error_out;
1826
1827 offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
1828 if (offset % sizeof(uint32_t))
1829 goto error_out;
1830
1831 bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
1832 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
1833 /* Sanity check on PCI config read command line arguments */
1834 if (rc != LPFC_PCI_BAR_RD_CMD_ARG)
1835 goto error_out;
1836 acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
1837 if (acc_range == LPFC_PCI_BAR_BROWSE) {
1838 if (offset > bar_size - sizeof(uint32_t))
1839 goto error_out;
1840 /* Starting offset to browse */
1841 idiag.offset.last_rd = offset;
1842 } else if (acc_range > SINGLE_WORD) {
1843 if (offset + acc_range * sizeof(uint32_t) > bar_size)
1844 goto error_out;
1845 /* Starting offset to browse */
1846 idiag.offset.last_rd = offset;
1847 } else if (acc_range != SINGLE_WORD)
1848 goto error_out;
1849 } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR ||
1850 idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST ||
1851 idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
1852 /* Sanity check on PCI bar write command line arguments */
1853 if (rc != LPFC_PCI_BAR_WR_CMD_ARG)
1854 goto error_out;
1855 /* Write command to PCI bar space, read-modify-write */
1856 acc_range = SINGLE_WORD;
1857 value = idiag.cmd.data[IDIAG_BARACC_REG_VAL_INDX];
1858 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR) {
1859 writel(value, mem_mapped_bar + offset);
1860 readl(mem_mapped_bar + offset);
1861 }
1862 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST) {
1863 u32val = readl(mem_mapped_bar + offset);
1864 u32val |= value;
1865 writel(u32val, mem_mapped_bar + offset);
1866 readl(mem_mapped_bar + offset);
1867 }
1868 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
1869 u32val = readl(mem_mapped_bar + offset);
1870 u32val &= ~value;
1871 writel(u32val, mem_mapped_bar + offset);
1872 readl(mem_mapped_bar + offset);
1873 }
1874 } else
1875 /* All other opecodes are illegal for now */
1876 goto error_out;
1877
1878 return nbytes;
1879error_out:
1880 memset(&idiag, 0, sizeof(idiag));
1881 return -EINVAL;
1882}
1883
1884/**
1573 * lpfc_idiag_queinfo_read - idiag debugfs read queue information 1885 * lpfc_idiag_queinfo_read - idiag debugfs read queue information
1574 * @file: The file pointer to read from. 1886 * @file: The file pointer to read from.
1575 * @buf: The buffer to copy the data to. 1887 * @buf: The buffer to copy the data to.
@@ -1871,8 +2183,8 @@ lpfc_idiag_queacc_read(struct file *file, char __user *buf, size_t nbytes,
1871 return 0; 2183 return 0;
1872 2184
1873 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) { 2185 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
1874 index = idiag.cmd.data[2]; 2186 index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
1875 count = idiag.cmd.data[3]; 2187 count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
1876 pque = (struct lpfc_queue *)idiag.ptr_private; 2188 pque = (struct lpfc_queue *)idiag.ptr_private;
1877 } else 2189 } else
1878 return 0; 2190 return 0;
@@ -1944,12 +2256,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
1944 return rc; 2256 return rc;
1945 2257
 1946 /* Get and sanity check on command fields */ 2258 /* Get and sanity check on command fields */
1947 quetp = idiag.cmd.data[0]; 2259 quetp = idiag.cmd.data[IDIAG_QUEACC_QUETP_INDX];
1948 queid = idiag.cmd.data[1]; 2260 queid = idiag.cmd.data[IDIAG_QUEACC_QUEID_INDX];
1949 index = idiag.cmd.data[2]; 2261 index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
1950 count = idiag.cmd.data[3]; 2262 count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
1951 offset = idiag.cmd.data[4]; 2263 offset = idiag.cmd.data[IDIAG_QUEACC_OFFST_INDX];
1952 value = idiag.cmd.data[5]; 2264 value = idiag.cmd.data[IDIAG_QUEACC_VALUE_INDX];
1953 2265
1954 /* Sanity check on command line arguments */ 2266 /* Sanity check on command line arguments */
1955 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR || 2267 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
@@ -2218,7 +2530,7 @@ lpfc_idiag_drbacc_read(struct file *file, char __user *buf, size_t nbytes,
2218 return 0; 2530 return 0;
2219 2531
2220 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD) 2532 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD)
2221 drb_reg_id = idiag.cmd.data[0]; 2533 drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
2222 else 2534 else
2223 return 0; 2535 return 0;
2224 2536
@@ -2257,7 +2569,7 @@ lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
2257{ 2569{
2258 struct lpfc_debug *debug = file->private_data; 2570 struct lpfc_debug *debug = file->private_data;
2259 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; 2571 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
2260 uint32_t drb_reg_id, value, reg_val; 2572 uint32_t drb_reg_id, value, reg_val = 0;
2261 void __iomem *drb_reg; 2573 void __iomem *drb_reg;
2262 int rc; 2574 int rc;
2263 2575
@@ -2269,8 +2581,8 @@ lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
2269 return rc; 2581 return rc;
2270 2582
2271 /* Sanity check on command line arguments */ 2583 /* Sanity check on command line arguments */
2272 drb_reg_id = idiag.cmd.data[0]; 2584 drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
2273 value = idiag.cmd.data[1]; 2585 value = idiag.cmd.data[IDIAG_DRBACC_VALUE_INDX];
2274 2586
2275 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR || 2587 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
2276 idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST || 2588 idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
@@ -2330,6 +2642,679 @@ error_out:
2330 return -EINVAL; 2642 return -EINVAL;
2331} 2643}
2332 2644
2645/**
2646 * lpfc_idiag_ctlacc_read_reg - idiag debugfs read a control registers
2647 * @phba: The pointer to hba structure.
2648 * @pbuffer: The pointer to the buffer to copy the data to.
2649 * @len: The lenght of bytes to copied.
2650 * @drbregid: The id to doorbell registers.
2651 *
2652 * Description:
2653 * This routine reads a control register and copies its content to the
2654 * user buffer pointed to by @pbuffer.
2655 *
2656 * Returns:
2657 * This function returns the amount of data that was copied into @pbuffer.
2658 **/
2659static int
2660lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
2661 int len, uint32_t ctlregid)
2662{
2663
2664 if (!pbuffer)
2665 return 0;
2666
2667 switch (ctlregid) {
2668 case LPFC_CTL_PORT_SEM:
2669 len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
2670 "Port SemReg: 0x%08x\n",
2671 readl(phba->sli4_hba.conf_regs_memmap_p +
2672 LPFC_CTL_PORT_SEM_OFFSET));
2673 break;
2674 case LPFC_CTL_PORT_STA:
2675 len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
2676 "Port StaReg: 0x%08x\n",
2677 readl(phba->sli4_hba.conf_regs_memmap_p +
2678 LPFC_CTL_PORT_STA_OFFSET));
2679 break;
2680 case LPFC_CTL_PORT_CTL:
2681 len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
2682 "Port CtlReg: 0x%08x\n",
2683 readl(phba->sli4_hba.conf_regs_memmap_p +
2684 LPFC_CTL_PORT_CTL_OFFSET));
2685 break;
2686 case LPFC_CTL_PORT_ER1:
2687 len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
2688 "Port Er1Reg: 0x%08x\n",
2689 readl(phba->sli4_hba.conf_regs_memmap_p +
2690 LPFC_CTL_PORT_ER1_OFFSET));
2691 break;
2692 case LPFC_CTL_PORT_ER2:
2693 len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
2694 "Port Er2Reg: 0x%08x\n",
2695 readl(phba->sli4_hba.conf_regs_memmap_p +
2696 LPFC_CTL_PORT_ER2_OFFSET));
2697 break;
2698 case LPFC_CTL_PDEV_CTL:
2699 len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
2700 "PDev CtlReg: 0x%08x\n",
2701 readl(phba->sli4_hba.conf_regs_memmap_p +
2702 LPFC_CTL_PDEV_CTL_OFFSET));
2703 break;
2704 default:
2705 break;
2706 }
2707 return len;
2708}
2709
2710/**
2711 * lpfc_idiag_ctlacc_read - idiag debugfs read port and device control register
2712 * @file: The file pointer to read from.
2713 * @buf: The buffer to copy the data to.
2714 * @nbytes: The number of bytes to read.
2715 * @ppos: The position in the file to start reading from.
2716 *
2717 * Description:
2718 * This routine reads data from the @phba port and device registers according
2719 * to the idiag command, and copies to user @buf.
2720 *
2721 * Returns:
2722 * This function returns the amount of data that was read (this could be less
2723 * than @nbytes if the end of the file was reached) or a negative error value.
2724 **/
2725static ssize_t
2726lpfc_idiag_ctlacc_read(struct file *file, char __user *buf, size_t nbytes,
2727 loff_t *ppos)
2728{
2729 struct lpfc_debug *debug = file->private_data;
2730 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
2731 uint32_t ctl_reg_id, i;
2732 char *pbuffer;
2733 int len = 0;
2734
2735 /* This is a user read operation */
2736 debug->op = LPFC_IDIAG_OP_RD;
2737
2738 if (!debug->buffer)
2739 debug->buffer = kmalloc(LPFC_CTL_ACC_BUF_SIZE, GFP_KERNEL);
2740 if (!debug->buffer)
2741 return 0;
2742 pbuffer = debug->buffer;
2743
2744 if (*ppos)
2745 return 0;
2746
2747 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD)
2748 ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
2749 else
2750 return 0;
2751
2752 if (ctl_reg_id == LPFC_CTL_ACC_ALL)
2753 for (i = 1; i <= LPFC_CTL_MAX; i++)
2754 len = lpfc_idiag_ctlacc_read_reg(phba,
2755 pbuffer, len, i);
2756 else
2757 len = lpfc_idiag_ctlacc_read_reg(phba,
2758 pbuffer, len, ctl_reg_id);
2759
2760 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
2761}
2762
/**
 * lpfc_idiag_ctlacc_write - Syntax check and set up idiag ctlacc commands
 * @file: The file pointer to read from.
 * @buf: The buffer to copy the user data from.
 * @nbytes: The number of bytes to get.
 * @ppos: The position in the file to start reading from.
 *
 * This routine gets the debugfs idiag command struct from user space and then
 * performs the syntax check for port and device control register read (dump)
 * or write (set) command accordingly.
 *
 * It returns the @nbytes passed in from debugfs user space when successful.
 * In case of error conditions, it returns proper error code back to the user
 * space.
 **/
static ssize_t
lpfc_idiag_ctlacc_write(struct file *file, const char __user *buf,
			size_t nbytes, loff_t *ppos)
{
	struct lpfc_debug *debug = file->private_data;
	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
	uint32_t ctl_reg_id, value, reg_val = 0;
	void __iomem *ctl_reg;
	int rc;

	/* This is a user write operation */
	debug->op = LPFC_IDIAG_OP_WR;

	/* Parse the user command into idiag.cmd; rc is the argument count */
	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
	if (rc < 0)
		return rc;

	/* Sanity check on command line arguments */
	ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
	value = idiag.cmd.data[IDIAG_CTLACC_VALUE_INDX];

	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
		/* Write/set/clear need exactly the write arg count and a
		 * single concrete register id.
		 */
		if (rc != LPFC_CTL_ACC_WR_CMD_ARG)
			goto error_out;
		if (ctl_reg_id > LPFC_CTL_MAX)
			goto error_out;
	} else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD) {
		/* Read accepts a single register id or the "all" wildcard */
		if (rc != LPFC_CTL_ACC_RD_CMD_ARG)
			goto error_out;
		if ((ctl_reg_id > LPFC_CTL_MAX) &&
		    (ctl_reg_id != LPFC_CTL_ACC_ALL))
			goto error_out;
	} else
		goto error_out;

	/* Perform the write access operation */
	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
		/* Resolve the register id to its offset in the config BAR */
		switch (ctl_reg_id) {
		case LPFC_CTL_PORT_SEM:
			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_CTL_PORT_SEM_OFFSET;
			break;
		case LPFC_CTL_PORT_STA:
			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_CTL_PORT_STA_OFFSET;
			break;
		case LPFC_CTL_PORT_CTL:
			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_CTL_PORT_CTL_OFFSET;
			break;
		case LPFC_CTL_PORT_ER1:
			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_CTL_PORT_ER1_OFFSET;
			break;
		case LPFC_CTL_PORT_ER2:
			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_CTL_PORT_ER2_OFFSET;
			break;
		case LPFC_CTL_PDEV_CTL:
			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_CTL_PDEV_CTL_OFFSET;
			break;
		default:
			goto error_out;
		}

		/* Read-modify-write: WR overwrites, ST sets bits, CL clears
		 * bits; the trailing readl flushes the posted write.
		 */
		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR)
			reg_val = value;
		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST) {
			reg_val = readl(ctl_reg);
			reg_val |= value;
		}
		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
			reg_val = readl(ctl_reg);
			reg_val &= ~value;
		}
		writel(reg_val, ctl_reg);
		readl(ctl_reg); /* flush */
	}
	return nbytes;

error_out:
	/* Clean out command structure on command error out */
	memset(&idiag, 0, sizeof(idiag));
	return -EINVAL;
}
2868
2869/**
2870 * lpfc_idiag_mbxacc_get_setup - idiag debugfs get mailbox access setup
2871 * @phba: Pointer to HBA context object.
2872 * @pbuffer: Pointer to data buffer.
2873 *
2874 * Description:
2875 * This routine gets the driver mailbox access debugfs setup information.
2876 *
2877 * Returns:
2878 * This function returns the amount of data that was read (this could be less
2879 * than @nbytes if the end of the file was reached) or a negative error value.
2880 **/
2881static int
2882lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer)
2883{
2884 uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
2885 int len = 0;
2886
2887 mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
2888 mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
2889 mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
2890 mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
2891
2892 len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
2893 "mbx_dump_map: 0x%08x\n", mbx_dump_map);
2894 len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
2895 "mbx_dump_cnt: %04d\n", mbx_dump_cnt);
2896 len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
2897 "mbx_word_cnt: %04d\n", mbx_word_cnt);
2898 len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
2899 "mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd);
2900
2901 return len;
2902}
2903
2904/**
2905 * lpfc_idiag_mbxacc_read - idiag debugfs read on mailbox access
2906 * @file: The file pointer to read from.
2907 * @buf: The buffer to copy the data to.
2908 * @nbytes: The number of bytes to read.
2909 * @ppos: The position in the file to start reading from.
2910 *
2911 * Description:
2912 * This routine reads data from the @phba driver mailbox access debugfs setup
2913 * information.
2914 *
2915 * Returns:
2916 * This function returns the amount of data that was read (this could be less
2917 * than @nbytes if the end of the file was reached) or a negative error value.
2918 **/
2919static ssize_t
2920lpfc_idiag_mbxacc_read(struct file *file, char __user *buf, size_t nbytes,
2921 loff_t *ppos)
2922{
2923 struct lpfc_debug *debug = file->private_data;
2924 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
2925 char *pbuffer;
2926 int len = 0;
2927
2928 /* This is a user read operation */
2929 debug->op = LPFC_IDIAG_OP_RD;
2930
2931 if (!debug->buffer)
2932 debug->buffer = kmalloc(LPFC_MBX_ACC_BUF_SIZE, GFP_KERNEL);
2933 if (!debug->buffer)
2934 return 0;
2935 pbuffer = debug->buffer;
2936
2937 if (*ppos)
2938 return 0;
2939
2940 if ((idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP) &&
2941 (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP))
2942 return 0;
2943
2944 len = lpfc_idiag_mbxacc_get_setup(phba, pbuffer);
2945
2946 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
2947}
2948
/**
 * lpfc_idiag_mbxacc_write - Syntax check and set up idiag mbxacc commands
 * @file: The file pointer to read from.
 * @buf: The buffer to copy the user data from.
 * @nbytes: The number of bytes to get.
 * @ppos: The position in the file to start reading from.
 *
 * This routine gets the debugfs idiag command struct from user space and then
 * performs the syntax check for driver mailbox command (dump) and sets up the
 * necessary states in the idiag command struct accordingly.
 *
 * It returns the @nbytes passed in from debugfs user space when successful.
 * In case of error conditions, it returns proper error code back to the user
 * space.
 **/
static ssize_t
lpfc_idiag_mbxacc_write(struct file *file, const char __user *buf,
			size_t nbytes, loff_t *ppos)
{
	struct lpfc_debug *debug = file->private_data;
	uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
	int rc;

	/* This is a user write operation */
	debug->op = LPFC_IDIAG_OP_WR;

	/* Parse the user command into idiag.cmd; rc is the argument count */
	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
	if (rc < 0)
		return rc;

	/* Sanity check on command line arguments */
	mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
	mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
	mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
	mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];

	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_MBXACC_DP) {
		/* Driver mailbox dump: map must select mailbox bits only */
		if (!(mbx_dump_map & LPFC_MBX_DMP_MBX_ALL))
			goto error_out;
		if ((mbx_dump_map & ~LPFC_MBX_DMP_MBX_ALL) &&
		    (mbx_dump_map != LPFC_MBX_DMP_ALL))
			goto error_out;
		/* NOTE(review): mbx_word_cnt is a word count while
		 * sizeof(MAILBOX_t) is in bytes -- this bound looks
		 * permissive by a factor of four; confirm intended limit.
		 */
		if (mbx_word_cnt > sizeof(MAILBOX_t))
			goto error_out;
	} else if (idiag.cmd.opcode == LPFC_IDIAG_BSG_MBXACC_DP) {
		/* BSG pass-through dump: map must select bsg bits only */
		if (!(mbx_dump_map & LPFC_BSG_DMP_MBX_ALL))
			goto error_out;
		if ((mbx_dump_map & ~LPFC_BSG_DMP_MBX_ALL) &&
		    (mbx_dump_map != LPFC_MBX_DMP_ALL))
			goto error_out;
		/* Word count bounded by the bsg mailbox buffer (in words) */
		if (mbx_word_cnt > (BSG_MBOX_SIZE)/4)
			goto error_out;
		/* presumably 0x9b is the SLI4_CONFIG mailbox command and the
		 * only one allowed here -- verify against mailbox defines
		 */
		if (mbx_mbox_cmd != 0x9b)
			goto error_out;
	} else
		goto error_out;

	/* Common checks: non-zero word count, exact arg count, 8-bit cmd */
	if (mbx_word_cnt == 0)
		goto error_out;
	if (rc != LPFC_MBX_DMP_ARG)
		goto error_out;
	if (mbx_mbox_cmd & ~0xff)
		goto error_out;

	/* condition for stop mailbox dump */
	if (mbx_dump_cnt == 0)
		goto reset_out;

	return nbytes;

reset_out:
	/* Dump-count zero means "stop dumping": clear state, report success */
	memset(&idiag, 0, sizeof(idiag));
	return nbytes;

error_out:
	/* Clean out command structure on command error out */
	memset(&idiag, 0, sizeof(idiag));
	return -EINVAL;
}
3029
3030/**
3031 * lpfc_idiag_extacc_avail_get - get the available extents information
3032 * @phba: pointer to lpfc hba data structure.
3033 * @pbuffer: pointer to internal buffer.
3034 * @len: length into the internal buffer data has been copied.
3035 *
3036 * Description:
3037 * This routine is to get the available extent information.
3038 *
3039 * Returns:
3040 * overall lenth of the data read into the internal buffer.
3041 **/
3042static int
3043lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
3044{
3045 uint16_t ext_cnt, ext_size;
3046
3047 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3048 "\nAvailable Extents Information:\n");
3049
3050 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3051 "\tPort Available VPI extents: ");
3052 lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI,
3053 &ext_cnt, &ext_size);
3054 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3055 "Count %3d, Size %3d\n", ext_cnt, ext_size);
3056
3057 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3058 "\tPort Available VFI extents: ");
3059 lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI,
3060 &ext_cnt, &ext_size);
3061 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3062 "Count %3d, Size %3d\n", ext_cnt, ext_size);
3063
3064 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3065 "\tPort Available RPI extents: ");
3066 lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
3067 &ext_cnt, &ext_size);
3068 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3069 "Count %3d, Size %3d\n", ext_cnt, ext_size);
3070
3071 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3072 "\tPort Available XRI extents: ");
3073 lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
3074 &ext_cnt, &ext_size);
3075 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3076 "Count %3d, Size %3d\n", ext_cnt, ext_size);
3077
3078 return len;
3079}
3080
3081/**
3082 * lpfc_idiag_extacc_alloc_get - get the allocated extents information
3083 * @phba: pointer to lpfc hba data structure.
3084 * @pbuffer: pointer to internal buffer.
3085 * @len: length into the internal buffer data has been copied.
3086 *
3087 * Description:
3088 * This routine is to get the allocated extent information.
3089 *
3090 * Returns:
3091 * overall lenth of the data read into the internal buffer.
3092 **/
3093static int
3094lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
3095{
3096 uint16_t ext_cnt, ext_size;
3097 int rc;
3098
3099 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3100 "\nAllocated Extents Information:\n");
3101
3102 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3103 "\tHost Allocated VPI extents: ");
3104 rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI,
3105 &ext_cnt, &ext_size);
3106 if (!rc)
3107 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3108 "Port %d Extent %3d, Size %3d\n",
3109 phba->brd_no, ext_cnt, ext_size);
3110 else
3111 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3112 "N/A\n");
3113
3114 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3115 "\tHost Allocated VFI extents: ");
3116 rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI,
3117 &ext_cnt, &ext_size);
3118 if (!rc)
3119 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3120 "Port %d Extent %3d, Size %3d\n",
3121 phba->brd_no, ext_cnt, ext_size);
3122 else
3123 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3124 "N/A\n");
3125
3126 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3127 "\tHost Allocated RPI extents: ");
3128 rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI,
3129 &ext_cnt, &ext_size);
3130 if (!rc)
3131 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3132 "Port %d Extent %3d, Size %3d\n",
3133 phba->brd_no, ext_cnt, ext_size);
3134 else
3135 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3136 "N/A\n");
3137
3138 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3139 "\tHost Allocated XRI extents: ");
3140 rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
3141 &ext_cnt, &ext_size);
3142 if (!rc)
3143 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3144 "Port %d Extent %3d, Size %3d\n",
3145 phba->brd_no, ext_cnt, ext_size);
3146 else
3147 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3148 "N/A\n");
3149
3150 return len;
3151}
3152
3153/**
3154 * lpfc_idiag_extacc_drivr_get - get driver extent information
3155 * @phba: pointer to lpfc hba data structure.
3156 * @pbuffer: pointer to internal buffer.
3157 * @len: length into the internal buffer data has been copied.
3158 *
3159 * Description:
3160 * This routine is to get the driver extent information.
3161 *
3162 * Returns:
3163 * overall lenth of the data read into the internal buffer.
3164 **/
3165static int
3166lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len)
3167{
3168 struct lpfc_rsrc_blks *rsrc_blks;
3169 int index;
3170
3171 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3172 "\nDriver Extents Information:\n");
3173
3174 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3175 "\tVPI extents:\n");
3176 index = 0;
3177 list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) {
3178 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3179 "\t\tBlock %3d: Start %4d, Count %4d\n",
3180 index, rsrc_blks->rsrc_start,
3181 rsrc_blks->rsrc_size);
3182 index++;
3183 }
3184 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3185 "\tVFI extents:\n");
3186 index = 0;
3187 list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list,
3188 list) {
3189 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3190 "\t\tBlock %3d: Start %4d, Count %4d\n",
3191 index, rsrc_blks->rsrc_start,
3192 rsrc_blks->rsrc_size);
3193 index++;
3194 }
3195
3196 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3197 "\tRPI extents:\n");
3198 index = 0;
3199 list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list,
3200 list) {
3201 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3202 "\t\tBlock %3d: Start %4d, Count %4d\n",
3203 index, rsrc_blks->rsrc_start,
3204 rsrc_blks->rsrc_size);
3205 index++;
3206 }
3207
3208 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3209 "\tXRI extents:\n");
3210 index = 0;
3211 list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list,
3212 list) {
3213 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3214 "\t\tBlock %3d: Start %4d, Count %4d\n",
3215 index, rsrc_blks->rsrc_start,
3216 rsrc_blks->rsrc_size);
3217 index++;
3218 }
3219
3220 return len;
3221}
3222
3223/**
3224 * lpfc_idiag_extacc_write - Syntax check and set up idiag extacc commands
3225 * @file: The file pointer to read from.
3226 * @buf: The buffer to copy the user data from.
3227 * @nbytes: The number of bytes to get.
3228 * @ppos: The position in the file to start reading from.
3229 *
3230 * This routine get the debugfs idiag command struct from user space and then
3231 * perform the syntax check for extent information access commands and sets
3232 * up the necessary states in the idiag command struct accordingly.
3233 *
3234 * It returns the @nbytges passing in from debugfs user space when successful.
3235 * In case of error conditions, it returns proper error code back to the user
3236 * space.
3237 **/
3238static ssize_t
3239lpfc_idiag_extacc_write(struct file *file, const char __user *buf,
3240 size_t nbytes, loff_t *ppos)
3241{
3242 struct lpfc_debug *debug = file->private_data;
3243 uint32_t ext_map;
3244 int rc;
3245
3246 /* This is a user write operation */
3247 debug->op = LPFC_IDIAG_OP_WR;
3248
3249 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
3250 if (rc < 0)
3251 return rc;
3252
3253 ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
3254
3255 if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
3256 goto error_out;
3257 if (rc != LPFC_EXT_ACC_CMD_ARG)
3258 goto error_out;
3259 if (!(ext_map & LPFC_EXT_ACC_ALL))
3260 goto error_out;
3261
3262 return nbytes;
3263error_out:
3264 /* Clean out command structure on command error out */
3265 memset(&idiag, 0, sizeof(idiag));
3266 return -EINVAL;
3267}
3268
3269/**
3270 * lpfc_idiag_extacc_read - idiag debugfs read access to extent information
3271 * @file: The file pointer to read from.
3272 * @buf: The buffer to copy the data to.
3273 * @nbytes: The number of bytes to read.
3274 * @ppos: The position in the file to start reading from.
3275 *
3276 * Description:
3277 * This routine reads data from the proper extent information according to
3278 * the idiag command, and copies to user @buf.
3279 *
3280 * Returns:
3281 * This function returns the amount of data that was read (this could be less
3282 * than @nbytes if the end of the file was reached) or a negative error value.
3283 **/
3284static ssize_t
3285lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes,
3286 loff_t *ppos)
3287{
3288 struct lpfc_debug *debug = file->private_data;
3289 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
3290 char *pbuffer;
3291 uint32_t ext_map;
3292 int len = 0;
3293
3294 /* This is a user read operation */
3295 debug->op = LPFC_IDIAG_OP_RD;
3296
3297 if (!debug->buffer)
3298 debug->buffer = kmalloc(LPFC_EXT_ACC_BUF_SIZE, GFP_KERNEL);
3299 if (!debug->buffer)
3300 return 0;
3301 pbuffer = debug->buffer;
3302 if (*ppos)
3303 return 0;
3304 if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
3305 return 0;
3306
3307 ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
3308 if (ext_map & LPFC_EXT_ACC_AVAIL)
3309 len = lpfc_idiag_extacc_avail_get(phba, pbuffer, len);
3310 if (ext_map & LPFC_EXT_ACC_ALLOC)
3311 len = lpfc_idiag_extacc_alloc_get(phba, pbuffer, len);
3312 if (ext_map & LPFC_EXT_ACC_DRIVR)
3313 len = lpfc_idiag_extacc_drivr_get(phba, pbuffer, len);
3314
3315 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
3316}
3317
2333#undef lpfc_debugfs_op_disc_trc 3318#undef lpfc_debugfs_op_disc_trc
2334static const struct file_operations lpfc_debugfs_op_disc_trc = { 3319static const struct file_operations lpfc_debugfs_op_disc_trc = {
2335 .owner = THIS_MODULE, 3320 .owner = THIS_MODULE,
@@ -2420,6 +3405,16 @@ static const struct file_operations lpfc_idiag_op_pciCfg = {
2420 .release = lpfc_idiag_cmd_release, 3405 .release = lpfc_idiag_cmd_release,
2421}; 3406};
2422 3407
#undef lpfc_idiag_op_barAcc
/* debugfs file operations for the iDiag PCI BAR access node:
 * write sets up/executes a baracc command, read dumps BAR contents.
 */
static const struct file_operations lpfc_idiag_op_barAcc = {
	.owner = THIS_MODULE,
	.open = lpfc_idiag_open,
	.llseek = lpfc_debugfs_lseek,
	.read = lpfc_idiag_baracc_read,
	.write = lpfc_idiag_baracc_write,
	.release = lpfc_idiag_cmd_release,
};
3417
2423#undef lpfc_idiag_op_queInfo 3418#undef lpfc_idiag_op_queInfo
2424static const struct file_operations lpfc_idiag_op_queInfo = { 3419static const struct file_operations lpfc_idiag_op_queInfo = {
2425 .owner = THIS_MODULE, 3420 .owner = THIS_MODULE,
@@ -2428,7 +3423,7 @@ static const struct file_operations lpfc_idiag_op_queInfo = {
2428 .release = lpfc_idiag_release, 3423 .release = lpfc_idiag_release,
2429}; 3424};
2430 3425
2431#undef lpfc_idiag_op_queacc 3426#undef lpfc_idiag_op_queAcc
2432static const struct file_operations lpfc_idiag_op_queAcc = { 3427static const struct file_operations lpfc_idiag_op_queAcc = {
2433 .owner = THIS_MODULE, 3428 .owner = THIS_MODULE,
2434 .open = lpfc_idiag_open, 3429 .open = lpfc_idiag_open,
@@ -2438,7 +3433,7 @@ static const struct file_operations lpfc_idiag_op_queAcc = {
2438 .release = lpfc_idiag_cmd_release, 3433 .release = lpfc_idiag_cmd_release,
2439}; 3434};
2440 3435
2441#undef lpfc_idiag_op_drbacc 3436#undef lpfc_idiag_op_drbAcc
2442static const struct file_operations lpfc_idiag_op_drbAcc = { 3437static const struct file_operations lpfc_idiag_op_drbAcc = {
2443 .owner = THIS_MODULE, 3438 .owner = THIS_MODULE,
2444 .open = lpfc_idiag_open, 3439 .open = lpfc_idiag_open,
@@ -2448,8 +3443,234 @@ static const struct file_operations lpfc_idiag_op_drbAcc = {
2448 .release = lpfc_idiag_cmd_release, 3443 .release = lpfc_idiag_cmd_release,
2449}; 3444};
2450 3445
3446#undef lpfc_idiag_op_ctlAcc
3447static const struct file_operations lpfc_idiag_op_ctlAcc = {
3448 .owner = THIS_MODULE,
3449 .open = lpfc_idiag_open,
3450 .llseek = lpfc_debugfs_lseek,
3451 .read = lpfc_idiag_ctlacc_read,
3452 .write = lpfc_idiag_ctlacc_write,
3453 .release = lpfc_idiag_cmd_release,
3454};
3455
3456#undef lpfc_idiag_op_mbxAcc
3457static const struct file_operations lpfc_idiag_op_mbxAcc = {
3458 .owner = THIS_MODULE,
3459 .open = lpfc_idiag_open,
3460 .llseek = lpfc_debugfs_lseek,
3461 .read = lpfc_idiag_mbxacc_read,
3462 .write = lpfc_idiag_mbxacc_write,
3463 .release = lpfc_idiag_cmd_release,
3464};
3465
3466#undef lpfc_idiag_op_extAcc
3467static const struct file_operations lpfc_idiag_op_extAcc = {
3468 .owner = THIS_MODULE,
3469 .open = lpfc_idiag_open,
3470 .llseek = lpfc_debugfs_lseek,
3471 .read = lpfc_idiag_extacc_read,
3472 .write = lpfc_idiag_extacc_write,
3473 .release = lpfc_idiag_cmd_release,
3474};
3475
2451#endif 3476#endif
2452 3477
3478/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
3479 * @phba: Pointer to HBA context object.
3480 * @dmabuf: Pointer to a DMA buffer descriptor.
3481 *
3482 * Description:
3483 * This routine dump a bsg pass-through non-embedded mailbox command with
3484 * external buffer.
3485 **/
3486void
3487lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3488 enum mbox_type mbox_tp, enum dma_type dma_tp,
3489 enum sta_type sta_tp,
3490 struct lpfc_dmabuf *dmabuf, uint32_t ext_buf)
3491{
3492#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3493 uint32_t *mbx_mbox_cmd, *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt;
3494 char line_buf[LPFC_MBX_ACC_LBUF_SZ];
3495 int len = 0;
3496 uint32_t do_dump = 0;
3497 uint32_t *pword;
3498 uint32_t i;
3499
3500 if (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP)
3501 return;
3502
3503 mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
3504 mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
3505 mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
3506 mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
3507
3508 if (!(*mbx_dump_map & LPFC_MBX_DMP_ALL) ||
3509 (*mbx_dump_cnt == 0) ||
3510 (*mbx_word_cnt == 0))
3511 return;
3512
3513 if (*mbx_mbox_cmd != 0x9B)
3514 return;
3515
3516 if ((mbox_tp == mbox_rd) && (dma_tp == dma_mbox)) {
3517 if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_MBX) {
3518 do_dump |= LPFC_BSG_DMP_MBX_RD_MBX;
3519 printk(KERN_ERR "\nRead mbox command (x%x), "
3520 "nemb:0x%x, extbuf_cnt:%d:\n",
3521 sta_tp, nemb_tp, ext_buf);
3522 }
3523 }
3524 if ((mbox_tp == mbox_rd) && (dma_tp == dma_ebuf)) {
3525 if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_BUF) {
3526 do_dump |= LPFC_BSG_DMP_MBX_RD_BUF;
3527 printk(KERN_ERR "\nRead mbox buffer (x%x), "
3528 "nemb:0x%x, extbuf_seq:%d:\n",
3529 sta_tp, nemb_tp, ext_buf);
3530 }
3531 }
3532 if ((mbox_tp == mbox_wr) && (dma_tp == dma_mbox)) {
3533 if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_MBX) {
3534 do_dump |= LPFC_BSG_DMP_MBX_WR_MBX;
3535 printk(KERN_ERR "\nWrite mbox command (x%x), "
3536 "nemb:0x%x, extbuf_cnt:%d:\n",
3537 sta_tp, nemb_tp, ext_buf);
3538 }
3539 }
3540 if ((mbox_tp == mbox_wr) && (dma_tp == dma_ebuf)) {
3541 if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_BUF) {
3542 do_dump |= LPFC_BSG_DMP_MBX_WR_BUF;
3543 printk(KERN_ERR "\nWrite mbox buffer (x%x), "
3544 "nemb:0x%x, extbuf_seq:%d:\n",
3545 sta_tp, nemb_tp, ext_buf);
3546 }
3547 }
3548
3549 /* dump buffer content */
3550 if (do_dump) {
3551 pword = (uint32_t *)dmabuf->virt;
3552 for (i = 0; i < *mbx_word_cnt; i++) {
3553 if (!(i % 8)) {
3554 if (i != 0)
3555 printk(KERN_ERR "%s\n", line_buf);
3556 len = 0;
3557 len += snprintf(line_buf+len,
3558 LPFC_MBX_ACC_LBUF_SZ-len,
3559 "%03d: ", i);
3560 }
3561 len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
3562 "%08x ", (uint32_t)*pword);
3563 pword++;
3564 }
3565 if ((i - 1) % 8)
3566 printk(KERN_ERR "%s\n", line_buf);
3567 (*mbx_dump_cnt)--;
3568 }
3569
3570 /* Clean out command structure on reaching dump count */
3571 if (*mbx_dump_cnt == 0)
3572 memset(&idiag, 0, sizeof(idiag));
3573 return;
3574#endif
3575}
3576
3577/* lpfc_idiag_mbxacc_dump_issue_mbox - idiag debugfs dump issue mailbox command
3578 * @phba: Pointer to HBA context object.
3579 * @dmabuf: Pointer to a DMA buffer descriptor.
3580 *
3581 * Description:
3582 * This routine dump a pass-through non-embedded mailbox command from issue
3583 * mailbox command.
3584 **/
3585void
3586lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
3587{
3588#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3589 uint32_t *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt, *mbx_mbox_cmd;
3590 char line_buf[LPFC_MBX_ACC_LBUF_SZ];
3591 int len = 0;
3592 uint32_t *pword;
3593 uint8_t *pbyte;
3594 uint32_t i, j;
3595
3596 if (idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP)
3597 return;
3598
3599 mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
3600 mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
3601 mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
3602 mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
3603
3604 if (!(*mbx_dump_map & LPFC_MBX_DMP_MBX_ALL) ||
3605 (*mbx_dump_cnt == 0) ||
3606 (*mbx_word_cnt == 0))
3607 return;
3608
3609 if ((*mbx_mbox_cmd != LPFC_MBX_ALL_CMD) &&
3610 (*mbx_mbox_cmd != pmbox->mbxCommand))
3611 return;
3612
3613 /* dump buffer content */
3614 if (*mbx_dump_map & LPFC_MBX_DMP_MBX_WORD) {
3615 printk(KERN_ERR "Mailbox command:0x%x dump by word:\n",
3616 pmbox->mbxCommand);
3617 pword = (uint32_t *)pmbox;
3618 for (i = 0; i < *mbx_word_cnt; i++) {
3619 if (!(i % 8)) {
3620 if (i != 0)
3621 printk(KERN_ERR "%s\n", line_buf);
3622 len = 0;
3623 memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
3624 len += snprintf(line_buf+len,
3625 LPFC_MBX_ACC_LBUF_SZ-len,
3626 "%03d: ", i);
3627 }
3628 len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
3629 "%08x ",
3630 ((uint32_t)*pword) & 0xffffffff);
3631 pword++;
3632 }
3633 if ((i - 1) % 8)
3634 printk(KERN_ERR "%s\n", line_buf);
3635 printk(KERN_ERR "\n");
3636 }
3637 if (*mbx_dump_map & LPFC_MBX_DMP_MBX_BYTE) {
3638 printk(KERN_ERR "Mailbox command:0x%x dump by byte:\n",
3639 pmbox->mbxCommand);
3640 pbyte = (uint8_t *)pmbox;
3641 for (i = 0; i < *mbx_word_cnt; i++) {
3642 if (!(i % 8)) {
3643 if (i != 0)
3644 printk(KERN_ERR "%s\n", line_buf);
3645 len = 0;
3646 memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
3647 len += snprintf(line_buf+len,
3648 LPFC_MBX_ACC_LBUF_SZ-len,
3649 "%03d: ", i);
3650 }
3651 for (j = 0; j < 4; j++) {
3652 len += snprintf(line_buf+len,
3653 LPFC_MBX_ACC_LBUF_SZ-len,
3654 "%02x",
3655 ((uint8_t)*pbyte) & 0xff);
3656 pbyte++;
3657 }
3658 len += snprintf(line_buf+len,
3659 LPFC_MBX_ACC_LBUF_SZ-len, " ");
3660 }
3661 if ((i - 1) % 8)
3662 printk(KERN_ERR "%s\n", line_buf);
3663 printk(KERN_ERR "\n");
3664 }
3665 (*mbx_dump_cnt)--;
3666
3667 /* Clean out command structure on reaching dump count */
3668 if (*mbx_dump_cnt == 0)
3669 memset(&idiag, 0, sizeof(idiag));
3670 return;
3671#endif
3672}
3673
2453/** 3674/**
2454 * lpfc_debugfs_initialize - Initialize debugfs for a vport 3675 * lpfc_debugfs_initialize - Initialize debugfs for a vport
2455 * @vport: The vport pointer to initialize. 3676 * @vport: The vport pointer to initialize.
@@ -2673,7 +3894,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
2673 vport, &lpfc_debugfs_op_nodelist); 3894 vport, &lpfc_debugfs_op_nodelist);
2674 if (!vport->debug_nodelist) { 3895 if (!vport->debug_nodelist) {
2675 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3896 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2676 "0409 Can't create debugfs nodelist\n"); 3897 "2985 Can't create debugfs nodelist\n");
2677 goto debug_failed; 3898 goto debug_failed;
2678 } 3899 }
2679 3900
@@ -2710,6 +3931,20 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
2710 idiag.offset.last_rd = 0; 3931 idiag.offset.last_rd = 0;
2711 } 3932 }
2712 3933
3934 /* iDiag PCI BAR access */
3935 snprintf(name, sizeof(name), "barAcc");
3936 if (!phba->idiag_bar_acc) {
3937 phba->idiag_bar_acc =
3938 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
3939 phba->idiag_root, phba, &lpfc_idiag_op_barAcc);
3940 if (!phba->idiag_bar_acc) {
3941 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3942 "3056 Can't create idiag debugfs\n");
3943 goto debug_failed;
3944 }
3945 idiag.offset.last_rd = 0;
3946 }
3947
2713 /* iDiag get PCI function queue information */ 3948 /* iDiag get PCI function queue information */
2714 snprintf(name, sizeof(name), "queInfo"); 3949 snprintf(name, sizeof(name), "queInfo");
2715 if (!phba->idiag_que_info) { 3950 if (!phba->idiag_que_info) {
@@ -2749,6 +3984,50 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
2749 } 3984 }
2750 } 3985 }
2751 3986
3987 /* iDiag access PCI function control registers */
3988 snprintf(name, sizeof(name), "ctlAcc");
3989 if (!phba->idiag_ctl_acc) {
3990 phba->idiag_ctl_acc =
3991 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
3992 phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc);
3993 if (!phba->idiag_ctl_acc) {
3994 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3995 "2981 Can't create idiag debugfs\n");
3996 goto debug_failed;
3997 }
3998 }
3999
4000 /* iDiag access mbox commands */
4001 snprintf(name, sizeof(name), "mbxAcc");
4002 if (!phba->idiag_mbx_acc) {
4003 phba->idiag_mbx_acc =
4004 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
4005 phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc);
4006 if (!phba->idiag_mbx_acc) {
4007 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4008 "2980 Can't create idiag debugfs\n");
4009 goto debug_failed;
4010 }
4011 }
4012
4013 /* iDiag extents access commands */
4014 if (phba->sli4_hba.extents_in_use) {
4015 snprintf(name, sizeof(name), "extAcc");
4016 if (!phba->idiag_ext_acc) {
4017 phba->idiag_ext_acc =
4018 debugfs_create_file(name,
4019 S_IFREG|S_IRUGO|S_IWUSR,
4020 phba->idiag_root, phba,
4021 &lpfc_idiag_op_extAcc);
4022 if (!phba->idiag_ext_acc) {
4023 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4024 "2986 Cant create "
4025 "idiag debugfs\n");
4026 goto debug_failed;
4027 }
4028 }
4029 }
4030
2752debug_failed: 4031debug_failed:
2753 return; 4032 return;
2754#endif 4033#endif
@@ -2783,7 +4062,6 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
2783 debugfs_remove(vport->debug_nodelist); /* nodelist */ 4062 debugfs_remove(vport->debug_nodelist); /* nodelist */
2784 vport->debug_nodelist = NULL; 4063 vport->debug_nodelist = NULL;
2785 } 4064 }
2786
2787 if (vport->vport_debugfs_root) { 4065 if (vport->vport_debugfs_root) {
2788 debugfs_remove(vport->vport_debugfs_root); /* vportX */ 4066 debugfs_remove(vport->vport_debugfs_root); /* vportX */
2789 vport->vport_debugfs_root = NULL; 4067 vport->vport_debugfs_root = NULL;
@@ -2827,6 +4105,21 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
2827 * iDiag release 4105 * iDiag release
2828 */ 4106 */
2829 if (phba->sli_rev == LPFC_SLI_REV4) { 4107 if (phba->sli_rev == LPFC_SLI_REV4) {
4108 if (phba->idiag_ext_acc) {
4109 /* iDiag extAcc */
4110 debugfs_remove(phba->idiag_ext_acc);
4111 phba->idiag_ext_acc = NULL;
4112 }
4113 if (phba->idiag_mbx_acc) {
4114 /* iDiag mbxAcc */
4115 debugfs_remove(phba->idiag_mbx_acc);
4116 phba->idiag_mbx_acc = NULL;
4117 }
4118 if (phba->idiag_ctl_acc) {
4119 /* iDiag ctlAcc */
4120 debugfs_remove(phba->idiag_ctl_acc);
4121 phba->idiag_ctl_acc = NULL;
4122 }
2830 if (phba->idiag_drb_acc) { 4123 if (phba->idiag_drb_acc) {
2831 /* iDiag drbAcc */ 4124 /* iDiag drbAcc */
2832 debugfs_remove(phba->idiag_drb_acc); 4125 debugfs_remove(phba->idiag_drb_acc);
@@ -2842,6 +4135,11 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
2842 debugfs_remove(phba->idiag_que_info); 4135 debugfs_remove(phba->idiag_que_info);
2843 phba->idiag_que_info = NULL; 4136 phba->idiag_que_info = NULL;
2844 } 4137 }
4138 if (phba->idiag_bar_acc) {
4139 /* iDiag barAcc */
4140 debugfs_remove(phba->idiag_bar_acc);
4141 phba->idiag_bar_acc = NULL;
4142 }
2845 if (phba->idiag_pci_cfg) { 4143 if (phba->idiag_pci_cfg) {
2846 /* iDiag pciCfg */ 4144 /* iDiag pciCfg */
2847 debugfs_remove(phba->idiag_pci_cfg); 4145 debugfs_remove(phba->idiag_pci_cfg);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 6525a5e62d27..f83bd944edd8 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -39,14 +39,51 @@
39/* hbqinfo output buffer size */ 39/* hbqinfo output buffer size */
40#define LPFC_HBQINFO_SIZE 8192 40#define LPFC_HBQINFO_SIZE 8192
41 41
42/*
43 * For SLI4 iDiag debugfs diagnostics tool
44 */
45
42/* pciConf */ 46/* pciConf */
43#define LPFC_PCI_CFG_BROWSE 0xffff 47#define LPFC_PCI_CFG_BROWSE 0xffff
44#define LPFC_PCI_CFG_RD_CMD_ARG 2 48#define LPFC_PCI_CFG_RD_CMD_ARG 2
45#define LPFC_PCI_CFG_WR_CMD_ARG 3 49#define LPFC_PCI_CFG_WR_CMD_ARG 3
46#define LPFC_PCI_CFG_SIZE 4096 50#define LPFC_PCI_CFG_SIZE 4096
47#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2)
48#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4) 51#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
49 52
53#define IDIAG_PCICFG_WHERE_INDX 0
54#define IDIAG_PCICFG_COUNT_INDX 1
55#define IDIAG_PCICFG_VALUE_INDX 2
56
57/* barAcc */
58#define LPFC_PCI_BAR_BROWSE 0xffff
59#define LPFC_PCI_BAR_RD_CMD_ARG 3
60#define LPFC_PCI_BAR_WR_CMD_ARG 3
61
62#define LPFC_PCI_IF0_BAR0_SIZE (1024 * 16)
63#define LPFC_PCI_IF0_BAR1_SIZE (1024 * 128)
64#define LPFC_PCI_IF0_BAR2_SIZE (1024 * 128)
65#define LPFC_PCI_IF2_BAR0_SIZE (1024 * 32)
66
67#define LPFC_PCI_BAR_RD_BUF_SIZE 4096
68#define LPFC_PCI_BAR_RD_SIZE (LPFC_PCI_BAR_RD_BUF_SIZE/4)
69
70#define LPFC_PCI_IF0_BAR0_RD_SIZE (LPFC_PCI_IF0_BAR0_SIZE/4)
71#define LPFC_PCI_IF0_BAR1_RD_SIZE (LPFC_PCI_IF0_BAR1_SIZE/4)
72#define LPFC_PCI_IF0_BAR2_RD_SIZE (LPFC_PCI_IF0_BAR2_SIZE/4)
73#define LPFC_PCI_IF2_BAR0_RD_SIZE (LPFC_PCI_IF2_BAR0_SIZE/4)
74
75#define IDIAG_BARACC_BAR_NUM_INDX 0
76#define IDIAG_BARACC_OFF_SET_INDX 1
77#define IDIAG_BARACC_ACC_MOD_INDX 2
78#define IDIAG_BARACC_REG_VAL_INDX 2
79#define IDIAG_BARACC_BAR_SZE_INDX 3
80
81#define IDIAG_BARACC_BAR_0 0
82#define IDIAG_BARACC_BAR_1 1
83#define IDIAG_BARACC_BAR_2 2
84
85#define SINGLE_WORD 1
86
50/* queue info */ 87/* queue info */
51#define LPFC_QUE_INFO_GET_BUF_SIZE 4096 88#define LPFC_QUE_INFO_GET_BUF_SIZE 4096
52 89
@@ -63,7 +100,14 @@
63#define LPFC_IDIAG_WQ 4 100#define LPFC_IDIAG_WQ 4
64#define LPFC_IDIAG_RQ 5 101#define LPFC_IDIAG_RQ 5
65 102
66/* doorbell acc */ 103#define IDIAG_QUEACC_QUETP_INDX 0
104#define IDIAG_QUEACC_QUEID_INDX 1
105#define IDIAG_QUEACC_INDEX_INDX 2
106#define IDIAG_QUEACC_COUNT_INDX 3
107#define IDIAG_QUEACC_OFFST_INDX 4
108#define IDIAG_QUEACC_VALUE_INDX 5
109
110/* doorbell register acc */
67#define LPFC_DRB_ACC_ALL 0xffff 111#define LPFC_DRB_ACC_ALL 0xffff
68#define LPFC_DRB_ACC_RD_CMD_ARG 1 112#define LPFC_DRB_ACC_RD_CMD_ARG 1
69#define LPFC_DRB_ACC_WR_CMD_ARG 2 113#define LPFC_DRB_ACC_WR_CMD_ARG 2
@@ -76,6 +120,67 @@
76 120
77#define LPFC_DRB_MAX 4 121#define LPFC_DRB_MAX 4
78 122
123#define IDIAG_DRBACC_REGID_INDX 0
124#define IDIAG_DRBACC_VALUE_INDX 1
125
126/* control register acc */
127#define LPFC_CTL_ACC_ALL 0xffff
128#define LPFC_CTL_ACC_RD_CMD_ARG 1
129#define LPFC_CTL_ACC_WR_CMD_ARG 2
130#define LPFC_CTL_ACC_BUF_SIZE 256
131
132#define LPFC_CTL_PORT_SEM 1
133#define LPFC_CTL_PORT_STA 2
134#define LPFC_CTL_PORT_CTL 3
135#define LPFC_CTL_PORT_ER1 4
136#define LPFC_CTL_PORT_ER2 5
137#define LPFC_CTL_PDEV_CTL 6
138
139#define LPFC_CTL_MAX 6
140
141#define IDIAG_CTLACC_REGID_INDX 0
142#define IDIAG_CTLACC_VALUE_INDX 1
143
144/* mailbox access */
145#define LPFC_MBX_DMP_ARG 4
146
147#define LPFC_MBX_ACC_BUF_SIZE 512
148#define LPFC_MBX_ACC_LBUF_SZ 128
149
150#define LPFC_MBX_DMP_MBX_WORD 0x00000001
151#define LPFC_MBX_DMP_MBX_BYTE 0x00000002
152#define LPFC_MBX_DMP_MBX_ALL (LPFC_MBX_DMP_MBX_WORD | LPFC_MBX_DMP_MBX_BYTE)
153
154#define LPFC_BSG_DMP_MBX_RD_MBX 0x00000001
155#define LPFC_BSG_DMP_MBX_RD_BUF 0x00000002
156#define LPFC_BSG_DMP_MBX_WR_MBX 0x00000004
157#define LPFC_BSG_DMP_MBX_WR_BUF 0x00000008
158#define LPFC_BSG_DMP_MBX_ALL (LPFC_BSG_DMP_MBX_RD_MBX | \
159 LPFC_BSG_DMP_MBX_RD_BUF | \
160 LPFC_BSG_DMP_MBX_WR_MBX | \
161 LPFC_BSG_DMP_MBX_WR_BUF)
162
163#define LPFC_MBX_DMP_ALL 0xffff
164#define LPFC_MBX_ALL_CMD 0xff
165
166#define IDIAG_MBXACC_MBCMD_INDX 0
167#define IDIAG_MBXACC_DPMAP_INDX 1
168#define IDIAG_MBXACC_DPCNT_INDX 2
169#define IDIAG_MBXACC_WDCNT_INDX 3
170
171/* extents access */
172#define LPFC_EXT_ACC_CMD_ARG 1
173#define LPFC_EXT_ACC_BUF_SIZE 4096
174
175#define LPFC_EXT_ACC_AVAIL 0x1
176#define LPFC_EXT_ACC_ALLOC 0x2
177#define LPFC_EXT_ACC_DRIVR 0x4
178#define LPFC_EXT_ACC_ALL (LPFC_EXT_ACC_DRIVR | \
179 LPFC_EXT_ACC_AVAIL | \
180 LPFC_EXT_ACC_ALLOC)
181
182#define IDIAG_EXTACC_EXMAP_INDX 0
183
79#define SIZE_U8 sizeof(uint8_t) 184#define SIZE_U8 sizeof(uint8_t)
80#define SIZE_U16 sizeof(uint16_t) 185#define SIZE_U16 sizeof(uint16_t)
81#define SIZE_U32 sizeof(uint32_t) 186#define SIZE_U32 sizeof(uint32_t)
@@ -110,6 +215,11 @@ struct lpfc_idiag_cmd {
110#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003 215#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
111#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004 216#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
112 217
218#define LPFC_IDIAG_CMD_BARACC_RD 0x00000008
219#define LPFC_IDIAG_CMD_BARACC_WR 0x00000009
220#define LPFC_IDIAG_CMD_BARACC_ST 0x0000000a
221#define LPFC_IDIAG_CMD_BARACC_CL 0x0000000b
222
113#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011 223#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011
114#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012 224#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012
115#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013 225#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013
@@ -119,6 +229,17 @@ struct lpfc_idiag_cmd {
119#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022 229#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022
120#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023 230#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023
121#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024 231#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024
232
233#define LPFC_IDIAG_CMD_CTLACC_RD 0x00000031
234#define LPFC_IDIAG_CMD_CTLACC_WR 0x00000032
235#define LPFC_IDIAG_CMD_CTLACC_ST 0x00000033
236#define LPFC_IDIAG_CMD_CTLACC_CL 0x00000034
237
238#define LPFC_IDIAG_CMD_MBXACC_DP 0x00000041
239#define LPFC_IDIAG_BSG_MBXACC_DP 0x00000042
240
241#define LPFC_IDIAG_CMD_EXTACC_RD 0x00000051
242
122 uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE]; 243 uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
123}; 244};
124 245
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 32a084534f3e..023da0e00d38 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -647,21 +647,15 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
647 } 647 }
648 lpfc_cleanup_pending_mbox(vport); 648 lpfc_cleanup_pending_mbox(vport);
649 649
650 if (phba->sli_rev == LPFC_SLI_REV4) 650 if (phba->sli_rev == LPFC_SLI_REV4) {
651 lpfc_sli4_unreg_all_rpis(vport); 651 lpfc_sli4_unreg_all_rpis(vport);
652
653 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
654 lpfc_mbx_unreg_vpi(vport); 652 lpfc_mbx_unreg_vpi(vport);
655 spin_lock_irq(shost->host_lock); 653 spin_lock_irq(shost->host_lock);
656 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 654 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
657 spin_unlock_irq(shost->host_lock); 655 /*
658 } 656 * If VPI is unreged, driver need to do INIT_VPI
659 /* 657 * before re-registering
660 * If VPI is unreged, driver need to do INIT_VPI 658 */
661 * before re-registering
662 */
663 if (phba->sli_rev == LPFC_SLI_REV4) {
664 spin_lock_irq(shost->host_lock);
665 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 659 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
666 spin_unlock_irq(shost->host_lock); 660 spin_unlock_irq(shost->host_lock);
667 } 661 }
@@ -880,6 +874,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
880 phba->fcf.current_rec.fcf_indx, 874 phba->fcf.current_rec.fcf_indx,
881 irsp->ulpStatus, irsp->un.ulpWord[4], 875 irsp->ulpStatus, irsp->un.ulpWord[4],
882 irsp->ulpTimeout); 876 irsp->ulpTimeout);
877 lpfc_sli4_set_fcf_flogi_fail(phba,
878 phba->fcf.current_rec.fcf_indx);
883 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 879 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
884 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 880 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
885 if (rc) 881 if (rc)
@@ -1096,11 +1092,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1096 /* Set the fcfi to the fcfi we registered with */ 1092 /* Set the fcfi to the fcfi we registered with */
1097 elsiocb->iocb.ulpContext = phba->fcf.fcfi; 1093 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
1098 } 1094 }
1099 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 1095 } else {
1100 sp->cmn.request_multiple_Nport = 1; 1096 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1101 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 1097 sp->cmn.request_multiple_Nport = 1;
1102 icmd->ulpCt_h = 1; 1098 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1103 icmd->ulpCt_l = 0; 1099 icmd->ulpCt_h = 1;
1100 icmd->ulpCt_l = 0;
1101 } else
1102 sp->cmn.request_multiple_Nport = 0;
1104 } 1103 }
1105 1104
1106 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 1105 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
@@ -3656,7 +3655,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3656 } 3655 }
3657 3656
3658 icmd = &elsiocb->iocb; 3657 icmd = &elsiocb->iocb;
3659 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3658 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3659 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3660 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3660 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3661 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3661 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3662 pcmd += sizeof(uint32_t); 3662 pcmd += sizeof(uint32_t);
@@ -3673,7 +3673,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3673 return 1; 3673 return 1;
3674 3674
3675 icmd = &elsiocb->iocb; 3675 icmd = &elsiocb->iocb;
3676 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3676 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3677 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3677 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3678 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3678 3679
3679 if (mbox) 3680 if (mbox)
@@ -3695,7 +3696,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3695 return 1; 3696 return 1;
3696 3697
3697 icmd = &elsiocb->iocb; 3698 icmd = &elsiocb->iocb;
3698 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3699 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3700 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3699 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3701 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3700 3702
3701 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 3703 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
@@ -3781,7 +3783,8 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3781 3783
3782 icmd = &elsiocb->iocb; 3784 icmd = &elsiocb->iocb;
3783 oldcmd = &oldiocb->iocb; 3785 oldcmd = &oldiocb->iocb;
3784 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3786 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3787 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3785 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3788 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3786 3789
3787 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 3790 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
@@ -3853,7 +3856,8 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3853 3856
3854 icmd = &elsiocb->iocb; 3857 icmd = &elsiocb->iocb;
3855 oldcmd = &oldiocb->iocb; 3858 oldcmd = &oldiocb->iocb;
3856 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3859 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3860 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3857 3861
3858 /* Xmit ADISC ACC response tag <ulpIoTag> */ 3862 /* Xmit ADISC ACC response tag <ulpIoTag> */
3859 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3863 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -3931,7 +3935,9 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3931 3935
3932 icmd = &elsiocb->iocb; 3936 icmd = &elsiocb->iocb;
3933 oldcmd = &oldiocb->iocb; 3937 oldcmd = &oldiocb->iocb;
3934 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3938 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3939 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3940
3935 /* Xmit PRLI ACC response tag <ulpIoTag> */ 3941 /* Xmit PRLI ACC response tag <ulpIoTag> */
3936 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3942 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3937 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 3943 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
@@ -4035,7 +4041,9 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
4035 4041
4036 icmd = &elsiocb->iocb; 4042 icmd = &elsiocb->iocb;
4037 oldcmd = &oldiocb->iocb; 4043 oldcmd = &oldiocb->iocb;
4038 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 4044 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4045 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4046
4039 /* Xmit RNID ACC response tag <ulpIoTag> */ 4047 /* Xmit RNID ACC response tag <ulpIoTag> */
4040 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4048 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4041 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 4049 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
@@ -4163,7 +4171,9 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
4163 if (!elsiocb) 4171 if (!elsiocb)
4164 return 1; 4172 return 1;
4165 4173
4166 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */ 4174 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
4175 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
4176
4167 /* Xmit ECHO ACC response tag <ulpIoTag> */ 4177 /* Xmit ECHO ACC response tag <ulpIoTag> */
4168 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4178 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4169 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 4179 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
@@ -5054,13 +5064,15 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5054 uint8_t *pcmd; 5064 uint8_t *pcmd;
5055 struct lpfc_iocbq *elsiocb; 5065 struct lpfc_iocbq *elsiocb;
5056 struct lpfc_nodelist *ndlp; 5066 struct lpfc_nodelist *ndlp;
5057 uint16_t xri; 5067 uint16_t oxid;
5068 uint16_t rxid;
5058 uint32_t cmdsize; 5069 uint32_t cmdsize;
5059 5070
5060 mb = &pmb->u.mb; 5071 mb = &pmb->u.mb;
5061 5072
5062 ndlp = (struct lpfc_nodelist *) pmb->context2; 5073 ndlp = (struct lpfc_nodelist *) pmb->context2;
5063 xri = (uint16_t) ((unsigned long)(pmb->context1)); 5074 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
5075 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
5064 pmb->context1 = NULL; 5076 pmb->context1 = NULL;
5065 pmb->context2 = NULL; 5077 pmb->context2 = NULL;
5066 5078
@@ -5082,7 +5094,8 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5082 return; 5094 return;
5083 5095
5084 icmd = &elsiocb->iocb; 5096 icmd = &elsiocb->iocb;
5085 icmd->ulpContext = xri; 5097 icmd->ulpContext = rxid;
5098 icmd->unsli3.rcvsli3.ox_id = oxid;
5086 5099
5087 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5100 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5088 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5101 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -5137,13 +5150,16 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5137 uint8_t *pcmd; 5150 uint8_t *pcmd;
5138 struct lpfc_iocbq *elsiocb; 5151 struct lpfc_iocbq *elsiocb;
5139 struct lpfc_nodelist *ndlp; 5152 struct lpfc_nodelist *ndlp;
5140 uint16_t xri, status; 5153 uint16_t status;
5154 uint16_t oxid;
5155 uint16_t rxid;
5141 uint32_t cmdsize; 5156 uint32_t cmdsize;
5142 5157
5143 mb = &pmb->u.mb; 5158 mb = &pmb->u.mb;
5144 5159
5145 ndlp = (struct lpfc_nodelist *) pmb->context2; 5160 ndlp = (struct lpfc_nodelist *) pmb->context2;
5146 xri = (uint16_t) ((unsigned long)(pmb->context1)); 5161 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
5162 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
5147 pmb->context1 = NULL; 5163 pmb->context1 = NULL;
5148 pmb->context2 = NULL; 5164 pmb->context2 = NULL;
5149 5165
@@ -5165,7 +5181,8 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5165 return; 5181 return;
5166 5182
5167 icmd = &elsiocb->iocb; 5183 icmd = &elsiocb->iocb;
5168 icmd->ulpContext = xri; 5184 icmd->ulpContext = rxid;
5185 icmd->unsli3.rcvsli3.ox_id = oxid;
5169 5186
5170 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5187 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5171 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5188 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -5238,8 +5255,9 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5238 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 5255 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5239 if (mbox) { 5256 if (mbox) {
5240 lpfc_read_lnk_stat(phba, mbox); 5257 lpfc_read_lnk_stat(phba, mbox);
5241 mbox->context1 = 5258 mbox->context1 = (void *)((unsigned long)
5242 (void *)((unsigned long) cmdiocb->iocb.ulpContext); 5259 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5260 cmdiocb->iocb.ulpContext)); /* rx_id */
5243 mbox->context2 = lpfc_nlp_get(ndlp); 5261 mbox->context2 = lpfc_nlp_get(ndlp);
5244 mbox->vport = vport; 5262 mbox->vport = vport;
5245 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 5263 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
@@ -5314,7 +5332,8 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5314 pcmd += sizeof(uint32_t); /* Skip past command */ 5332 pcmd += sizeof(uint32_t); /* Skip past command */
5315 5333
5316 /* use the command's xri in the response */ 5334 /* use the command's xri in the response */
5317 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; 5335 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
5336 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
5318 5337
5319 rtv_rsp = (struct RTV_RSP *)pcmd; 5338 rtv_rsp = (struct RTV_RSP *)pcmd;
5320 5339
@@ -5399,8 +5418,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5399 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 5418 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5400 if (mbox) { 5419 if (mbox) {
5401 lpfc_read_lnk_stat(phba, mbox); 5420 lpfc_read_lnk_stat(phba, mbox);
5402 mbox->context1 = 5421 mbox->context1 = (void *)((unsigned long)
5403 (void *)((unsigned long) cmdiocb->iocb.ulpContext); 5422 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5423 cmdiocb->iocb.ulpContext)); /* rx_id */
5404 mbox->context2 = lpfc_nlp_get(ndlp); 5424 mbox->context2 = lpfc_nlp_get(ndlp);
5405 mbox->vport = vport; 5425 mbox->vport = vport;
5406 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; 5426 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
@@ -5554,7 +5574,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
5554 5574
5555 icmd = &elsiocb->iocb; 5575 icmd = &elsiocb->iocb;
5556 oldcmd = &oldiocb->iocb; 5576 oldcmd = &oldiocb->iocb;
5557 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 5577 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5578 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5558 5579
5559 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5580 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5560 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5581 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -6586,7 +6607,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6586{ 6607{
6587 struct lpfc_vport *vport; 6608 struct lpfc_vport *vport;
6588 unsigned long flags; 6609 unsigned long flags;
6589 int i; 6610 int i = 0;
6590 6611
6591 /* The physical ports are always vpi 0 - translate is unnecessary. */ 6612 /* The physical ports are always vpi 0 - translate is unnecessary. */
6592 if (vpi > 0) { 6613 if (vpi > 0) {
@@ -6609,7 +6630,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6609 6630
6610 spin_lock_irqsave(&phba->hbalock, flags); 6631 spin_lock_irqsave(&phba->hbalock, flags);
6611 list_for_each_entry(vport, &phba->port_list, listentry) { 6632 list_for_each_entry(vport, &phba->port_list, listentry) {
6612 if (vport->vpi == vpi) { 6633 if (vport->vpi == i) {
6613 spin_unlock_irqrestore(&phba->hbalock, flags); 6634 spin_unlock_irqrestore(&phba->hbalock, flags);
6614 return vport; 6635 return vport;
6615 } 6636 }
@@ -7787,6 +7808,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
7787{ 7808{
7788 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 7809 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
7789 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 7810 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
7811 uint16_t lxri = 0;
7790 7812
7791 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7813 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7792 unsigned long iflag = 0; 7814 unsigned long iflag = 0;
@@ -7815,7 +7837,12 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
7815 } 7837 }
7816 } 7838 }
7817 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 7839 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
7818 sglq_entry = __lpfc_get_active_sglq(phba, xri); 7840 lxri = lpfc_sli4_xri_inrange(phba, xri);
7841 if (lxri == NO_XRI) {
7842 spin_unlock_irqrestore(&phba->hbalock, iflag);
7843 return;
7844 }
7845 sglq_entry = __lpfc_get_active_sglq(phba, lxri);
7819 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 7846 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
7820 spin_unlock_irqrestore(&phba->hbalock, iflag); 7847 spin_unlock_irqrestore(&phba->hbalock, iflag);
7821 return; 7848 return;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 18d0dbfda2bc..0b47adf9fee8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1109,6 +1109,28 @@ out:
1109 return; 1109 return;
1110} 1110}
1111 1111
1112/**
1113 * lpfc_sli4_clear_fcf_rr_bmask
1114 * @phba: pointer to the struct lpfc_hba for this port.
1115 * This function resets the round robin bit mask and clears the
1116 * fcf priority list. The list deletions are done while holding the
1117 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
1118 * from the lpfc_fcf_pri record.
1119 **/
1120void
1121lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
1122{
1123 struct lpfc_fcf_pri *fcf_pri;
1124 struct lpfc_fcf_pri *next_fcf_pri;
1125 memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
1126 spin_lock_irq(&phba->hbalock);
1127 list_for_each_entry_safe(fcf_pri, next_fcf_pri,
1128 &phba->fcf.fcf_pri_list, list) {
1129 list_del_init(&fcf_pri->list);
1130 fcf_pri->fcf_rec.flag = 0;
1131 }
1132 spin_unlock_irq(&phba->hbalock);
1133}
1112static void 1134static void
1113lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1135lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1114{ 1136{
@@ -1130,7 +1152,8 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1130 spin_unlock_irq(&phba->hbalock); 1152 spin_unlock_irq(&phba->hbalock);
1131 1153
1132 /* If there is a pending FCoE event, restart FCF table scan. */ 1154 /* If there is a pending FCoE event, restart FCF table scan. */
1133 if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) 1155 if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
1156 lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
1134 goto fail_out; 1157 goto fail_out;
1135 1158
1136 /* Mark successful completion of FCF table scan */ 1159 /* Mark successful completion of FCF table scan */
@@ -1250,6 +1273,30 @@ lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
1250} 1273}
1251 1274
1252/** 1275/**
1276 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
1277 *
1278 * @phba: pointer to lpfc hba data structure.
1279 * @fcf_index: Index for the lpfc_fcf_record.
1280 * @new_fcf_record: pointer to hba fcf record.
1281 *
1282 * This routine updates the driver FCF priority record from the new HBA FCF
1283 * record. This routine is called with the host lock held.
1284 **/
1285static void
1286__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
1287 struct fcf_record *new_fcf_record
1288 )
1289{
1290 struct lpfc_fcf_pri *fcf_pri;
1291
1292 fcf_pri = &phba->fcf.fcf_pri[fcf_index];
1293 fcf_pri->fcf_rec.fcf_index = fcf_index;
1294 /* FCF record priority */
1295 fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
1296
1297}
1298
1299/**
1253 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. 1300 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1254 * @fcf: pointer to driver fcf record. 1301 * @fcf: pointer to driver fcf record.
1255 * @new_fcf_record: pointer to fcf record. 1302 * @new_fcf_record: pointer to fcf record.
@@ -1332,6 +1379,9 @@ __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
1332 fcf_rec->addr_mode = addr_mode; 1379 fcf_rec->addr_mode = addr_mode;
1333 fcf_rec->vlan_id = vlan_id; 1380 fcf_rec->vlan_id = vlan_id;
1334 fcf_rec->flag |= (flag | RECORD_VALID); 1381 fcf_rec->flag |= (flag | RECORD_VALID);
1382 __lpfc_update_fcf_record_pri(phba,
1383 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
1384 new_fcf_record);
1335} 1385}
1336 1386
1337/** 1387/**
@@ -1834,6 +1884,8 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
1834 return false; 1884 return false;
1835 if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record)) 1885 if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
1836 return false; 1886 return false;
1887 if (fcf_rec->priority != new_fcf_record->fip_priority)
1888 return false;
1837 return true; 1889 return true;
1838} 1890}
1839 1891
@@ -1897,6 +1949,152 @@ stop_flogi_current_fcf:
1897} 1949}
1898 1950
1899/** 1951/**
1952 * lpfc_sli4_fcf_pri_list_del
1953 * @phba: pointer to lpfc hba data structure.
1954 * @fcf_index: the index of the fcf record to delete
1955 * This routine checks the on list flag of the fcf_index to be deleted.
1956 * If it is on the list then it is removed from the list, and the flag
1957 * is cleared. This routine grabs the hbalock before removing the fcf
1958 * record from the list.
1959 **/
1960static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
1961 uint16_t fcf_index)
1962{
1963 struct lpfc_fcf_pri *new_fcf_pri;
1964
1965 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
1966 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1967 "3058 deleting idx x%x pri x%x flg x%x\n",
1968 fcf_index, new_fcf_pri->fcf_rec.priority,
1969 new_fcf_pri->fcf_rec.flag);
1970 spin_lock_irq(&phba->hbalock);
1971 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
1972 if (phba->fcf.current_rec.priority ==
1973 new_fcf_pri->fcf_rec.priority)
1974 phba->fcf.eligible_fcf_cnt--;
1975 list_del_init(&new_fcf_pri->list);
1976 new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
1977 }
1978 spin_unlock_irq(&phba->hbalock);
1979}
1980
1981/**
1982 * lpfc_sli4_set_fcf_flogi_fail
1983 * @phba: pointer to lpfc hba data structure.
1984 * @fcf_index: the index of the fcf record to update
1985 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
1986 * flag so that the round robin selection for the particular priority level
1987 * will try a different fcf record that does not have this bit set.
1988 * If the fcf record is re-read for any reason this flag is cleared before
1989 * adding it to the priority list.
1990 **/
1991void
1992lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
1993{
1994 struct lpfc_fcf_pri *new_fcf_pri;
1995 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
1996 spin_lock_irq(&phba->hbalock);
1997 new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
1998 spin_unlock_irq(&phba->hbalock);
1999}
2000
2001/**
2002 * lpfc_sli4_fcf_pri_list_add
2003 * @phba: pointer to lpfc hba data structure.
2004 * @fcf_index the index of the fcf record to add
2005 * This routine checks the priority of the fcf_index to be added.
2006 * If it is a lower priority than the current head of the fcf_pri list
2007 * then it is added to the list in the right order.
2008 * If it is the same priority as the current head of the list then it
2009 * is added to the head of the list and its bit in the rr_bmask is set.
2010 * If the fcf_index to be added is of a higher priority than the current
2011 * head of the list then the rr_bmask is cleared, its bit is set in the
2012 * rr_bmask and it is added to the head of the list.
2013 * returns:
2014 * 0=success 1=failure
2015 **/
2016int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
2017 struct fcf_record *new_fcf_record)
2018{
2019 uint16_t current_fcf_pri;
2020 uint16_t last_index;
2021 struct lpfc_fcf_pri *fcf_pri;
2022 struct lpfc_fcf_pri *next_fcf_pri;
2023 struct lpfc_fcf_pri *new_fcf_pri;
2024 int ret;
2025
2026 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2027 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2028 "3059 adding idx x%x pri x%x flg x%x\n",
2029 fcf_index, new_fcf_record->fip_priority,
2030 new_fcf_pri->fcf_rec.flag);
2031 spin_lock_irq(&phba->hbalock);
2032 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
2033 list_del_init(&new_fcf_pri->list);
2034 new_fcf_pri->fcf_rec.fcf_index = fcf_index;
2035 new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
2036 if (list_empty(&phba->fcf.fcf_pri_list)) {
2037 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2038 ret = lpfc_sli4_fcf_rr_index_set(phba,
2039 new_fcf_pri->fcf_rec.fcf_index);
2040 goto out;
2041 }
2042
2043 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
2044 LPFC_SLI4_FCF_TBL_INDX_MAX);
2045 if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
2046 ret = 0; /* Empty rr list */
2047 goto out;
2048 }
2049 current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
2050 if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
2051 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2052 if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
2053 memset(phba->fcf.fcf_rr_bmask, 0,
2054 sizeof(*phba->fcf.fcf_rr_bmask));
2055 /* fcfs_at_this_priority_level = 1; */
2056 phba->fcf.eligible_fcf_cnt = 1;
2057 } else
2058 /* fcfs_at_this_priority_level++; */
2059 phba->fcf.eligible_fcf_cnt++;
2060 ret = lpfc_sli4_fcf_rr_index_set(phba,
2061 new_fcf_pri->fcf_rec.fcf_index);
2062 goto out;
2063 }
2064
2065 list_for_each_entry_safe(fcf_pri, next_fcf_pri,
2066 &phba->fcf.fcf_pri_list, list) {
2067 if (new_fcf_pri->fcf_rec.priority <=
2068 fcf_pri->fcf_rec.priority) {
2069 if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
2070 list_add(&new_fcf_pri->list,
2071 &phba->fcf.fcf_pri_list);
2072 else
2073 list_add(&new_fcf_pri->list,
2074 &((struct lpfc_fcf_pri *)
2075 fcf_pri->list.prev)->list);
2076 ret = 0;
2077 goto out;
2078 } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
2079 || new_fcf_pri->fcf_rec.priority <
2080 next_fcf_pri->fcf_rec.priority) {
2081 list_add(&new_fcf_pri->list, &fcf_pri->list);
2082 ret = 0;
2083 goto out;
2084 }
2085 if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
2086 continue;
2087
2088 }
2089 ret = 1;
2090out:
2091 /* we use = instead of |= to clear the FLOGI_FAILED flag. */
2092 new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
2093 spin_unlock_irq(&phba->hbalock);
2094 return ret;
2095}
2096
2097/**
1900 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. 2098 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
1901 * @phba: pointer to lpfc hba data structure. 2099 * @phba: pointer to lpfc hba data structure.
1902 * @mboxq: pointer to mailbox object. 2100 * @mboxq: pointer to mailbox object.
@@ -1958,6 +2156,9 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1958 * record for roundrobin FCF failover. 2156 * record for roundrobin FCF failover.
1959 */ 2157 */
1960 if (!rc) { 2158 if (!rc) {
2159 lpfc_sli4_fcf_pri_list_del(phba,
2160 bf_get(lpfc_fcf_record_fcf_index,
2161 new_fcf_record));
1961 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2162 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1962 "2781 FCF (x%x) failed connection " 2163 "2781 FCF (x%x) failed connection "
1963 "list check: (x%x/x%x)\n", 2164 "list check: (x%x/x%x)\n",
@@ -2005,7 +2206,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2005 goto read_next_fcf; 2206 goto read_next_fcf;
2006 } else { 2207 } else {
2007 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2208 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2008 rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); 2209 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
2210 new_fcf_record);
2009 if (rc) 2211 if (rc)
2010 goto read_next_fcf; 2212 goto read_next_fcf;
2011 } 2213 }
@@ -2018,7 +2220,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2018 */ 2220 */
2019 spin_lock_irq(&phba->hbalock); 2221 spin_lock_irq(&phba->hbalock);
2020 if (phba->fcf.fcf_flag & FCF_IN_USE) { 2222 if (phba->fcf.fcf_flag & FCF_IN_USE) {
2021 if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2223 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2224 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2022 new_fcf_record, vlan_id)) { 2225 new_fcf_record, vlan_id)) {
2023 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == 2226 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
2024 phba->fcf.current_rec.fcf_indx) { 2227 phba->fcf.current_rec.fcf_indx) {
@@ -2232,7 +2435,8 @@ read_next_fcf:
2232 (phba->fcf.fcf_flag & FCF_REDISC_PEND)) 2435 (phba->fcf.fcf_flag & FCF_REDISC_PEND))
2233 return; 2436 return;
2234 2437
2235 if (phba->fcf.fcf_flag & FCF_IN_USE) { 2438 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2439 phba->fcf.fcf_flag & FCF_IN_USE) {
2236 /* 2440 /*
2237 * In case the current in-use FCF record no 2441 * In case the current in-use FCF record no
2238 * longer existed during FCF discovery that 2442 * longer existed during FCF discovery that
@@ -2247,7 +2451,6 @@ read_next_fcf:
2247 spin_lock_irq(&phba->hbalock); 2451 spin_lock_irq(&phba->hbalock);
2248 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 2452 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2249 spin_unlock_irq(&phba->hbalock); 2453 spin_unlock_irq(&phba->hbalock);
2250 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2251 lpfc_sli4_fcf_scan_read_fcf_rec(phba, 2454 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2252 LPFC_FCOE_FCF_GET_FIRST); 2455 LPFC_FCOE_FCF_GET_FIRST);
2253 return; 2456 return;
@@ -2424,7 +2627,8 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2424 2627
2425 /* Update the eligible FCF record index bmask */ 2628 /* Update the eligible FCF record index bmask */
2426 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2629 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2427 rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); 2630
2631 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
2428 2632
2429out: 2633out:
2430 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2634 lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -2645,6 +2849,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2645 vport->vpi_state |= LPFC_VPI_REGISTERED; 2849 vport->vpi_state |= LPFC_VPI_REGISTERED;
2646 vport->fc_flag |= FC_VFI_REGISTERED; 2850 vport->fc_flag |= FC_VFI_REGISTERED;
2647 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2851 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2852 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2648 spin_unlock_irq(shost->host_lock); 2853 spin_unlock_irq(shost->host_lock);
2649 2854
2650 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2855 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
@@ -2893,8 +3098,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
2893 goto out; 3098 goto out;
2894 } 3099 }
2895 /* Reset FCF roundrobin bmask for new discovery */ 3100 /* Reset FCF roundrobin bmask for new discovery */
2896 memset(phba->fcf.fcf_rr_bmask, 0, 3101 lpfc_sli4_clear_fcf_rr_bmask(phba);
2897 sizeof(*phba->fcf.fcf_rr_bmask));
2898 } 3102 }
2899 3103
2900 return; 3104 return;
@@ -5592,7 +5796,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
5592 spin_unlock_irq(&phba->hbalock); 5796 spin_unlock_irq(&phba->hbalock);
5593 5797
5594 /* Reset FCF roundrobin bmask for new discovery */ 5798 /* Reset FCF roundrobin bmask for new discovery */
5595 memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask)); 5799 lpfc_sli4_clear_fcf_rr_bmask(phba);
5596 5800
5597 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5801 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
5598 5802
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index ab4c4d651d0c..046edc4ab35f 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -3470,11 +3470,16 @@ typedef struct {
3470 or CMD_IOCB_RCV_SEQ64_CX (0xB5) */ 3470 or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
3471 3471
3472struct rcv_sli3 { 3472struct rcv_sli3 {
3473 uint32_t word8Rsvd;
3474#ifdef __BIG_ENDIAN_BITFIELD 3473#ifdef __BIG_ENDIAN_BITFIELD
3474 uint16_t ox_id;
3475 uint16_t seq_cnt;
3476
3475 uint16_t vpi; 3477 uint16_t vpi;
3476 uint16_t word9Rsvd; 3478 uint16_t word9Rsvd;
3477#else /* __LITTLE_ENDIAN */ 3479#else /* __LITTLE_ENDIAN */
3480 uint16_t seq_cnt;
3481 uint16_t ox_id;
3482
3478 uint16_t word9Rsvd; 3483 uint16_t word9Rsvd;
3479 uint16_t vpi; 3484 uint16_t vpi;
3480#endif 3485#endif
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 11e26a26b5d1..7f8003b5181e 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -170,15 +170,8 @@ struct lpfc_sli_intf {
170#define LPFC_PCI_FUNC3 3 170#define LPFC_PCI_FUNC3 3
171#define LPFC_PCI_FUNC4 4 171#define LPFC_PCI_FUNC4 4
172 172
173/* SLI4 interface type-2 control register offsets */ 173/* SLI4 interface type-2 PDEV_CTL register */
174#define LPFC_CTL_PORT_SEM_OFFSET 0x400
175#define LPFC_CTL_PORT_STA_OFFSET 0x404
176#define LPFC_CTL_PORT_CTL_OFFSET 0x408
177#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
178#define LPFC_CTL_PORT_ER2_OFFSET 0x410
179#define LPFC_CTL_PDEV_CTL_OFFSET 0x414 174#define LPFC_CTL_PDEV_CTL_OFFSET 0x414
180
181/* Some SLI4 interface type-2 PDEV_CTL register bits */
182#define LPFC_CTL_PDEV_CTL_DRST 0x00000001 175#define LPFC_CTL_PDEV_CTL_DRST 0x00000001
183#define LPFC_CTL_PDEV_CTL_FRST 0x00000002 176#define LPFC_CTL_PDEV_CTL_FRST 0x00000002
184#define LPFC_CTL_PDEV_CTL_DD 0x00000004 177#define LPFC_CTL_PDEV_CTL_DD 0x00000004
@@ -337,6 +330,7 @@ struct lpfc_cqe {
337#define CQE_CODE_RELEASE_WQE 0x2 330#define CQE_CODE_RELEASE_WQE 0x2
338#define CQE_CODE_RECEIVE 0x4 331#define CQE_CODE_RECEIVE 0x4
339#define CQE_CODE_XRI_ABORTED 0x5 332#define CQE_CODE_XRI_ABORTED 0x5
333#define CQE_CODE_RECEIVE_V1 0x9
340 334
341/* completion queue entry for wqe completions */ 335/* completion queue entry for wqe completions */
342struct lpfc_wcqe_complete { 336struct lpfc_wcqe_complete {
@@ -440,7 +434,10 @@ struct lpfc_rcqe {
440#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */ 434#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
441#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */ 435#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
442#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */ 436#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
443 uint32_t reserved1; 437 uint32_t word1;
438#define lpfc_rcqe_fcf_id_v1_SHIFT 0
439#define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F
440#define lpfc_rcqe_fcf_id_v1_WORD word1
444 uint32_t word2; 441 uint32_t word2;
445#define lpfc_rcqe_length_SHIFT 16 442#define lpfc_rcqe_length_SHIFT 16
446#define lpfc_rcqe_length_MASK 0x0000FFFF 443#define lpfc_rcqe_length_MASK 0x0000FFFF
@@ -451,6 +448,9 @@ struct lpfc_rcqe {
451#define lpfc_rcqe_fcf_id_SHIFT 0 448#define lpfc_rcqe_fcf_id_SHIFT 0
452#define lpfc_rcqe_fcf_id_MASK 0x0000003F 449#define lpfc_rcqe_fcf_id_MASK 0x0000003F
453#define lpfc_rcqe_fcf_id_WORD word2 450#define lpfc_rcqe_fcf_id_WORD word2
451#define lpfc_rcqe_rq_id_v1_SHIFT 0
452#define lpfc_rcqe_rq_id_v1_MASK 0x0000FFFF
453#define lpfc_rcqe_rq_id_v1_WORD word2
454 uint32_t word3; 454 uint32_t word3;
455#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT 455#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
456#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK 456#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
@@ -515,7 +515,7 @@ struct lpfc_register {
515/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */ 515/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
516#define LPFC_SLI_INTF 0x0058 516#define LPFC_SLI_INTF 0x0058
517 517
518#define LPFC_SLIPORT_IF2_SMPHR 0x0400 518#define LPFC_CTL_PORT_SEM_OFFSET 0x400
519#define lpfc_port_smphr_perr_SHIFT 31 519#define lpfc_port_smphr_perr_SHIFT 31
520#define lpfc_port_smphr_perr_MASK 0x1 520#define lpfc_port_smphr_perr_MASK 0x1
521#define lpfc_port_smphr_perr_WORD word0 521#define lpfc_port_smphr_perr_WORD word0
@@ -575,7 +575,7 @@ struct lpfc_register {
575#define LPFC_POST_STAGE_PORT_READY 0xC000 575#define LPFC_POST_STAGE_PORT_READY 0xC000
576#define LPFC_POST_STAGE_PORT_UE 0xF000 576#define LPFC_POST_STAGE_PORT_UE 0xF000
577 577
578#define LPFC_SLIPORT_STATUS 0x0404 578#define LPFC_CTL_PORT_STA_OFFSET 0x404
579#define lpfc_sliport_status_err_SHIFT 31 579#define lpfc_sliport_status_err_SHIFT 31
580#define lpfc_sliport_status_err_MASK 0x1 580#define lpfc_sliport_status_err_MASK 0x1
581#define lpfc_sliport_status_err_WORD word0 581#define lpfc_sliport_status_err_WORD word0
@@ -593,7 +593,7 @@ struct lpfc_register {
593#define lpfc_sliport_status_rdy_WORD word0 593#define lpfc_sliport_status_rdy_WORD word0
594#define MAX_IF_TYPE_2_RESETS 1000 594#define MAX_IF_TYPE_2_RESETS 1000
595 595
596#define LPFC_SLIPORT_CNTRL 0x0408 596#define LPFC_CTL_PORT_CTL_OFFSET 0x408
597#define lpfc_sliport_ctrl_end_SHIFT 30 597#define lpfc_sliport_ctrl_end_SHIFT 30
598#define lpfc_sliport_ctrl_end_MASK 0x1 598#define lpfc_sliport_ctrl_end_MASK 0x1
599#define lpfc_sliport_ctrl_end_WORD word0 599#define lpfc_sliport_ctrl_end_WORD word0
@@ -604,8 +604,8 @@ struct lpfc_register {
604#define lpfc_sliport_ctrl_ip_WORD word0 604#define lpfc_sliport_ctrl_ip_WORD word0
605#define LPFC_SLIPORT_INIT_PORT 1 605#define LPFC_SLIPORT_INIT_PORT 1
606 606
607#define LPFC_SLIPORT_ERR_1 0x040C 607#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
608#define LPFC_SLIPORT_ERR_2 0x0410 608#define LPFC_CTL_PORT_ER2_OFFSET 0x410
609 609
610/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically 610/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
611 * reside in BAR 2. 611 * reside in BAR 2.
@@ -3198,6 +3198,8 @@ struct lpfc_grp_hdr {
3198#define lpfc_grp_hdr_id_MASK 0x000000FF 3198#define lpfc_grp_hdr_id_MASK 0x000000FF
3199#define lpfc_grp_hdr_id_WORD word2 3199#define lpfc_grp_hdr_id_WORD word2
3200 uint8_t rev_name[128]; 3200 uint8_t rev_name[128];
3201 uint8_t date[12];
3202 uint8_t revision[32];
3201}; 3203};
3202 3204
3203#define FCP_COMMAND 0x0 3205#define FCP_COMMAND 0x0
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 148b98ddbb1d..a3c820083c36 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2927,6 +2927,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2927 sizeof fc_host_symbolic_name(shost)); 2927 sizeof fc_host_symbolic_name(shost));
2928 2928
2929 fc_host_supported_speeds(shost) = 0; 2929 fc_host_supported_speeds(shost) = 0;
2930 if (phba->lmt & LMT_16Gb)
2931 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
2930 if (phba->lmt & LMT_10Gb) 2932 if (phba->lmt & LMT_10Gb)
2931 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2933 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2932 if (phba->lmt & LMT_8Gb) 2934 if (phba->lmt & LMT_8Gb)
@@ -3632,8 +3634,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3632 lpfc_sli4_fcf_dead_failthrough(phba); 3634 lpfc_sli4_fcf_dead_failthrough(phba);
3633 } else { 3635 } else {
3634 /* Reset FCF roundrobin bmask for new discovery */ 3636 /* Reset FCF roundrobin bmask for new discovery */
3635 memset(phba->fcf.fcf_rr_bmask, 0, 3637 lpfc_sli4_clear_fcf_rr_bmask(phba);
3636 sizeof(*phba->fcf.fcf_rr_bmask));
3637 /* 3638 /*
3638 * Handling fast FCF failover to a DEAD FCF event is 3639 * Handling fast FCF failover to a DEAD FCF event is
3639 * considered equalivant to receiving CVL to all vports. 3640 * considered equalivant to receiving CVL to all vports.
@@ -3647,7 +3648,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3647 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 3648 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3648 3649
3649 vport = lpfc_find_vport_by_vpid(phba, 3650 vport = lpfc_find_vport_by_vpid(phba,
3650 acqe_fip->index - phba->vpi_base); 3651 acqe_fip->index);
3651 ndlp = lpfc_sli4_perform_vport_cvl(vport); 3652 ndlp = lpfc_sli4_perform_vport_cvl(vport);
3652 if (!ndlp) 3653 if (!ndlp)
3653 break; 3654 break;
@@ -3719,8 +3720,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3719 * Reset FCF roundrobin bmask for new 3720 * Reset FCF roundrobin bmask for new
3720 * discovery. 3721 * discovery.
3721 */ 3722 */
3722 memset(phba->fcf.fcf_rr_bmask, 0, 3723 lpfc_sli4_clear_fcf_rr_bmask(phba);
3723 sizeof(*phba->fcf.fcf_rr_bmask));
3724 } 3724 }
3725 break; 3725 break;
3726 default: 3726 default:
@@ -4035,6 +4035,34 @@ lpfc_reset_hba(struct lpfc_hba *phba)
4035} 4035}
4036 4036
4037/** 4037/**
4038 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
4039 * @phba: pointer to lpfc hba data structure.
4040 *
4041 * This function reads the PCI SR-IOV extended capability of the physical
4042 * function and returns the maximum number of virtual functions
4043 * (PCI_SRIOV_TOTAL_VF) that the device supports. It returns 0 when the
4044 * device is not a physical function or does not advertise the SR-IOV
4045 * extended capability.
4046 **/
4047uint16_t
4048lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4049{
4050 struct pci_dev *pdev = phba->pcidev;
4051 uint16_t nr_virtfn;
4052 int pos;
4053
4054 if (!pdev->is_physfn)
4055 return 0;
4056
4057 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4058 if (pos == 0)
4059 return 0;
4060
4061 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
4062 return nr_virtfn;
4063}
4064
4065/**
4038 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 4066 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4039 * @phba: pointer to lpfc hba data structure. 4067 * @phba: pointer to lpfc hba data structure.
4040 * @nr_vfn: number of virtual functions to be enabled. 4068 * @nr_vfn: number of virtual functions to be enabled.
@@ -4049,8 +4077,17 @@ int
4049lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 4077lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4050{ 4078{
4051 struct pci_dev *pdev = phba->pcidev; 4079 struct pci_dev *pdev = phba->pcidev;
4080 uint16_t max_nr_vfn;
4052 int rc; 4081 int rc;
4053 4082
4083 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4084 if (nr_vfn > max_nr_vfn) {
4085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4086 "3057 Requested vfs (%d) greater than "
4087 "supported vfs (%d)", nr_vfn, max_nr_vfn);
4088 return -EINVAL;
4089 }
4090
4054 rc = pci_enable_sriov(pdev, nr_vfn); 4091 rc = pci_enable_sriov(pdev, nr_vfn);
4055 if (rc) { 4092 if (rc) {
4056 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4093 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -4516,7 +4553,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4516 } 4553 }
4517 } 4554 }
4518 4555
4519 return rc; 4556 return 0;
4520 4557
4521out_free_fcp_eq_hdl: 4558out_free_fcp_eq_hdl:
4522 kfree(phba->sli4_hba.fcp_eq_hdl); 4559 kfree(phba->sli4_hba.fcp_eq_hdl);
@@ -4966,17 +5003,14 @@ out_free_mem:
4966 * @phba: pointer to lpfc hba data structure. 5003 * @phba: pointer to lpfc hba data structure.
4967 * 5004 *
4968 * This routine is invoked to post rpi header templates to the 5005 * This routine is invoked to post rpi header templates to the
4969 * HBA consistent with the SLI-4 interface spec. This routine 5006 * port for those SLI4 ports that do not support extents. This routine
4970 * posts a PAGE_SIZE memory region to the port to hold up to 5007 * posts a PAGE_SIZE memory region to the port to hold up to
4971 * PAGE_SIZE modulo 64 rpi context headers. 5008 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
4972 * No locks are held here because this is an initialization routine 5009 * and should be called only when interrupts are disabled.
4973 * called only from probe or lpfc_online when interrupts are not
4974 * enabled and the driver is reinitializing the device.
4975 * 5010 *
4976 * Return codes 5011 * Return codes
4977 * 0 - successful 5012 * 0 - successful
4978 * -ENOMEM - No available memory 5013 * -ERROR - otherwise.
4979 * -EIO - The mailbox failed to complete successfully.
4980 **/ 5014 **/
4981int 5015int
4982lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 5016lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
@@ -5687,17 +5721,22 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
5687 break; 5721 break;
5688 case LPFC_SLI_INTF_IF_TYPE_2: 5722 case LPFC_SLI_INTF_IF_TYPE_2:
5689 phba->sli4_hba.u.if_type2.ERR1regaddr = 5723 phba->sli4_hba.u.if_type2.ERR1regaddr =
5690 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1; 5724 phba->sli4_hba.conf_regs_memmap_p +
5725 LPFC_CTL_PORT_ER1_OFFSET;
5691 phba->sli4_hba.u.if_type2.ERR2regaddr = 5726 phba->sli4_hba.u.if_type2.ERR2regaddr =
5692 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2; 5727 phba->sli4_hba.conf_regs_memmap_p +
5728 LPFC_CTL_PORT_ER2_OFFSET;
5693 phba->sli4_hba.u.if_type2.CTRLregaddr = 5729 phba->sli4_hba.u.if_type2.CTRLregaddr =
5694 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL; 5730 phba->sli4_hba.conf_regs_memmap_p +
5731 LPFC_CTL_PORT_CTL_OFFSET;
5695 phba->sli4_hba.u.if_type2.STATUSregaddr = 5732 phba->sli4_hba.u.if_type2.STATUSregaddr =
5696 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS; 5733 phba->sli4_hba.conf_regs_memmap_p +
5734 LPFC_CTL_PORT_STA_OFFSET;
5697 phba->sli4_hba.SLIINTFregaddr = 5735 phba->sli4_hba.SLIINTFregaddr =
5698 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 5736 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5699 phba->sli4_hba.PSMPHRregaddr = 5737 phba->sli4_hba.PSMPHRregaddr =
5700 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR; 5738 phba->sli4_hba.conf_regs_memmap_p +
5739 LPFC_CTL_PORT_SEM_OFFSET;
5701 phba->sli4_hba.RQDBregaddr = 5740 phba->sli4_hba.RQDBregaddr =
5702 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL; 5741 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
5703 phba->sli4_hba.WQDBregaddr = 5742 phba->sli4_hba.WQDBregaddr =
@@ -8859,11 +8898,11 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
8859 return -EINVAL; 8898 return -EINVAL;
8860 } 8899 }
8861 lpfc_decode_firmware_rev(phba, fwrev, 1); 8900 lpfc_decode_firmware_rev(phba, fwrev, 1);
8862 if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) { 8901 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
8863 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8864 "3023 Updating Firmware. Current Version:%s " 8903 "3023 Updating Firmware. Current Version:%s "
8865 "New Version:%s\n", 8904 "New Version:%s\n",
8866 fwrev, image->rev_name); 8905 fwrev, image->revision);
8867 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 8906 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
8868 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 8907 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
8869 GFP_KERNEL); 8908 GFP_KERNEL);
@@ -8892,9 +8931,9 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
8892 fw->size - offset); 8931 fw->size - offset);
8893 break; 8932 break;
8894 } 8933 }
8895 temp_offset += SLI4_PAGE_SIZE;
8896 memcpy(dmabuf->virt, fw->data + temp_offset, 8934 memcpy(dmabuf->virt, fw->data + temp_offset,
8897 SLI4_PAGE_SIZE); 8935 SLI4_PAGE_SIZE);
8936 temp_offset += SLI4_PAGE_SIZE;
8898 } 8937 }
8899 rc = lpfc_wr_object(phba, &dma_buffer_list, 8938 rc = lpfc_wr_object(phba, &dma_buffer_list,
8900 (fw->size - offset), &offset); 8939 (fw->size - offset), &offset);
@@ -9005,6 +9044,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9005 } 9044 }
9006 9045
9007 INIT_LIST_HEAD(&phba->active_rrq_list); 9046 INIT_LIST_HEAD(&phba->active_rrq_list);
9047 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
9008 9048
9009 /* Set up common device driver resources */ 9049 /* Set up common device driver resources */
9010 error = lpfc_setup_driver_resource_phase2(phba); 9050 error = lpfc_setup_driver_resource_phase2(phba);
@@ -9112,7 +9152,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9112 9152
9113 /* Check if there are static vports to be created. */ 9153 /* Check if there are static vports to be created. */
9114 lpfc_create_static_vport(phba); 9154 lpfc_create_static_vport(phba);
9115
9116 return 0; 9155 return 0;
9117 9156
9118out_disable_intr: 9157out_disable_intr:
@@ -9483,6 +9522,13 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
9483 } 9522 }
9484 9523
9485 pci_restore_state(pdev); 9524 pci_restore_state(pdev);
9525
9526 /*
9527 * As the new kernel behavior of pci_restore_state() API call clears
9528 * device saved_state flag, need to save the restored state again.
9529 */
9530 pci_save_state(pdev);
9531
9486 if (pdev->is_busmaster) 9532 if (pdev->is_busmaster)
9487 pci_set_master(pdev); 9533 pci_set_master(pdev);
9488 9534
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 556767028353..83450cc5c4d3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2031,7 +2031,7 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2031 bf_set(lpfc_init_vfi_vp, init_vfi, 1); 2031 bf_set(lpfc_init_vfi_vp, init_vfi, 1);
2032 bf_set(lpfc_init_vfi_vfi, init_vfi, 2032 bf_set(lpfc_init_vfi_vfi, init_vfi,
2033 vport->phba->sli4_hba.vfi_ids[vport->vfi]); 2033 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2034 bf_set(lpfc_init_vpi_vpi, init_vfi, 2034 bf_set(lpfc_init_vfi_vpi, init_vfi,
2035 vport->phba->vpi_ids[vport->vpi]); 2035 vport->phba->vpi_ids[vport->vpi]);
2036 bf_set(lpfc_init_vfi_fcfi, init_vfi, 2036 bf_set(lpfc_init_vfi_fcfi, init_vfi,
2037 vport->phba->fcf.fcfi); 2037 vport->phba->fcf.fcfi);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3ccc97496ebf..eadd241eeff1 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1302,13 +1302,13 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1302 case SCSI_PROT_NORMAL: 1302 case SCSI_PROT_NORMAL:
1303 default: 1303 default:
1304 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1304 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1305 "9063 BLKGRD: Bad op/guard:%d/%d combination\n", 1305 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1306 scsi_get_prot_op(sc), guard_type); 1306 scsi_get_prot_op(sc));
1307 ret = 1; 1307 ret = 1;
1308 break; 1308 break;
1309 1309
1310 } 1310 }
1311 } else if (guard_type == SHOST_DIX_GUARD_CRC) { 1311 } else {
1312 switch (scsi_get_prot_op(sc)) { 1312 switch (scsi_get_prot_op(sc)) {
1313 case SCSI_PROT_READ_STRIP: 1313 case SCSI_PROT_READ_STRIP:
1314 case SCSI_PROT_WRITE_INSERT: 1314 case SCSI_PROT_WRITE_INSERT:
@@ -1324,17 +1324,18 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1324 1324
1325 case SCSI_PROT_READ_INSERT: 1325 case SCSI_PROT_READ_INSERT:
1326 case SCSI_PROT_WRITE_STRIP: 1326 case SCSI_PROT_WRITE_STRIP:
1327 *txop = BG_OP_IN_CRC_OUT_NODIF;
1328 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1329 break;
1330
1327 case SCSI_PROT_NORMAL: 1331 case SCSI_PROT_NORMAL:
1328 default: 1332 default:
1329 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1333 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1330 "9075 BLKGRD: Bad op/guard:%d/%d combination\n", 1334 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1331 scsi_get_prot_op(sc), guard_type); 1335 scsi_get_prot_op(sc));
1332 ret = 1; 1336 ret = 1;
1333 break; 1337 break;
1334 } 1338 }
1335 } else {
1336 /* unsupported format */
1337 BUG();
1338 } 1339 }
1339 1340
1340 return ret; 1341 return ret;
@@ -1352,45 +1353,6 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
1352 return sc->device->sector_size; 1353 return sc->device->sector_size;
1353} 1354}
1354 1355
1355/**
1356 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
1357 * @sc: in: SCSI command
1358 * @apptagmask: out: app tag mask
1359 * @apptagval: out: app tag value
1360 * @reftag: out: ref tag (reference tag)
1361 *
1362 * Description:
1363 * Extract DIF parameters from the command if possible. Otherwise,
1364 * use default parameters.
1365 *
1366 **/
1367static inline void
1368lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
1369 uint16_t *apptagval, uint32_t *reftag)
1370{
1371 struct scsi_dif_tuple *spt;
1372 unsigned char op = scsi_get_prot_op(sc);
1373 unsigned int protcnt = scsi_prot_sg_count(sc);
1374 static int cnt;
1375
1376 if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
1377 op == SCSI_PROT_WRITE_PASS)) {
1378
1379 cnt++;
1380 spt = page_address(sg_page(scsi_prot_sglist(sc))) +
1381 scsi_prot_sglist(sc)[0].offset;
1382 *apptagmask = 0;
1383 *apptagval = 0;
1384 *reftag = cpu_to_be32(spt->ref_tag);
1385
1386 } else {
1387 /* SBC defines ref tag to be lower 32bits of LBA */
1388 *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
1389 *apptagmask = 0;
1390 *apptagval = 0;
1391 }
1392}
1393
1394/* 1356/*
1395 * This function sets up buffer list for protection groups of 1357 * This function sets up buffer list for protection groups of
1396 * type LPFC_PG_TYPE_NO_DIF 1358 * type LPFC_PG_TYPE_NO_DIF
@@ -1427,9 +1389,8 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1427 dma_addr_t physaddr; 1389 dma_addr_t physaddr;
1428 int i = 0, num_bde = 0, status; 1390 int i = 0, num_bde = 0, status;
1429 int datadir = sc->sc_data_direction; 1391 int datadir = sc->sc_data_direction;
1430 unsigned blksize;
1431 uint32_t reftag; 1392 uint32_t reftag;
1432 uint16_t apptagmask, apptagval; 1393 unsigned blksize;
1433 uint8_t txop, rxop; 1394 uint8_t txop, rxop;
1434 1395
1435 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1396 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
@@ -1438,17 +1399,16 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1438 1399
1439 /* extract some info from the scsi command for pde*/ 1400 /* extract some info from the scsi command for pde*/
1440 blksize = lpfc_cmd_blksize(sc); 1401 blksize = lpfc_cmd_blksize(sc);
1441 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); 1402 reftag = scsi_get_lba(sc) & 0xffffffff;
1442 1403
1443 /* setup PDE5 with what we have */ 1404 /* setup PDE5 with what we have */
1444 pde5 = (struct lpfc_pde5 *) bpl; 1405 pde5 = (struct lpfc_pde5 *) bpl;
1445 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1406 memset(pde5, 0, sizeof(struct lpfc_pde5));
1446 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1407 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1447 pde5->reftag = reftag;
1448 1408
1449 /* Endianness conversion if necessary for PDE5 */ 1409 /* Endianness conversion if necessary for PDE5 */
1450 pde5->word0 = cpu_to_le32(pde5->word0); 1410 pde5->word0 = cpu_to_le32(pde5->word0);
1451 pde5->reftag = cpu_to_le32(pde5->reftag); 1411 pde5->reftag = cpu_to_le32(reftag);
1452 1412
1453 /* advance bpl and increment bde count */ 1413 /* advance bpl and increment bde count */
1454 num_bde++; 1414 num_bde++;
@@ -1463,10 +1423,10 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1463 if (datadir == DMA_FROM_DEVICE) { 1423 if (datadir == DMA_FROM_DEVICE) {
1464 bf_set(pde6_ce, pde6, 1); 1424 bf_set(pde6_ce, pde6, 1);
1465 bf_set(pde6_re, pde6, 1); 1425 bf_set(pde6_re, pde6, 1);
1466 bf_set(pde6_ae, pde6, 1);
1467 } 1426 }
1468 bf_set(pde6_ai, pde6, 1); 1427 bf_set(pde6_ai, pde6, 1);
1469 bf_set(pde6_apptagval, pde6, apptagval); 1428 bf_set(pde6_ae, pde6, 0);
1429 bf_set(pde6_apptagval, pde6, 0);
1470 1430
1471 /* Endianness conversion if necessary for PDE6 */ 1431 /* Endianness conversion if necessary for PDE6 */
1472 pde6->word0 = cpu_to_le32(pde6->word0); 1432 pde6->word0 = cpu_to_le32(pde6->word0);
@@ -1551,7 +1511,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1551 unsigned char pgdone = 0, alldone = 0; 1511 unsigned char pgdone = 0, alldone = 0;
1552 unsigned blksize; 1512 unsigned blksize;
1553 uint32_t reftag; 1513 uint32_t reftag;
1554 uint16_t apptagmask, apptagval;
1555 uint8_t txop, rxop; 1514 uint8_t txop, rxop;
1556 int num_bde = 0; 1515 int num_bde = 0;
1557 1516
@@ -1571,7 +1530,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1571 1530
1572 /* extract some info from the scsi command */ 1531 /* extract some info from the scsi command */
1573 blksize = lpfc_cmd_blksize(sc); 1532 blksize = lpfc_cmd_blksize(sc);
1574 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); 1533 reftag = scsi_get_lba(sc) & 0xffffffff;
1575 1534
1576 split_offset = 0; 1535 split_offset = 0;
1577 do { 1536 do {
@@ -1579,11 +1538,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1579 pde5 = (struct lpfc_pde5 *) bpl; 1538 pde5 = (struct lpfc_pde5 *) bpl;
1580 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1539 memset(pde5, 0, sizeof(struct lpfc_pde5));
1581 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1540 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1582 pde5->reftag = reftag;
1583 1541
1584 /* Endianness conversion if necessary for PDE5 */ 1542 /* Endianness conversion if necessary for PDE5 */
1585 pde5->word0 = cpu_to_le32(pde5->word0); 1543 pde5->word0 = cpu_to_le32(pde5->word0);
1586 pde5->reftag = cpu_to_le32(pde5->reftag); 1544 pde5->reftag = cpu_to_le32(reftag);
1587 1545
1588 /* advance bpl and increment bde count */ 1546 /* advance bpl and increment bde count */
1589 num_bde++; 1547 num_bde++;
@@ -1597,9 +1555,9 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1597 bf_set(pde6_oprx, pde6, rxop); 1555 bf_set(pde6_oprx, pde6, rxop);
1598 bf_set(pde6_ce, pde6, 1); 1556 bf_set(pde6_ce, pde6, 1);
1599 bf_set(pde6_re, pde6, 1); 1557 bf_set(pde6_re, pde6, 1);
1600 bf_set(pde6_ae, pde6, 1);
1601 bf_set(pde6_ai, pde6, 1); 1558 bf_set(pde6_ai, pde6, 1);
1602 bf_set(pde6_apptagval, pde6, apptagval); 1559 bf_set(pde6_ae, pde6, 0);
1560 bf_set(pde6_apptagval, pde6, 0);
1603 1561
1604 /* Endianness conversion if necessary for PDE6 */ 1562 /* Endianness conversion if necessary for PDE6 */
1605 pde6->word0 = cpu_to_le32(pde6->word0); 1563 pde6->word0 = cpu_to_le32(pde6->word0);
@@ -1621,8 +1579,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1621 memset(pde7, 0, sizeof(struct lpfc_pde7)); 1579 memset(pde7, 0, sizeof(struct lpfc_pde7));
1622 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); 1580 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1623 1581
1624 pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); 1582 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1625 pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); 1583 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1626 1584
1627 protgrp_blks = protgroup_len / 8; 1585 protgrp_blks = protgroup_len / 8;
1628 protgrp_bytes = protgrp_blks * blksize; 1586 protgrp_bytes = protgrp_blks * blksize;
@@ -1632,7 +1590,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1632 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); 1590 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1633 protgroup_offset += protgroup_remainder; 1591 protgroup_offset += protgroup_remainder;
1634 protgrp_blks = protgroup_remainder / 8; 1592 protgrp_blks = protgroup_remainder / 8;
1635 protgrp_bytes = protgroup_remainder * blksize; 1593 protgrp_bytes = protgrp_blks * blksize;
1636 } else { 1594 } else {
1637 protgroup_offset = 0; 1595 protgroup_offset = 0;
1638 curr_prot++; 1596 curr_prot++;
@@ -2006,16 +1964,21 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2006 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 1964 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2007 /* 1965 /*
2008 * setup sense data descriptor 0 per SPC-4 as an information 1966 * setup sense data descriptor 0 per SPC-4 as an information
2009 * field, and put the failing LBA in it 1967 * field, and put the failing LBA in it.
1968 * This code assumes there was also a guard/app/ref tag error
1969 * indication.
2010 */ 1970 */
2011 cmd->sense_buffer[8] = 0; /* Information */ 1971 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
2012 cmd->sense_buffer[9] = 0xa; /* Add. length */ 1972 cmd->sense_buffer[8] = 0; /* Information descriptor type */
1973 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
1974 cmd->sense_buffer[10] = 0x80; /* Validity bit */
2013 bghm /= cmd->device->sector_size; 1975 bghm /= cmd->device->sector_size;
2014 1976
2015 failing_sector = scsi_get_lba(cmd); 1977 failing_sector = scsi_get_lba(cmd);
2016 failing_sector += bghm; 1978 failing_sector += bghm;
2017 1979
2018 put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]); 1980 /* Descriptor Information */
1981 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
2019 } 1982 }
2020 1983
2021 if (!ret) { 1984 if (!ret) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 98999bbd8cbf..8b799f047a99 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -560,7 +560,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
560 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 560 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
561 if (rrq) { 561 if (rrq) {
562 rrq->send_rrq = send_rrq; 562 rrq->send_rrq = send_rrq;
563 rrq->xritag = phba->sli4_hba.xri_ids[xritag]; 563 rrq->xritag = xritag;
564 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); 564 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
565 rrq->ndlp = ndlp; 565 rrq->ndlp = ndlp;
566 rrq->nlp_DID = ndlp->nlp_DID; 566 rrq->nlp_DID = ndlp->nlp_DID;
@@ -2452,7 +2452,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2452 2452
2453 /* search continue save q for same XRI */ 2453 /* search continue save q for same XRI */
2454 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2454 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2455 if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) { 2455 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2456 saveq->iocb.unsli3.rcvsli3.ox_id) {
2456 list_add_tail(&saveq->list, &iocbq->list); 2457 list_add_tail(&saveq->list, &iocbq->list);
2457 found = 1; 2458 found = 1;
2458 break; 2459 break;
@@ -3355,6 +3356,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3355 irspiocbq); 3356 irspiocbq);
3356 break; 3357 break;
3357 case CQE_CODE_RECEIVE: 3358 case CQE_CODE_RECEIVE:
3359 case CQE_CODE_RECEIVE_V1:
3358 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3360 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3359 cq_event); 3361 cq_event);
3360 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3362 lpfc_sli4_handle_received_buffer(phba, dmabuf);
@@ -4712,10 +4714,15 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4712 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 4714 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
4713 * @phba: Pointer to HBA context object. 4715 * @phba: Pointer to HBA context object.
4714 * @type: The resource extent type. 4716 * @type: The resource extent type.
4717 * @extnt_count: buffer to hold port available extent count.
4718 * @extnt_size: buffer to hold element count per extent.
4715 * 4719 *
4716 * This function allocates all SLI4 resource identifiers. 4720 * This function calls the port and retrievs the number of available
4721 * extents and their size for a particular extent type.
4722 *
4723 * Returns: 0 if successful. Nonzero otherwise.
4717 **/ 4724 **/
4718static int 4725int
4719lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 4726lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4720 uint16_t *extnt_count, uint16_t *extnt_size) 4727 uint16_t *extnt_count, uint16_t *extnt_size)
4721{ 4728{
@@ -4892,7 +4899,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
4892 req_len, *emb); 4899 req_len, *emb);
4893 if (alloc_len < req_len) { 4900 if (alloc_len < req_len) {
4894 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4901 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4895 "9000 Allocated DMA memory size (x%x) is " 4902 "2982 Allocated DMA memory size (x%x) is "
4896 "less than the requested DMA memory " 4903 "less than the requested DMA memory "
4897 "size (x%x)\n", alloc_len, req_len); 4904 "size (x%x)\n", alloc_len, req_len);
4898 return -ENOMEM; 4905 return -ENOMEM;
@@ -5506,6 +5513,154 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5506} 5513}
5507 5514
5508/** 5515/**
5516 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
5517 * @phba: Pointer to HBA context object.
5518 * @type: The resource extent type.
5519 * @extnt_count: buffer to hold port extent count response
5520 * @extnt_size: buffer to hold port extent size response.
5521 *
5522 * This function calls the port to read the host allocated extents
5523 * for a particular type.
5524 **/
5525int
5526lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5527 uint16_t *extnt_cnt, uint16_t *extnt_size)
5528{
5529 bool emb;
5530 int rc = 0;
5531 uint16_t curr_blks = 0;
5532 uint32_t req_len, emb_len;
5533 uint32_t alloc_len, mbox_tmo;
5534 struct list_head *blk_list_head;
5535 struct lpfc_rsrc_blks *rsrc_blk;
5536 LPFC_MBOXQ_t *mbox;
5537 void *virtaddr = NULL;
5538 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5539 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5540 union lpfc_sli4_cfg_shdr *shdr;
5541
5542 switch (type) {
5543 case LPFC_RSC_TYPE_FCOE_VPI:
5544 blk_list_head = &phba->lpfc_vpi_blk_list;
5545 break;
5546 case LPFC_RSC_TYPE_FCOE_XRI:
5547 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
5548 break;
5549 case LPFC_RSC_TYPE_FCOE_VFI:
5550 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
5551 break;
5552 case LPFC_RSC_TYPE_FCOE_RPI:
5553 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
5554 break;
5555 default:
5556 return -EIO;
5557 }
5558
5559 /* Count the number of extents currently allocatd for this type. */
5560 list_for_each_entry(rsrc_blk, blk_list_head, list) {
5561 if (curr_blks == 0) {
5562 /*
5563 * The GET_ALLOCATED mailbox does not return the size,
5564 * just the count. The size should be just the size
5565 * stored in the current allocated block and all sizes
5566 * for an extent type are the same so set the return
5567 * value now.
5568 */
5569 *extnt_size = rsrc_blk->rsrc_size;
5570 }
5571 curr_blks++;
5572 }
5573
5574 /* Calculate the total requested length of the dma memory. */
5575 req_len = curr_blks * sizeof(uint16_t);
5576
5577 /*
5578 * Calculate the size of an embedded mailbox. The uint32_t
5579 * accounts for extents-specific word.
5580 */
5581 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5582 sizeof(uint32_t);
5583
5584 /*
5585 * Presume the allocation and response will fit into an embedded
5586 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5587 */
5588 emb = LPFC_SLI4_MBX_EMBED;
5589 req_len = emb_len;
5590 if (req_len > emb_len) {
5591 req_len = curr_blks * sizeof(uint16_t) +
5592 sizeof(union lpfc_sli4_cfg_shdr) +
5593 sizeof(uint32_t);
5594 emb = LPFC_SLI4_MBX_NEMBED;
5595 }
5596
5597 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5598 if (!mbox)
5599 return -ENOMEM;
5600 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
5601
5602 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5603 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
5604 req_len, emb);
5605 if (alloc_len < req_len) {
5606 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5607 "2983 Allocated DMA memory size (x%x) is "
5608 "less than the requested DMA memory "
5609 "size (x%x)\n", alloc_len, req_len);
5610 rc = -ENOMEM;
5611 goto err_exit;
5612 }
5613 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
5614 if (unlikely(rc)) {
5615 rc = -EIO;
5616 goto err_exit;
5617 }
5618
5619 if (!phba->sli4_hba.intr_enable)
5620 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5621 else {
5622 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5623 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5624 }
5625
5626 if (unlikely(rc)) {
5627 rc = -EIO;
5628 goto err_exit;
5629 }
5630
5631 /*
5632 * Figure out where the response is located. Then get local pointers
5633 * to the response data. The port does not guarantee to respond to
5634 * all extents counts request so update the local variable with the
5635 * allocated count from the port.
5636 */
5637 if (emb == LPFC_SLI4_MBX_EMBED) {
5638 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5639 shdr = &rsrc_ext->header.cfg_shdr;
5640 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5641 } else {
5642 virtaddr = mbox->sge_array->addr[0];
5643 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5644 shdr = &n_rsrc->cfg_shdr;
5645 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5646 }
5647
5648 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
5649 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5650 "2984 Failed to read allocated resources "
5651 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
5652 type,
5653 bf_get(lpfc_mbox_hdr_status, &shdr->response),
5654 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
5655 rc = -EIO;
5656 goto err_exit;
5657 }
5658 err_exit:
5659 lpfc_sli4_mbox_cmd_free(phba, mbox);
5660 return rc;
5661}
5662
5663/**
5509 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function 5664 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function
5510 * @phba: Pointer to HBA context object. 5665 * @phba: Pointer to HBA context object.
5511 * 5666 *
@@ -5837,6 +5992,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
5837 "Advanced Error Reporting (AER)\n"); 5992 "Advanced Error Reporting (AER)\n");
5838 phba->cfg_aer_support = 0; 5993 phba->cfg_aer_support = 0;
5839 } 5994 }
5995 rc = 0;
5840 } 5996 }
5841 5997
5842 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 5998 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
@@ -6634,6 +6790,9 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
6634 unsigned long iflags; 6790 unsigned long iflags;
6635 int rc; 6791 int rc;
6636 6792
6793 /* dump from issue mailbox command if setup */
6794 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
6795
6637 rc = lpfc_mbox_dev_check(phba); 6796 rc = lpfc_mbox_dev_check(phba);
6638 if (unlikely(rc)) { 6797 if (unlikely(rc)) {
6639 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -7318,12 +7477,12 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7318 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 7477 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
7319 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 7478 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
7320 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 7479 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
7321 break; 7480 break;
7322 case CMD_XMIT_SEQUENCE64_CX: 7481 case CMD_XMIT_SEQUENCE64_CX:
7323 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 7482 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
7324 iocbq->iocb.un.ulpWord[3]); 7483 iocbq->iocb.un.ulpWord[3]);
7325 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 7484 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
7326 iocbq->iocb.ulpContext); 7485 iocbq->iocb.unsli3.rcvsli3.ox_id);
7327 /* The entire sequence is transmitted for this IOCB */ 7486 /* The entire sequence is transmitted for this IOCB */
7328 xmit_len = total_len; 7487 xmit_len = total_len;
7329 cmnd = CMD_XMIT_SEQUENCE64_CR; 7488 cmnd = CMD_XMIT_SEQUENCE64_CR;
@@ -7341,7 +7500,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7341 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 7500 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
7342 wqe->xmit_sequence.xmit_len = xmit_len; 7501 wqe->xmit_sequence.xmit_len = xmit_len;
7343 command_type = OTHER_COMMAND; 7502 command_type = OTHER_COMMAND;
7344 break; 7503 break;
7345 case CMD_XMIT_BCAST64_CN: 7504 case CMD_XMIT_BCAST64_CN:
7346 /* word3 iocb=iotag32 wqe=seq_payload_len */ 7505 /* word3 iocb=iotag32 wqe=seq_payload_len */
7347 wqe->xmit_bcast64.seq_payload_len = xmit_len; 7506 wqe->xmit_bcast64.seq_payload_len = xmit_len;
@@ -7355,7 +7514,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7355 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 7514 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
7356 LPFC_WQE_LENLOC_WORD3); 7515 LPFC_WQE_LENLOC_WORD3);
7357 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 7516 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
7358 break; 7517 break;
7359 case CMD_FCP_IWRITE64_CR: 7518 case CMD_FCP_IWRITE64_CR:
7360 command_type = FCP_COMMAND_DATA_OUT; 7519 command_type = FCP_COMMAND_DATA_OUT;
7361 /* word3 iocb=iotag wqe=payload_offset_len */ 7520 /* word3 iocb=iotag wqe=payload_offset_len */
@@ -7375,7 +7534,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7375 LPFC_WQE_LENLOC_WORD4); 7534 LPFC_WQE_LENLOC_WORD4);
7376 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 7535 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
7377 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 7536 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
7378 break; 7537 break;
7379 case CMD_FCP_IREAD64_CR: 7538 case CMD_FCP_IREAD64_CR:
7380 /* word3 iocb=iotag wqe=payload_offset_len */ 7539 /* word3 iocb=iotag wqe=payload_offset_len */
7381 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 7540 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
@@ -7394,7 +7553,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7394 LPFC_WQE_LENLOC_WORD4); 7553 LPFC_WQE_LENLOC_WORD4);
7395 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 7554 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
7396 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 7555 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
7397 break; 7556 break;
7398 case CMD_FCP_ICMND64_CR: 7557 case CMD_FCP_ICMND64_CR:
7399 /* word3 iocb=IO_TAG wqe=reserved */ 7558 /* word3 iocb=IO_TAG wqe=reserved */
7400 wqe->fcp_icmd.rsrvd3 = 0; 7559 wqe->fcp_icmd.rsrvd3 = 0;
@@ -7407,7 +7566,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7407 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 7566 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
7408 LPFC_WQE_LENLOC_NONE); 7567 LPFC_WQE_LENLOC_NONE);
7409 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 7568 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
7410 break; 7569 break;
7411 case CMD_GEN_REQUEST64_CR: 7570 case CMD_GEN_REQUEST64_CR:
7412 /* For this command calculate the xmit length of the 7571 /* For this command calculate the xmit length of the
7413 * request bde. 7572 * request bde.
@@ -7442,7 +7601,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7442 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 7601 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
7443 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 7602 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
7444 command_type = OTHER_COMMAND; 7603 command_type = OTHER_COMMAND;
7445 break; 7604 break;
7446 case CMD_XMIT_ELS_RSP64_CX: 7605 case CMD_XMIT_ELS_RSP64_CX:
7447 ndlp = (struct lpfc_nodelist *)iocbq->context1; 7606 ndlp = (struct lpfc_nodelist *)iocbq->context1;
7448 /* words0-2 BDE memcpy */ 7607 /* words0-2 BDE memcpy */
@@ -7457,7 +7616,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7457 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 7616 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
7458 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 7617 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
7459 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7618 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7460 iocbq->iocb.ulpContext); 7619 iocbq->iocb.unsli3.rcvsli3.ox_id);
7461 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 7620 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
7462 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7621 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
7463 phba->vpi_ids[iocbq->vport->vpi]); 7622 phba->vpi_ids[iocbq->vport->vpi]);
@@ -7470,7 +7629,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7470 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 7629 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
7471 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 7630 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
7472 command_type = OTHER_COMMAND; 7631 command_type = OTHER_COMMAND;
7473 break; 7632 break;
7474 case CMD_CLOSE_XRI_CN: 7633 case CMD_CLOSE_XRI_CN:
7475 case CMD_ABORT_XRI_CN: 7634 case CMD_ABORT_XRI_CN:
7476 case CMD_ABORT_XRI_CX: 7635 case CMD_ABORT_XRI_CX:
@@ -7509,7 +7668,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7509 cmnd = CMD_ABORT_XRI_CX; 7668 cmnd = CMD_ABORT_XRI_CX;
7510 command_type = OTHER_COMMAND; 7669 command_type = OTHER_COMMAND;
7511 xritag = 0; 7670 xritag = 0;
7512 break; 7671 break;
7513 case CMD_XMIT_BLS_RSP64_CX: 7672 case CMD_XMIT_BLS_RSP64_CX:
7514 /* As BLS ABTS RSP WQE is very different from other WQEs, 7673 /* As BLS ABTS RSP WQE is very different from other WQEs,
7515 * we re-construct this WQE here based on information in 7674 * we re-construct this WQE here based on information in
@@ -7553,7 +7712,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7553 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 7712 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
7554 } 7713 }
7555 7714
7556 break; 7715 break;
7557 case CMD_XRI_ABORTED_CX: 7716 case CMD_XRI_ABORTED_CX:
7558 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 7717 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
7559 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 7718 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
@@ -7565,7 +7724,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7565 "2014 Invalid command 0x%x\n", 7724 "2014 Invalid command 0x%x\n",
7566 iocbq->iocb.ulpCommand); 7725 iocbq->iocb.ulpCommand);
7567 return IOCB_ERROR; 7726 return IOCB_ERROR;
7568 break; 7727 break;
7569 } 7728 }
7570 7729
7571 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 7730 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
@@ -10481,10 +10640,14 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
10481 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 10640 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
10482 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 10641 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
10483 struct hbq_dmabuf *dma_buf; 10642 struct hbq_dmabuf *dma_buf;
10484 uint32_t status; 10643 uint32_t status, rq_id;
10485 unsigned long iflags; 10644 unsigned long iflags;
10486 10645
10487 if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id) 10646 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
10647 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
10648 else
10649 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
10650 if (rq_id != hrq->queue_id)
10488 goto out; 10651 goto out;
10489 10652
10490 status = bf_get(lpfc_rcqe_status, rcqe); 10653 status = bf_get(lpfc_rcqe_status, rcqe);
@@ -10563,6 +10726,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
10563 (struct sli4_wcqe_xri_aborted *)&cqevt); 10726 (struct sli4_wcqe_xri_aborted *)&cqevt);
10564 break; 10727 break;
10565 case CQE_CODE_RECEIVE: 10728 case CQE_CODE_RECEIVE:
10729 case CQE_CODE_RECEIVE_V1:
10566 /* Process the RQ event */ 10730 /* Process the RQ event */
10567 phba->last_completion_time = jiffies; 10731 phba->last_completion_time = jiffies;
10568 workposted = lpfc_sli4_sp_handle_rcqe(phba, 10732 workposted = lpfc_sli4_sp_handle_rcqe(phba,
@@ -12345,19 +12509,18 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
12345} 12509}
12346 12510
12347/** 12511/**
12348 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 12512 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range
12349 * @phba: pointer to lpfc hba data structure. 12513 * @phba: pointer to lpfc hba data structure.
12350 * 12514 *
12351 * This routine is invoked to post rpi header templates to the 12515 * This routine is invoked to post rpi header templates to the
12352 * port for those SLI4 ports that do not support extents. This routine 12516 * HBA consistent with the SLI-4 interface spec. This routine
12353 * posts a PAGE_SIZE memory region to the port to hold up to 12517 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
12354 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 12518 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
12355 * and should be called only when interrupts are disabled.
12356 * 12519 *
12357 * Return codes 12520 * Returns
12358 * 0 - successful 12521 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
12359 * -ERROR - otherwise. 12522 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
12360 */ 12523 **/
12361uint16_t 12524uint16_t
12362lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 12525lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
12363{ 12526{
@@ -13406,7 +13569,7 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
13406 * This function validates the xri maps to the known range of XRIs allocated an 13569 * This function validates the xri maps to the known range of XRIs allocated an
13407 * used by the driver. 13570 * used by the driver.
13408 **/ 13571 **/
13409static uint16_t 13572uint16_t
13410lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 13573lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
13411 uint16_t xri) 13574 uint16_t xri)
13412{ 13575{
@@ -13643,10 +13806,12 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
13643static struct lpfc_iocbq * 13806static struct lpfc_iocbq *
13644lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 13807lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13645{ 13808{
13809 struct hbq_dmabuf *hbq_buf;
13646 struct lpfc_dmabuf *d_buf, *n_buf; 13810 struct lpfc_dmabuf *d_buf, *n_buf;
13647 struct lpfc_iocbq *first_iocbq, *iocbq; 13811 struct lpfc_iocbq *first_iocbq, *iocbq;
13648 struct fc_frame_header *fc_hdr; 13812 struct fc_frame_header *fc_hdr;
13649 uint32_t sid; 13813 uint32_t sid;
13814 uint32_t len, tot_len;
13650 struct ulp_bde64 *pbde; 13815 struct ulp_bde64 *pbde;
13651 13816
13652 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 13817 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
@@ -13655,6 +13820,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13655 lpfc_update_rcv_time_stamp(vport); 13820 lpfc_update_rcv_time_stamp(vport);
13656 /* get the Remote Port's SID */ 13821 /* get the Remote Port's SID */
13657 sid = sli4_sid_from_fc_hdr(fc_hdr); 13822 sid = sli4_sid_from_fc_hdr(fc_hdr);
13823 tot_len = 0;
13658 /* Get an iocbq struct to fill in. */ 13824 /* Get an iocbq struct to fill in. */
13659 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 13825 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
13660 if (first_iocbq) { 13826 if (first_iocbq) {
@@ -13662,9 +13828,12 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13662 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 13828 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
13663 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 13829 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
13664 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 13830 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
13665 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); 13831 first_iocbq->iocb.ulpContext = NO_XRI;
13666 /* iocbq is prepped for internal consumption. Logical vpi. */ 13832 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
13667 first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi; 13833 be16_to_cpu(fc_hdr->fh_ox_id);
13834 /* iocbq is prepped for internal consumption. Physical vpi. */
13835 first_iocbq->iocb.unsli3.rcvsli3.vpi =
13836 vport->phba->vpi_ids[vport->vpi];
13668 /* put the first buffer into the first IOCBq */ 13837 /* put the first buffer into the first IOCBq */
13669 first_iocbq->context2 = &seq_dmabuf->dbuf; 13838 first_iocbq->context2 = &seq_dmabuf->dbuf;
13670 first_iocbq->context3 = NULL; 13839 first_iocbq->context3 = NULL;
@@ -13672,9 +13841,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13672 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 13841 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
13673 LPFC_DATA_BUF_SIZE; 13842 LPFC_DATA_BUF_SIZE;
13674 first_iocbq->iocb.un.rcvels.remoteID = sid; 13843 first_iocbq->iocb.un.rcvels.remoteID = sid;
13675 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 13844 tot_len = bf_get(lpfc_rcqe_length,
13676 bf_get(lpfc_rcqe_length,
13677 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 13845 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
13846 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
13678 } 13847 }
13679 iocbq = first_iocbq; 13848 iocbq = first_iocbq;
13680 /* 13849 /*
@@ -13692,9 +13861,13 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13692 pbde = (struct ulp_bde64 *) 13861 pbde = (struct ulp_bde64 *)
13693 &iocbq->iocb.unsli3.sli3Words[4]; 13862 &iocbq->iocb.unsli3.sli3Words[4];
13694 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 13863 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
13695 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 13864
13696 bf_get(lpfc_rcqe_length, 13865 /* We need to get the size out of the right CQE */
13697 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 13866 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
13867 len = bf_get(lpfc_rcqe_length,
13868 &hbq_buf->cq_event.cqe.rcqe_cmpl);
13869 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
13870 tot_len += len;
13698 } else { 13871 } else {
13699 iocbq = lpfc_sli_get_iocbq(vport->phba); 13872 iocbq = lpfc_sli_get_iocbq(vport->phba);
13700 if (!iocbq) { 13873 if (!iocbq) {
@@ -13712,9 +13885,14 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13712 iocbq->iocb.ulpBdeCount = 1; 13885 iocbq->iocb.ulpBdeCount = 1;
13713 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 13886 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
13714 LPFC_DATA_BUF_SIZE; 13887 LPFC_DATA_BUF_SIZE;
13715 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 13888
13716 bf_get(lpfc_rcqe_length, 13889 /* We need to get the size out of the right CQE */
13717 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 13890 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
13891 len = bf_get(lpfc_rcqe_length,
13892 &hbq_buf->cq_event.cqe.rcqe_cmpl);
13893 tot_len += len;
13894 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
13895
13718 iocbq->iocb.un.rcvels.remoteID = sid; 13896 iocbq->iocb.un.rcvels.remoteID = sid;
13719 list_add_tail(&iocbq->list, &first_iocbq->list); 13897 list_add_tail(&iocbq->list, &first_iocbq->list);
13720 } 13898 }
@@ -13787,7 +13965,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
13787 lpfc_in_buf_free(phba, &dmabuf->dbuf); 13965 lpfc_in_buf_free(phba, &dmabuf->dbuf);
13788 return; 13966 return;
13789 } 13967 }
13790 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); 13968 if ((bf_get(lpfc_cqe_code,
13969 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
13970 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
13971 &dmabuf->cq_event.cqe.rcqe_cmpl);
13972 else
13973 fcfi = bf_get(lpfc_rcqe_fcf_id,
13974 &dmabuf->cq_event.cqe.rcqe_cmpl);
13791 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 13975 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
13792 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) { 13976 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
13793 /* throw out the frame */ 13977 /* throw out the frame */
@@ -14451,6 +14635,92 @@ fail_fcf_read:
14451} 14635}
14452 14636
14453/** 14637/**
14638 * lpfc_check_next_fcf_pri
14639 * phba pointer to the lpfc_hba struct for this port.
14640 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
14641 * routine when the rr_bmask is empty. The FCF indecies are put into the
14642 * rr_bmask based on their priority level. Starting from the highest priority
14643 * to the lowest. The most likely FCF candidate will be in the highest
14644 * priority group. When this routine is called it searches the fcf_pri list for
14645 * next lowest priority group and repopulates the rr_bmask with only those
14646 * fcf_indexes.
14647 * returns:
14648 * 1=success 0=failure
14649 **/
14650int
14651lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
14652{
14653 uint16_t next_fcf_pri;
14654 uint16_t last_index;
14655 struct lpfc_fcf_pri *fcf_pri;
14656 int rc;
14657 int ret = 0;
14658
14659 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
14660 LPFC_SLI4_FCF_TBL_INDX_MAX);
14661 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
14662 "3060 Last IDX %d\n", last_index);
14663 if (list_empty(&phba->fcf.fcf_pri_list)) {
14664 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
14665 "3061 Last IDX %d\n", last_index);
14666 return 0; /* Empty rr list */
14667 }
14668 next_fcf_pri = 0;
14669 /*
14670 * Clear the rr_bmask and set all of the bits that are at this
14671 * priority.
14672 */
14673 memset(phba->fcf.fcf_rr_bmask, 0,
14674 sizeof(*phba->fcf.fcf_rr_bmask));
14675 spin_lock_irq(&phba->hbalock);
14676 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
14677 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
14678 continue;
14679 /*
14680 * the 1st priority that has not FLOGI failed
14681 * will be the highest.
14682 */
14683 if (!next_fcf_pri)
14684 next_fcf_pri = fcf_pri->fcf_rec.priority;
14685 spin_unlock_irq(&phba->hbalock);
14686 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
14687 rc = lpfc_sli4_fcf_rr_index_set(phba,
14688 fcf_pri->fcf_rec.fcf_index);
14689 if (rc)
14690 return 0;
14691 }
14692 spin_lock_irq(&phba->hbalock);
14693 }
14694 /*
14695 * if next_fcf_pri was not set above and the list is not empty then
14696 * we have failed flogis on all of them. So reset flogi failed
14697 * and start at the begining.
14698 */
14699 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
14700 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
14701 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
14702 /*
14703 * the 1st priority that has not FLOGI failed
14704 * will be the highest.
14705 */
14706 if (!next_fcf_pri)
14707 next_fcf_pri = fcf_pri->fcf_rec.priority;
14708 spin_unlock_irq(&phba->hbalock);
14709 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
14710 rc = lpfc_sli4_fcf_rr_index_set(phba,
14711 fcf_pri->fcf_rec.fcf_index);
14712 if (rc)
14713 return 0;
14714 }
14715 spin_lock_irq(&phba->hbalock);
14716 }
14717 } else
14718 ret = 1;
14719 spin_unlock_irq(&phba->hbalock);
14720
14721 return ret;
14722}
14723/**
14454 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 14724 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
14455 * @phba: pointer to lpfc hba data structure. 14725 * @phba: pointer to lpfc hba data structure.
14456 * 14726 *
@@ -14466,6 +14736,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
14466 uint16_t next_fcf_index; 14736 uint16_t next_fcf_index;
14467 14737
14468 /* Search start from next bit of currently registered FCF index */ 14738 /* Search start from next bit of currently registered FCF index */
14739next_priority:
14469 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) % 14740 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
14470 LPFC_SLI4_FCF_TBL_INDX_MAX; 14741 LPFC_SLI4_FCF_TBL_INDX_MAX;
14471 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 14742 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
@@ -14473,17 +14744,46 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
14473 next_fcf_index); 14744 next_fcf_index);
14474 14745
14475 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 14746 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
14476 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 14747 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
14748 /*
14749 * If we have wrapped then we need to clear the bits that
14750 * have been tested so that we can detect when we should
14751 * change the priority level.
14752 */
14477 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 14753 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
14478 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 14754 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
14755 }
14756
14479 14757
14480 /* Check roundrobin failover list empty condition */ 14758 /* Check roundrobin failover list empty condition */
14481 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 14759 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
14760 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
14761 /*
14762 * If next fcf index is not found check if there are lower
14763 * Priority level fcf's in the fcf_priority list.
14764 * Set up the rr_bmask with all of the avaiable fcf bits
14765 * at that level and continue the selection process.
14766 */
14767 if (lpfc_check_next_fcf_pri_level(phba))
14768 goto next_priority;
14482 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 14769 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
14483 "2844 No roundrobin failover FCF available\n"); 14770 "2844 No roundrobin failover FCF available\n");
14484 return LPFC_FCOE_FCF_NEXT_NONE; 14771 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
14772 return LPFC_FCOE_FCF_NEXT_NONE;
14773 else {
14774 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
14775 "3063 Only FCF available idx %d, flag %x\n",
14776 next_fcf_index,
14777 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
14778 return next_fcf_index;
14779 }
14485 } 14780 }
14486 14781
14782 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
14783 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
14784 LPFC_FCF_FLOGI_FAILED)
14785 goto next_priority;
14786
14487 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 14787 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
14488 "2845 Get next roundrobin failover FCF (x%x)\n", 14788 "2845 Get next roundrobin failover FCF (x%x)\n",
14489 next_fcf_index); 14789 next_fcf_index);
@@ -14535,6 +14835,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
14535void 14835void
14536lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 14836lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
14537{ 14837{
14838 struct lpfc_fcf_pri *fcf_pri;
14538 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 14839 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
14539 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 14840 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
14540 "2762 FCF (x%x) reached driver's book " 14841 "2762 FCF (x%x) reached driver's book "
@@ -14543,6 +14844,14 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
14543 return; 14844 return;
14544 } 14845 }
14545 /* Clear the eligible FCF record index bmask */ 14846 /* Clear the eligible FCF record index bmask */
14847 spin_lock_irq(&phba->hbalock);
14848 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
14849 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
14850 list_del_init(&fcf_pri->list);
14851 break;
14852 }
14853 }
14854 spin_unlock_irq(&phba->hbalock);
14546 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 14855 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
14547 14856
14548 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 14857 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 4b1703554a26..19bb87ae8597 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -81,6 +81,8 @@
81 (fc_hdr)->fh_f_ctl[1] << 8 | \ 81 (fc_hdr)->fh_f_ctl[1] << 8 | \
82 (fc_hdr)->fh_f_ctl[2]) 82 (fc_hdr)->fh_f_ctl[2])
83 83
84#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
85
84enum lpfc_sli4_queue_type { 86enum lpfc_sli4_queue_type {
85 LPFC_EQ, 87 LPFC_EQ,
86 LPFC_GCQ, 88 LPFC_GCQ,
@@ -157,6 +159,25 @@ struct lpfc_fcf_rec {
157#define RECORD_VALID 0x02 159#define RECORD_VALID 0x02
158}; 160};
159 161
162struct lpfc_fcf_pri_rec {
163 uint16_t fcf_index;
164#define LPFC_FCF_ON_PRI_LIST 0x0001
165#define LPFC_FCF_FLOGI_FAILED 0x0002
166 uint16_t flag;
167 uint32_t priority;
168};
169
170struct lpfc_fcf_pri {
171 struct list_head list;
172 struct lpfc_fcf_pri_rec fcf_rec;
173};
174
175/*
176 * Maximum FCF table index, it is for driver internal book keeping, it
177 * just needs to be no less than the supported HBA's FCF table size.
178 */
179#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
180
160struct lpfc_fcf { 181struct lpfc_fcf {
161 uint16_t fcfi; 182 uint16_t fcfi;
162 uint32_t fcf_flag; 183 uint32_t fcf_flag;
@@ -176,15 +197,13 @@ struct lpfc_fcf {
176 uint32_t eligible_fcf_cnt; 197 uint32_t eligible_fcf_cnt;
177 struct lpfc_fcf_rec current_rec; 198 struct lpfc_fcf_rec current_rec;
178 struct lpfc_fcf_rec failover_rec; 199 struct lpfc_fcf_rec failover_rec;
200 struct list_head fcf_pri_list;
201 struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX];
202 uint32_t current_fcf_scan_pri;
179 struct timer_list redisc_wait; 203 struct timer_list redisc_wait;
180 unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */ 204 unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
181}; 205};
182 206
183/*
184 * Maximum FCF table index, it is for driver internal book keeping, it
185 * just needs to be no less than the supported HBA's FCF table size.
186 */
187#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
188 207
189#define LPFC_REGION23_SIGNATURE "RG23" 208#define LPFC_REGION23_SIGNATURE "RG23"
190#define LPFC_REGION23_VERSION 1 209#define LPFC_REGION23_VERSION 1
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c03921b1232c..c1e0ae94d9f4 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.23" 21#define LPFC_DRIVER_VERSION "8.3.25"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 7370c084b178..3948a00d81f4 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
36#define MEGASAS_VERSION "00.00.05.38-rc1" 36#define MEGASAS_VERSION "00.00.05.40-rc1"
37#define MEGASAS_RELDATE "May. 11, 2011" 37#define MEGASAS_RELDATE "Jul. 26, 2011"
38#define MEGASAS_EXT_VERSION "Wed. May. 11 17:00:00 PDT 2011" 38#define MEGASAS_EXT_VERSION "Tue. Jul. 26 17:00:00 PDT 2011"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 2d8cdce7b2f5..776d01988660 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : v00.00.05.38-rc1 21 * Version : v00.00.05.40-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
@@ -54,6 +54,7 @@
54#include <scsi/scsi_cmnd.h> 54#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_device.h> 55#include <scsi/scsi_device.h>
56#include <scsi/scsi_host.h> 56#include <scsi/scsi_host.h>
57#include <scsi/scsi_tcq.h>
57#include "megaraid_sas_fusion.h" 58#include "megaraid_sas_fusion.h"
58#include "megaraid_sas.h" 59#include "megaraid_sas.h"
59 60
@@ -2057,6 +2058,20 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2057 } 2058 }
2058} 2059}
2059 2060
2061static int megasas_change_queue_depth(struct scsi_device *sdev,
2062 int queue_depth, int reason)
2063{
2064 if (reason != SCSI_QDEPTH_DEFAULT)
2065 return -EOPNOTSUPP;
2066
2067 if (queue_depth > sdev->host->can_queue)
2068 queue_depth = sdev->host->can_queue;
2069 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
2070 queue_depth);
2071
2072 return queue_depth;
2073}
2074
2060/* 2075/*
2061 * Scsi host template for megaraid_sas driver 2076 * Scsi host template for megaraid_sas driver
2062 */ 2077 */
@@ -2074,6 +2089,7 @@ static struct scsi_host_template megasas_template = {
2074 .eh_timed_out = megasas_reset_timer, 2089 .eh_timed_out = megasas_reset_timer,
2075 .bios_param = megasas_bios_param, 2090 .bios_param = megasas_bios_param,
2076 .use_clustering = ENABLE_CLUSTERING, 2091 .use_clustering = ENABLE_CLUSTERING,
2092 .change_queue_depth = megasas_change_queue_depth,
2077}; 2093};
2078 2094
2079/** 2095/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 8fe3a45794fc..5a5af1fe7581 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -288,7 +288,6 @@ u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock,
288 /* Get dev handle from Pd */ 288 /* Get dev handle from Pd */
289 *pDevHandle = MR_PdDevHandleGet(pd, map); 289 *pDevHandle = MR_PdDevHandleGet(pd, map);
290 } 290 }
291 retval = FALSE;
292 } 291 }
293 292
294 *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; 293 *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 939f283d0c28..6abd2fcc43e2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -4258,6 +4258,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4258 u32 log_info; 4258 u32 log_info;
4259 struct MPT2SAS_DEVICE *sas_device_priv_data; 4259 struct MPT2SAS_DEVICE *sas_device_priv_data;
4260 u32 response_code = 0; 4260 u32 response_code = 0;
4261 unsigned long flags;
4261 4262
4262 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 4263 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
4263 scmd = _scsih_scsi_lookup_get_clear(ioc, smid); 4264 scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
@@ -4282,6 +4283,9 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4282 * the failed direct I/O should be redirected to volume 4283 * the failed direct I/O should be redirected to volume
4283 */ 4284 */
4284 if (_scsih_scsi_direct_io_get(ioc, smid)) { 4285 if (_scsih_scsi_direct_io_get(ioc, smid)) {
4286 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4287 ioc->scsi_lookup[smid - 1].scmd = scmd;
4288 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4285 _scsih_scsi_direct_io_set(ioc, smid, 0); 4289 _scsih_scsi_direct_io_set(ioc, smid, 0);
4286 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); 4290 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4287 mpi_request->DevHandle = 4291 mpi_request->DevHandle =
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
index c82b012aba37..78f7e20a0c1c 100644
--- a/drivers/scsi/mvsas/Kconfig
+++ b/drivers/scsi/mvsas/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4# Copyright 2007 Red Hat, Inc. 4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com> 5# Copyright 2008 Marvell. <kewei@marvell.com>
6# Copyright 2009-20011 Marvell. <yuxiangl@marvell.com> 6# Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
7# 7#
8# This file is licensed under GPLv2. 8# This file is licensed under GPLv2.
9# 9#
@@ -41,3 +41,10 @@ config SCSI_MVSAS_DEBUG
41 help 41 help
42 Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode, 42 Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode,
43 the driver prints some messages to the console. 43 the driver prints some messages to the console.
44config SCSI_MVSAS_TASKLET
45 bool "Support for interrupt tasklet"
46 default n
47 depends on SCSI_MVSAS
48 help
49 Compiles the 88SE64xx/88SE94xx driver in interrupt tasklet mode.In this mode,
50 the interrupt will schedule a tasklet.
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index 13c960481391..8ba47229049f 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -33,7 +33,6 @@ static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
33 u32 reg; 33 u32 reg;
34 struct mvs_phy *phy = &mvi->phy[i]; 34 struct mvs_phy *phy = &mvi->phy[i];
35 35
36 /* TODO check & save device type */
37 reg = mr32(MVS_GBL_PORT_TYPE); 36 reg = mr32(MVS_GBL_PORT_TYPE);
38 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); 37 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
39 if (reg & MODE_SAS_SATA & (1 << i)) 38 if (reg & MODE_SAS_SATA & (1 << i))
@@ -48,7 +47,7 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
48 u32 tmp; 47 u32 tmp;
49 48
50 tmp = mr32(MVS_PCS); 49 tmp = mr32(MVS_PCS);
51 if (mvi->chip->n_phy <= 4) 50 if (mvi->chip->n_phy <= MVS_SOC_PORTS)
52 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT); 51 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
53 else 52 else
54 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); 53 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
@@ -58,24 +57,16 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
58static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi) 57static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
59{ 58{
60 void __iomem *regs = mvi->regs; 59 void __iomem *regs = mvi->regs;
60 int i;
61 61
62 mvs_phy_hacks(mvi); 62 mvs_phy_hacks(mvi);
63 63
64 if (!(mvi->flags & MVF_FLAG_SOC)) { 64 if (!(mvi->flags & MVF_FLAG_SOC)) {
65 /* TEST - for phy decoding error, adjust voltage levels */ 65 for (i = 0; i < MVS_SOC_PORTS; i++) {
66 mw32(MVS_P0_VSR_ADDR + 0, 0x8); 66 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE8);
67 mw32(MVS_P0_VSR_DATA + 0, 0x2F0); 67 mvs_write_port_vsr_data(mvi, i, 0x2F0);
68 68 }
69 mw32(MVS_P0_VSR_ADDR + 8, 0x8);
70 mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
71
72 mw32(MVS_P0_VSR_ADDR + 16, 0x8);
73 mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
74
75 mw32(MVS_P0_VSR_ADDR + 24, 0x8);
76 mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
77 } else { 69 } else {
78 int i;
79 /* disable auto port detection */ 70 /* disable auto port detection */
80 mw32(MVS_GBL_PORT_TYPE, 0); 71 mw32(MVS_GBL_PORT_TYPE, 0);
81 for (i = 0; i < mvi->chip->n_phy; i++) { 72 for (i = 0; i < mvi->chip->n_phy; i++) {
@@ -95,7 +86,7 @@ static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
95 u32 reg, tmp; 86 u32 reg, tmp;
96 87
97 if (!(mvi->flags & MVF_FLAG_SOC)) { 88 if (!(mvi->flags & MVF_FLAG_SOC)) {
98 if (phy_id < 4) 89 if (phy_id < MVS_SOC_PORTS)
99 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg); 90 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
100 else 91 else
101 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg); 92 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
@@ -104,13 +95,13 @@ static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
104 reg = mr32(MVS_PHY_CTL); 95 reg = mr32(MVS_PHY_CTL);
105 96
106 tmp = reg; 97 tmp = reg;
107 if (phy_id < 4) 98 if (phy_id < MVS_SOC_PORTS)
108 tmp |= (1U << phy_id) << PCTL_LINK_OFFS; 99 tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
109 else 100 else
110 tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS; 101 tmp |= (1U << (phy_id - MVS_SOC_PORTS)) << PCTL_LINK_OFFS;
111 102
112 if (!(mvi->flags & MVF_FLAG_SOC)) { 103 if (!(mvi->flags & MVF_FLAG_SOC)) {
113 if (phy_id < 4) { 104 if (phy_id < MVS_SOC_PORTS) {
114 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); 105 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
115 mdelay(10); 106 mdelay(10);
116 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg); 107 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
@@ -133,9 +124,9 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
133 tmp &= ~PHYEV_RDY_CH; 124 tmp &= ~PHYEV_RDY_CH;
134 mvs_write_port_irq_stat(mvi, phy_id, tmp); 125 mvs_write_port_irq_stat(mvi, phy_id, tmp);
135 tmp = mvs_read_phy_ctl(mvi, phy_id); 126 tmp = mvs_read_phy_ctl(mvi, phy_id);
136 if (hard == 1) 127 if (hard == MVS_HARD_RESET)
137 tmp |= PHY_RST_HARD; 128 tmp |= PHY_RST_HARD;
138 else if (hard == 0) 129 else if (hard == MVS_SOFT_RESET)
139 tmp |= PHY_RST; 130 tmp |= PHY_RST;
140 mvs_write_phy_ctl(mvi, phy_id, tmp); 131 mvs_write_phy_ctl(mvi, phy_id, tmp);
141 if (hard) { 132 if (hard) {
@@ -321,6 +312,11 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
321 /* init phys */ 312 /* init phys */
322 mvs_64xx_phy_hacks(mvi); 313 mvs_64xx_phy_hacks(mvi);
323 314
315 tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
316 tmp &= 0x0000ffff;
317 tmp |= 0x00fa0000;
318 mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
319
324 /* enable auto port detection */ 320 /* enable auto port detection */
325 mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN); 321 mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
326 322
@@ -346,7 +342,7 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
346 342
347 mvs_64xx_enable_xmt(mvi, i); 343 mvs_64xx_enable_xmt(mvi, i);
348 344
349 mvs_64xx_phy_reset(mvi, i, 1); 345 mvs_64xx_phy_reset(mvi, i, MVS_HARD_RESET);
350 msleep(500); 346 msleep(500);
351 mvs_64xx_detect_porttype(mvi, i); 347 mvs_64xx_detect_porttype(mvi, i);
352 } 348 }
@@ -377,13 +373,7 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
377 mvs_update_phyinfo(mvi, i, 1); 373 mvs_update_phyinfo(mvi, i, 1);
378 } 374 }
379 375
380 /* FIXME: update wide port bitmaps */
381
382 /* little endian for open address and command table, etc. */ 376 /* little endian for open address and command table, etc. */
383 /*
384 * it seems that ( from the spec ) turning on big-endian won't
385 * do us any good on big-endian machines, need further confirmation
386 */
387 cctl = mr32(MVS_CTL); 377 cctl = mr32(MVS_CTL);
388 cctl |= CCTL_ENDIAN_CMD; 378 cctl |= CCTL_ENDIAN_CMD;
389 cctl |= CCTL_ENDIAN_DATA; 379 cctl |= CCTL_ENDIAN_DATA;
@@ -394,15 +384,19 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
394 /* reset CMD queue */ 384 /* reset CMD queue */
395 tmp = mr32(MVS_PCS); 385 tmp = mr32(MVS_PCS);
396 tmp |= PCS_CMD_RST; 386 tmp |= PCS_CMD_RST;
387 tmp &= ~PCS_SELF_CLEAR;
397 mw32(MVS_PCS, tmp); 388 mw32(MVS_PCS, tmp);
398 /* interrupt coalescing may cause missing HW interrput in some case, 389 /*
399 * and the max count is 0x1ff, while our max slot is 0x200, 390 * the max count is 0x1ff, while our max slot is 0x200,
400 * it will make count 0. 391 * it will make count 0.
401 */ 392 */
402 tmp = 0; 393 tmp = 0;
403 mw32(MVS_INT_COAL, tmp); 394 if (MVS_CHIP_SLOT_SZ > 0x1ff)
395 mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
396 else
397 mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);
404 398
405 tmp = 0x100; 399 tmp = 0x10000 | interrupt_coalescing;
406 mw32(MVS_INT_COAL_TMOUT, tmp); 400 mw32(MVS_INT_COAL_TMOUT, tmp);
407 401
408 /* ladies and gentlemen, start your engines */ 402 /* ladies and gentlemen, start your engines */
@@ -477,13 +471,11 @@ static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
477 471
478 /* clear CMD_CMPLT ASAP */ 472 /* clear CMD_CMPLT ASAP */
479 mw32_f(MVS_INT_STAT, CINT_DONE); 473 mw32_f(MVS_INT_STAT, CINT_DONE);
480#ifndef MVS_USE_TASKLET 474
481 spin_lock(&mvi->lock); 475 spin_lock(&mvi->lock);
482#endif
483 mvs_int_full(mvi); 476 mvs_int_full(mvi);
484#ifndef MVS_USE_TASKLET
485 spin_unlock(&mvi->lock); 477 spin_unlock(&mvi->lock);
486#endif 478
487 return IRQ_HANDLED; 479 return IRQ_HANDLED;
488} 480}
489 481
@@ -630,7 +622,6 @@ static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
630{ 622{
631 u32 tmp; 623 u32 tmp;
632 struct mvs_phy *phy = &mvi->phy[i]; 624 struct mvs_phy *phy = &mvi->phy[i];
633 /* workaround for HW phy decoding error on 1.5g disk drive */
634 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); 625 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
635 tmp = mvs_read_port_vsr_data(mvi, i); 626 tmp = mvs_read_port_vsr_data(mvi, i);
636 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> 627 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
@@ -661,7 +652,7 @@ void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
661 tmp |= lrmax; 652 tmp |= lrmax;
662 } 653 }
663 mvs_write_phy_ctl(mvi, phy_id, tmp); 654 mvs_write_phy_ctl(mvi, phy_id, tmp);
664 mvs_64xx_phy_reset(mvi, phy_id, 1); 655 mvs_64xx_phy_reset(mvi, phy_id, MVS_HARD_RESET);
665} 656}
666 657
667static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi) 658static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
@@ -744,11 +735,13 @@ int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
744 return -1; 735 return -1;
745} 736}
746 737
747#ifndef DISABLE_HOTPLUG_DMA_FIX 738void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
748void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) 739 int buf_len, int from, void *prd)
749{ 740{
750 int i; 741 int i;
751 struct mvs_prd *buf_prd = prd; 742 struct mvs_prd *buf_prd = prd;
743 dma_addr_t buf_dma = mvi->bulk_buffer_dma;
744
752 buf_prd += from; 745 buf_prd += from;
753 for (i = 0; i < MAX_SG_ENTRY - from; i++) { 746 for (i = 0; i < MAX_SG_ENTRY - from; i++) {
754 buf_prd->addr = cpu_to_le64(buf_dma); 747 buf_prd->addr = cpu_to_le64(buf_dma);
@@ -756,7 +749,28 @@ void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
756 ++buf_prd; 749 ++buf_prd;
757 } 750 }
758} 751}
759#endif 752
753static void mvs_64xx_tune_interrupt(struct mvs_info *mvi, u32 time)
754{
755 void __iomem *regs = mvi->regs;
756 u32 tmp = 0;
757 /*
758 * the max count is 0x1ff, while our max slot is 0x200,
759 * it will make count 0.
760 */
761 if (time == 0) {
762 mw32(MVS_INT_COAL, 0);
763 mw32(MVS_INT_COAL_TMOUT, 0x10000);
764 } else {
765 if (MVS_CHIP_SLOT_SZ > 0x1ff)
766 mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
767 else
768 mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);
769
770 tmp = 0x10000 | time;
771 mw32(MVS_INT_COAL_TMOUT, tmp);
772 }
773}
760 774
761const struct mvs_dispatch mvs_64xx_dispatch = { 775const struct mvs_dispatch mvs_64xx_dispatch = {
762 "mv64xx", 776 "mv64xx",
@@ -780,7 +794,6 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
780 mvs_write_port_irq_stat, 794 mvs_write_port_irq_stat,
781 mvs_read_port_irq_mask, 795 mvs_read_port_irq_mask,
782 mvs_write_port_irq_mask, 796 mvs_write_port_irq_mask,
783 mvs_get_sas_addr,
784 mvs_64xx_command_active, 797 mvs_64xx_command_active,
785 mvs_64xx_clear_srs_irq, 798 mvs_64xx_clear_srs_irq,
786 mvs_64xx_issue_stop, 799 mvs_64xx_issue_stop,
@@ -808,8 +821,8 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
808 mvs_64xx_spi_buildcmd, 821 mvs_64xx_spi_buildcmd,
809 mvs_64xx_spi_issuecmd, 822 mvs_64xx_spi_issuecmd,
810 mvs_64xx_spi_waitdataready, 823 mvs_64xx_spi_waitdataready,
811#ifndef DISABLE_HOTPLUG_DMA_FIX
812 mvs_64xx_fix_dma, 824 mvs_64xx_fix_dma,
813#endif 825 mvs_64xx_tune_interrupt,
826 NULL,
814}; 827};
815 828
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 78162c3c36e6..3501291618fd 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -48,6 +48,216 @@ static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
48 } 48 }
49} 49}
50 50
51void set_phy_tuning(struct mvs_info *mvi, int phy_id,
52 struct phy_tuning phy_tuning)
53{
54 u32 tmp, setting_0 = 0, setting_1 = 0;
55 u8 i;
56
57 /* Remap information for B0 chip:
58 *
59 * R0Ch -> R118h[15:0] (Adapted DFE F3 - F5 coefficient)
60 * R0Dh -> R118h[31:16] (Generation 1 Setting 0)
61 * R0Eh -> R11Ch[15:0] (Generation 1 Setting 1)
62 * R0Fh -> R11Ch[31:16] (Generation 2 Setting 0)
63 * R10h -> R120h[15:0] (Generation 2 Setting 1)
64 * R11h -> R120h[31:16] (Generation 3 Setting 0)
65 * R12h -> R124h[15:0] (Generation 3 Setting 1)
66 * R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved))
67 */
68
69 /* A0 has a different set of registers */
70 if (mvi->pdev->revision == VANIR_A0_REV)
71 return;
72
73 for (i = 0; i < 3; i++) {
74 /* loop 3 times, set Gen 1, Gen 2, Gen 3 */
75 switch (i) {
76 case 0:
77 setting_0 = GENERATION_1_SETTING;
78 setting_1 = GENERATION_1_2_SETTING;
79 break;
80 case 1:
81 setting_0 = GENERATION_1_2_SETTING;
82 setting_1 = GENERATION_2_3_SETTING;
83 break;
84 case 2:
85 setting_0 = GENERATION_2_3_SETTING;
86 setting_1 = GENERATION_3_4_SETTING;
87 break;
88 }
89
90 /* Set:
91 *
92 * Transmitter Emphasis Enable
93 * Transmitter Emphasis Amplitude
94 * Transmitter Amplitude
95 */
96 mvs_write_port_vsr_addr(mvi, phy_id, setting_0);
97 tmp = mvs_read_port_vsr_data(mvi, phy_id);
98 tmp &= ~(0xFBE << 16);
99 tmp |= (((phy_tuning.trans_emp_en << 11) |
100 (phy_tuning.trans_emp_amp << 7) |
101 (phy_tuning.trans_amp << 1)) << 16);
102 mvs_write_port_vsr_data(mvi, phy_id, tmp);
103
104 /* Set Transmitter Amplitude Adjust */
105 mvs_write_port_vsr_addr(mvi, phy_id, setting_1);
106 tmp = mvs_read_port_vsr_data(mvi, phy_id);
107 tmp &= ~(0xC000);
108 tmp |= (phy_tuning.trans_amp_adj << 14);
109 mvs_write_port_vsr_data(mvi, phy_id, tmp);
110 }
111}
112
113void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
114 struct ffe_control ffe)
115{
116 u32 tmp;
117
118 /* Don't run this if A0/B0 */
119 if ((mvi->pdev->revision == VANIR_A0_REV)
120 || (mvi->pdev->revision == VANIR_B0_REV))
121 return;
122
123 /* FFE Resistor and Capacitor */
124 /* R10Ch DFE Resolution Control/Squelch and FFE Setting
125 *
126 * FFE_FORCE [7]
127 * FFE_RES_SEL [6:4]
128 * FFE_CAP_SEL [3:0]
129 */
130 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_FFE_CONTROL);
131 tmp = mvs_read_port_vsr_data(mvi, phy_id);
132 tmp &= ~0xFF;
133
134 /* Read from HBA_Info_Page */
135 tmp |= ((0x1 << 7) |
136 (ffe.ffe_rss_sel << 4) |
137 (ffe.ffe_cap_sel << 0));
138
139 mvs_write_port_vsr_data(mvi, phy_id, tmp);
140
141 /* R064h PHY Mode Register 1
142 *
143 * DFE_DIS 18
144 */
145 mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
146 tmp = mvs_read_port_vsr_data(mvi, phy_id);
147 tmp &= ~0x40001;
148 /* Hard coding */
149 /* No defines in HBA_Info_Page */
150 tmp |= (0 << 18);
151 mvs_write_port_vsr_data(mvi, phy_id, tmp);
152
153 /* R110h DFE F0-F1 Coefficient Control/DFE Update Control
154 *
155 * DFE_UPDATE_EN [11:6]
156 * DFE_FX_FORCE [5:0]
157 */
158 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_DFE_UPDATE_CRTL);
159 tmp = mvs_read_port_vsr_data(mvi, phy_id);
160 tmp &= ~0xFFF;
161 /* Hard coding */
162 /* No defines in HBA_Info_Page */
163 tmp |= ((0x3F << 6) | (0x0 << 0));
164 mvs_write_port_vsr_data(mvi, phy_id, tmp);
165
166 /* R1A0h Interface and Digital Reference Clock Control/Reserved_50h
167 *
168 * FFE_TRAIN_EN 3
169 */
170 mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
171 tmp = mvs_read_port_vsr_data(mvi, phy_id);
172 tmp &= ~0x8;
173 /* Hard coding */
174 /* No defines in HBA_Info_Page */
175 tmp |= (0 << 3);
176 mvs_write_port_vsr_data(mvi, phy_id, tmp);
177}
178
179/*Notice: this function must be called when phy is disabled*/
180void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
181{
182 union reg_phy_cfg phy_cfg, phy_cfg_tmp;
183 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
184 phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, phy_id);
185 phy_cfg.v = 0;
186 phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy;
187 phy_cfg.u.sas_support = 1;
188 phy_cfg.u.sata_support = 1;
189 phy_cfg.u.sata_host_mode = 1;
190
191 switch (rate) {
192 case 0x0:
193 /* support 1.5 Gbps */
194 phy_cfg.u.speed_support = 1;
195 phy_cfg.u.snw_3_support = 0;
196 phy_cfg.u.tx_lnk_parity = 1;
197 phy_cfg.u.tx_spt_phs_lnk_rate = 0x30;
198 break;
199 case 0x1:
200
201 /* support 1.5, 3.0 Gbps */
202 phy_cfg.u.speed_support = 3;
203 phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c;
204 phy_cfg.u.tx_lgcl_lnk_rate = 0x08;
205 break;
206 case 0x2:
207 default:
208 /* support 1.5, 3.0, 6.0 Gbps */
209 phy_cfg.u.speed_support = 7;
210 phy_cfg.u.snw_3_support = 1;
211 phy_cfg.u.tx_lnk_parity = 1;
212 phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f;
213 phy_cfg.u.tx_lgcl_lnk_rate = 0x09;
214 break;
215 }
216 mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v);
217}
218
219static void __devinit
220mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
221{
222 u32 temp;
223 temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]);
224 if (temp == 0xFFFFFFFFL) {
225 mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6;
226 mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A;
227 mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3;
228 }
229
230 temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]);
231 if (temp == 0xFFL) {
232 switch (mvi->pdev->revision) {
233 case VANIR_A0_REV:
234 case VANIR_B0_REV:
235 mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
236 mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7;
237 break;
238 case VANIR_C0_REV:
239 case VANIR_C1_REV:
240 case VANIR_C2_REV:
241 default:
242 mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
243 mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC;
244 break;
245 }
246 }
247
248 temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]);
249 if (temp == 0xFFL)
250 /*set default phy_rate = 6Gbps*/
251 mvi->hba_info_param.phy_rate[phy_id] = 0x2;
252
253 set_phy_tuning(mvi, phy_id,
254 mvi->hba_info_param.phy_tuning[phy_id]);
255 set_phy_ffe_tuning(mvi, phy_id,
256 mvi->hba_info_param.ffe_ctl[phy_id]);
257 set_phy_rate(mvi, phy_id,
258 mvi->hba_info_param.phy_rate[phy_id]);
259}
260
51static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id) 261static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
52{ 262{
53 void __iomem *regs = mvi->regs; 263 void __iomem *regs = mvi->regs;
@@ -61,7 +271,14 @@ static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
61static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) 271static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
62{ 272{
63 u32 tmp; 273 u32 tmp;
64 274 u32 delay = 5000;
275 if (hard == MVS_PHY_TUNE) {
276 mvs_write_port_cfg_addr(mvi, phy_id, PHYR_SATA_CTL);
277 tmp = mvs_read_port_cfg_data(mvi, phy_id);
278 mvs_write_port_cfg_data(mvi, phy_id, tmp|0x20000000);
279 mvs_write_port_cfg_data(mvi, phy_id, tmp|0x100000);
280 return;
281 }
65 tmp = mvs_read_port_irq_stat(mvi, phy_id); 282 tmp = mvs_read_port_irq_stat(mvi, phy_id);
66 tmp &= ~PHYEV_RDY_CH; 283 tmp &= ~PHYEV_RDY_CH;
67 mvs_write_port_irq_stat(mvi, phy_id, tmp); 284 mvs_write_port_irq_stat(mvi, phy_id, tmp);
@@ -71,12 +288,15 @@ static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
71 mvs_write_phy_ctl(mvi, phy_id, tmp); 288 mvs_write_phy_ctl(mvi, phy_id, tmp);
72 do { 289 do {
73 tmp = mvs_read_phy_ctl(mvi, phy_id); 290 tmp = mvs_read_phy_ctl(mvi, phy_id);
74 } while (tmp & PHY_RST_HARD); 291 udelay(10);
292 delay--;
293 } while ((tmp & PHY_RST_HARD) && delay);
294 if (!delay)
295 mv_dprintk("phy hard reset failed.\n");
75 } else { 296 } else {
76 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT); 297 tmp = mvs_read_phy_ctl(mvi, phy_id);
77 tmp = mvs_read_port_vsr_data(mvi, phy_id);
78 tmp |= PHY_RST; 298 tmp |= PHY_RST;
79 mvs_write_port_vsr_data(mvi, phy_id, tmp); 299 mvs_write_phy_ctl(mvi, phy_id, tmp);
80 } 300 }
81} 301}
82 302
@@ -90,12 +310,25 @@ static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
90 310
91static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id) 311static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
92{ 312{
93 mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4); 313 u32 tmp;
94 mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1); 314 u8 revision = 0;
95 mvs_write_port_vsr_addr(mvi, phy_id, 0x104); 315
96 mvs_write_port_vsr_data(mvi, phy_id, 0x00018080); 316 revision = mvi->pdev->revision;
317 if (revision == VANIR_A0_REV) {
318 mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
319 mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
320 }
321 if (revision == VANIR_B0_REV) {
322 mvs_write_port_vsr_addr(mvi, phy_id, CMD_APP_MEM_CTL);
323 mvs_write_port_vsr_data(mvi, phy_id, 0x08001006);
324 mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
325 mvs_write_port_vsr_data(mvi, phy_id, 0x0000705f);
326 }
327
97 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); 328 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
98 mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff); 329 tmp = mvs_read_port_vsr_data(mvi, phy_id);
330 tmp |= bit(0);
331 mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff);
99} 332}
100 333
101static int __devinit mvs_94xx_init(struct mvs_info *mvi) 334static int __devinit mvs_94xx_init(struct mvs_info *mvi)
@@ -103,7 +336,9 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
103 void __iomem *regs = mvi->regs; 336 void __iomem *regs = mvi->regs;
104 int i; 337 int i;
105 u32 tmp, cctl; 338 u32 tmp, cctl;
339 u8 revision;
106 340
341 revision = mvi->pdev->revision;
107 mvs_show_pcie_usage(mvi); 342 mvs_show_pcie_usage(mvi);
108 if (mvi->flags & MVF_FLAG_SOC) { 343 if (mvi->flags & MVF_FLAG_SOC) {
109 tmp = mr32(MVS_PHY_CTL); 344 tmp = mr32(MVS_PHY_CTL);
@@ -133,6 +368,28 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
133 msleep(100); 368 msleep(100);
134 } 369 }
135 370
371 /* disable Multiplexing, enable phy implemented */
372 mw32(MVS_PORTS_IMP, 0xFF);
373
374 if (revision == VANIR_A0_REV) {
375 mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET);
376 mw32(MVS_PA_VSR_PORT, 0x00018080);
377 }
378 mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2);
379 if (revision == VANIR_A0_REV || revision == VANIR_B0_REV)
380 /* set 6G/3G/1.5G, multiplexing, without SSC */
381 mw32(MVS_PA_VSR_PORT, 0x0084d4fe);
382 else
383 /* set 6G/3G/1.5G, multiplexing, with and without SSC */
384 mw32(MVS_PA_VSR_PORT, 0x0084fffe);
385
386 if (revision == VANIR_B0_REV) {
387 mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL);
388 mw32(MVS_PA_VSR_PORT, 0x08001006);
389 mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA);
390 mw32(MVS_PA_VSR_PORT, 0x0000705f);
391 }
392
136 /* reset control */ 393 /* reset control */
137 mw32(MVS_PCS, 0); /* MVS_PCS */ 394 mw32(MVS_PCS, 0); /* MVS_PCS */
138 mw32(MVS_STP_REG_SET_0, 0); 395 mw32(MVS_STP_REG_SET_0, 0);
@@ -141,17 +398,8 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
141 /* init phys */ 398 /* init phys */
142 mvs_phy_hacks(mvi); 399 mvs_phy_hacks(mvi);
143 400
144 /* disable Multiplexing, enable phy implemented */
145 mw32(MVS_PORTS_IMP, 0xFF);
146
147
148 mw32(MVS_PA_VSR_ADDR, 0x00000104);
149 mw32(MVS_PA_VSR_PORT, 0x00018080);
150 mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
151 mw32(MVS_PA_VSR_PORT, 0x0084ffff);
152
153 /* set LED blink when IO*/ 401 /* set LED blink when IO*/
154 mw32(MVS_PA_VSR_ADDR, 0x00000030); 402 mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED);
155 tmp = mr32(MVS_PA_VSR_PORT); 403 tmp = mr32(MVS_PA_VSR_PORT);
156 tmp &= 0xFFFF00FF; 404 tmp &= 0xFFFF00FF;
157 tmp |= 0x00003300; 405 tmp |= 0x00003300;
@@ -175,12 +423,13 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
175 mvs_94xx_phy_disable(mvi, i); 423 mvs_94xx_phy_disable(mvi, i);
176 /* set phy local SAS address */ 424 /* set phy local SAS address */
177 mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4, 425 mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
178 (mvi->phy[i].dev_sas_addr)); 426 cpu_to_le64(mvi->phy[i].dev_sas_addr));
179 427
180 mvs_94xx_enable_xmt(mvi, i); 428 mvs_94xx_enable_xmt(mvi, i);
429 mvs_94xx_config_reg_from_hba(mvi, i);
181 mvs_94xx_phy_enable(mvi, i); 430 mvs_94xx_phy_enable(mvi, i);
182 431
183 mvs_94xx_phy_reset(mvi, i, 1); 432 mvs_94xx_phy_reset(mvi, i, PHY_RST_HARD);
184 msleep(500); 433 msleep(500);
185 mvs_94xx_detect_porttype(mvi, i); 434 mvs_94xx_detect_porttype(mvi, i);
186 } 435 }
@@ -211,16 +460,9 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
211 mvs_update_phyinfo(mvi, i, 1); 460 mvs_update_phyinfo(mvi, i, 1);
212 } 461 }
213 462
214 /* FIXME: update wide port bitmaps */
215
216 /* little endian for open address and command table, etc. */ 463 /* little endian for open address and command table, etc. */
217 /*
218 * it seems that ( from the spec ) turning on big-endian won't
219 * do us any good on big-endian machines, need further confirmation
220 */
221 cctl = mr32(MVS_CTL); 464 cctl = mr32(MVS_CTL);
222 cctl |= CCTL_ENDIAN_CMD; 465 cctl |= CCTL_ENDIAN_CMD;
223 cctl |= CCTL_ENDIAN_DATA;
224 cctl &= ~CCTL_ENDIAN_OPEN; 466 cctl &= ~CCTL_ENDIAN_OPEN;
225 cctl |= CCTL_ENDIAN_RSP; 467 cctl |= CCTL_ENDIAN_RSP;
226 mw32_f(MVS_CTL, cctl); 468 mw32_f(MVS_CTL, cctl);
@@ -228,15 +470,20 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
228 /* reset CMD queue */ 470 /* reset CMD queue */
229 tmp = mr32(MVS_PCS); 471 tmp = mr32(MVS_PCS);
230 tmp |= PCS_CMD_RST; 472 tmp |= PCS_CMD_RST;
473 tmp &= ~PCS_SELF_CLEAR;
231 mw32(MVS_PCS, tmp); 474 mw32(MVS_PCS, tmp);
232 /* interrupt coalescing may cause missing HW interrput in some case, 475 /*
233 * and the max count is 0x1ff, while our max slot is 0x200, 476 * the max count is 0x1ff, while our max slot is 0x200,
234 * it will make count 0. 477 * it will make count 0.
235 */ 478 */
236 tmp = 0; 479 tmp = 0;
237 mw32(MVS_INT_COAL, tmp); 480 if (MVS_CHIP_SLOT_SZ > 0x1ff)
481 mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
482 else
483 mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);
238 484
239 tmp = 0x100; 485 /* default interrupt coalescing time is 128us */
486 tmp = 0x10000 | interrupt_coalescing;
240 mw32(MVS_INT_COAL_TMOUT, tmp); 487 mw32(MVS_INT_COAL_TMOUT, tmp);
241 488
242 /* ladies and gentlemen, start your engines */ 489 /* ladies and gentlemen, start your engines */
@@ -249,7 +496,7 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
249 496
250 /* enable completion queue interrupt */ 497 /* enable completion queue interrupt */
251 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP | 498 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
252 CINT_DMA_PCIE); 499 CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR);
253 tmp |= CINT_PHY_MASK; 500 tmp |= CINT_PHY_MASK;
254 mw32(MVS_INT_MASK, tmp); 501 mw32(MVS_INT_MASK, tmp);
255 502
@@ -332,13 +579,10 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
332 if (((stat & IRQ_SAS_A) && mvi->id == 0) || 579 if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
333 ((stat & IRQ_SAS_B) && mvi->id == 1)) { 580 ((stat & IRQ_SAS_B) && mvi->id == 1)) {
334 mw32_f(MVS_INT_STAT, CINT_DONE); 581 mw32_f(MVS_INT_STAT, CINT_DONE);
335 #ifndef MVS_USE_TASKLET 582
336 spin_lock(&mvi->lock); 583 spin_lock(&mvi->lock);
337 #endif
338 mvs_int_full(mvi); 584 mvs_int_full(mvi);
339 #ifndef MVS_USE_TASKLET
340 spin_unlock(&mvi->lock); 585 spin_unlock(&mvi->lock);
341 #endif
342 } 586 }
343 return IRQ_HANDLED; 587 return IRQ_HANDLED;
344} 588}
@@ -346,10 +590,48 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
346static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx) 590static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
347{ 591{
348 u32 tmp; 592 u32 tmp;
349 mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32)); 593 tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
350 do { 594 if (tmp && 1 << (slot_idx % 32)) {
351 tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3)); 595 mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
352 } while (tmp & 1 << (slot_idx % 32)); 596 mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
597 1 << (slot_idx % 32));
598 do {
599 tmp = mvs_cr32(mvi,
600 MVS_COMMAND_ACTIVE + (slot_idx >> 3));
601 } while (tmp & 1 << (slot_idx % 32));
602 }
603}
604
605void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
606{
607 void __iomem *regs = mvi->regs;
608 u32 tmp;
609
610 if (clear_all) {
611 tmp = mr32(MVS_INT_STAT_SRS_0);
612 if (tmp) {
613 mv_dprintk("check SRS 0 %08X.\n", tmp);
614 mw32(MVS_INT_STAT_SRS_0, tmp);
615 }
616 tmp = mr32(MVS_INT_STAT_SRS_1);
617 if (tmp) {
618 mv_dprintk("check SRS 1 %08X.\n", tmp);
619 mw32(MVS_INT_STAT_SRS_1, tmp);
620 }
621 } else {
622 if (reg_set > 31)
623 tmp = mr32(MVS_INT_STAT_SRS_1);
624 else
625 tmp = mr32(MVS_INT_STAT_SRS_0);
626
627 if (tmp & (1 << (reg_set % 32))) {
628 mv_dprintk("register set 0x%x was stopped.\n", reg_set);
629 if (reg_set > 31)
630 mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32));
631 else
632 mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
633 }
634 }
353} 635}
354 636
355static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, 637static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
@@ -357,37 +639,56 @@ static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
357{ 639{
358 void __iomem *regs = mvi->regs; 640 void __iomem *regs = mvi->regs;
359 u32 tmp; 641 u32 tmp;
642 mvs_94xx_clear_srs_irq(mvi, 0, 1);
360 643
361 if (type == PORT_TYPE_SATA) { 644 tmp = mr32(MVS_INT_STAT);
362 tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs); 645 mw32(MVS_INT_STAT, tmp | CINT_CI_STOP);
363 mw32(MVS_INT_STAT_SRS_0, tmp);
364 }
365 mw32(MVS_INT_STAT, CINT_CI_STOP);
366 tmp = mr32(MVS_PCS) | 0xFF00; 646 tmp = mr32(MVS_PCS) | 0xFF00;
367 mw32(MVS_PCS, tmp); 647 mw32(MVS_PCS, tmp);
368} 648}
369 649
650static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi)
651{
652 void __iomem *regs = mvi->regs;
653 u32 err_0, err_1;
654 u8 i;
655 struct mvs_device *device;
656
657 err_0 = mr32(MVS_NON_NCQ_ERR_0);
658 err_1 = mr32(MVS_NON_NCQ_ERR_1);
659
660 mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n",
661 err_0, err_1);
662 for (i = 0; i < 32; i++) {
663 if (err_0 & bit(i)) {
664 device = mvs_find_dev_by_reg_set(mvi, i);
665 if (device)
666 mvs_release_task(mvi, device->sas_device);
667 }
668 if (err_1 & bit(i)) {
669 device = mvs_find_dev_by_reg_set(mvi, i+32);
670 if (device)
671 mvs_release_task(mvi, device->sas_device);
672 }
673 }
674
675 mw32(MVS_NON_NCQ_ERR_0, err_0);
676 mw32(MVS_NON_NCQ_ERR_1, err_1);
677}
678
370static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) 679static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
371{ 680{
372 void __iomem *regs = mvi->regs; 681 void __iomem *regs = mvi->regs;
373 u32 tmp;
374 u8 reg_set = *tfs; 682 u8 reg_set = *tfs;
375 683
376 if (*tfs == MVS_ID_NOT_MAPPED) 684 if (*tfs == MVS_ID_NOT_MAPPED)
377 return; 685 return;
378 686
379 mvi->sata_reg_set &= ~bit(reg_set); 687 mvi->sata_reg_set &= ~bit(reg_set);
380 if (reg_set < 32) { 688 if (reg_set < 32)
381 w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set); 689 w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
382 tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set; 690 else
383 if (tmp) 691 w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32));
384 mw32(MVS_INT_STAT_SRS_0, tmp);
385 } else {
386 w_reg_set_enable(reg_set, mvi->sata_reg_set);
387 tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
388 if (tmp)
389 mw32(MVS_INT_STAT_SRS_1, tmp);
390 }
391 692
392 *tfs = MVS_ID_NOT_MAPPED; 693 *tfs = MVS_ID_NOT_MAPPED;
393 694
@@ -403,7 +704,7 @@ static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
403 return 0; 704 return 0;
404 705
405 i = mv_ffc64(mvi->sata_reg_set); 706 i = mv_ffc64(mvi->sata_reg_set);
406 if (i > 32) { 707 if (i >= 32) {
407 mvi->sata_reg_set |= bit(i); 708 mvi->sata_reg_set |= bit(i);
408 w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32)); 709 w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
409 *tfs = i; 710 *tfs = i;
@@ -422,9 +723,12 @@ static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
422 int i; 723 int i;
423 struct scatterlist *sg; 724 struct scatterlist *sg;
424 struct mvs_prd *buf_prd = prd; 725 struct mvs_prd *buf_prd = prd;
726 struct mvs_prd_imt im_len;
727 *(u32 *)&im_len = 0;
425 for_each_sg(scatter, sg, nr, i) { 728 for_each_sg(scatter, sg, nr, i) {
426 buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); 729 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
427 buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg)); 730 im_len.len = sg_dma_len(sg);
731 buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
428 buf_prd++; 732 buf_prd++;
429 } 733 }
430} 734}
@@ -433,7 +737,7 @@ static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
433{ 737{
434 u32 phy_st; 738 u32 phy_st;
435 phy_st = mvs_read_phy_ctl(mvi, i); 739 phy_st = mvs_read_phy_ctl(mvi, i);
436 if (phy_st & PHY_READY_MASK) /* phy ready */ 740 if (phy_st & PHY_READY_MASK)
437 return 1; 741 return 1;
438 return 0; 742 return 0;
439} 743}
@@ -447,7 +751,7 @@ static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
447 for (i = 0; i < 7; i++) { 751 for (i = 0; i < 7; i++) {
448 mvs_write_port_cfg_addr(mvi, port_id, 752 mvs_write_port_cfg_addr(mvi, port_id,
449 CONFIG_ID_FRAME0 + i * 4); 753 CONFIG_ID_FRAME0 + i * 4);
450 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); 754 id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
451 } 755 }
452 memcpy(id, id_frame, 28); 756 memcpy(id, id_frame, 28);
453} 757}
@@ -458,15 +762,13 @@ static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
458 int i; 762 int i;
459 u32 id_frame[7]; 763 u32 id_frame[7];
460 764
461 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
462 for (i = 0; i < 7; i++) { 765 for (i = 0; i < 7; i++) {
463 mvs_write_port_cfg_addr(mvi, port_id, 766 mvs_write_port_cfg_addr(mvi, port_id,
464 CONFIG_ATT_ID_FRAME0 + i * 4); 767 CONFIG_ATT_ID_FRAME0 + i * 4);
465 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); 768 id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
466 mv_dprintk("94xx phy %d atta frame %d %x.\n", 769 mv_dprintk("94xx phy %d atta frame %d %x.\n",
467 port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]); 770 port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
468 } 771 }
469 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
470 memcpy(id, id_frame, 28); 772 memcpy(id, id_frame, 28);
471} 773}
472 774
@@ -526,7 +828,18 @@ static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
526void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, 828void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
527 struct sas_phy_linkrates *rates) 829 struct sas_phy_linkrates *rates)
528{ 830{
529 /* TODO */ 831 u32 lrmax = 0;
832 u32 tmp;
833
834 tmp = mvs_read_phy_ctl(mvi, phy_id);
835 lrmax = (rates->maximum_linkrate - SAS_LINK_RATE_1_5_GBPS) << 12;
836
837 if (lrmax) {
838 tmp &= ~(0x3 << 12);
839 tmp |= lrmax;
840 }
841 mvs_write_phy_ctl(mvi, phy_id, tmp);
842 mvs_94xx_phy_reset(mvi, phy_id, PHY_RST_HARD);
530} 843}
531 844
532static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi) 845static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
@@ -603,27 +916,59 @@ int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
603 return -1; 916 return -1;
604} 917}
605 918
606#ifndef DISABLE_HOTPLUG_DMA_FIX 919void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
607void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) 920 int buf_len, int from, void *prd)
608{ 921{
609 int i; 922 int i;
610 struct mvs_prd *buf_prd = prd; 923 struct mvs_prd *buf_prd = prd;
924 dma_addr_t buf_dma;
925 struct mvs_prd_imt im_len;
926
927 *(u32 *)&im_len = 0;
611 buf_prd += from; 928 buf_prd += from;
612 for (i = 0; i < MAX_SG_ENTRY - from; i++) { 929
613 buf_prd->addr = cpu_to_le64(buf_dma); 930#define PRD_CHAINED_ENTRY 0x01
614 buf_prd->im_len.len = cpu_to_le32(buf_len); 931 if ((mvi->pdev->revision == VANIR_A0_REV) ||
615 ++buf_prd; 932 (mvi->pdev->revision == VANIR_B0_REV))
933 buf_dma = (phy_mask <= 0x08) ?
934 mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1;
935 else
936 return;
937
938 for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) {
939 if (i == MAX_SG_ENTRY - 1) {
940 buf_prd->addr = cpu_to_le64(virt_to_phys(buf_prd - 1));
941 im_len.len = 2;
942 im_len.misc_ctl = PRD_CHAINED_ENTRY;
943 } else {
944 buf_prd->addr = cpu_to_le64(buf_dma);
945 im_len.len = buf_len;
946 }
947 buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
616 } 948 }
617} 949}
618#endif
619 950
620/* 951static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time)
621 * FIXME JEJB: temporary nop clear_srs_irq to make 94xx still work
622 * with 64xx fixes
623 */
624static void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set,
625 u8 clear_all)
626{ 952{
953 void __iomem *regs = mvi->regs;
954 u32 tmp = 0;
955 /*
956 * the max count is 0x1ff, while our max slot is 0x200,
957 * it will make count 0.
958 */
959 if (time == 0) {
960 mw32(MVS_INT_COAL, 0);
961 mw32(MVS_INT_COAL_TMOUT, 0x10000);
962 } else {
963 if (MVS_CHIP_SLOT_SZ > 0x1ff)
964 mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
965 else
966 mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);
967
968 tmp = 0x10000 | time;
969 mw32(MVS_INT_COAL_TMOUT, tmp);
970 }
971
627} 972}
628 973
629const struct mvs_dispatch mvs_94xx_dispatch = { 974const struct mvs_dispatch mvs_94xx_dispatch = {
@@ -648,7 +993,6 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
648 mvs_write_port_irq_stat, 993 mvs_write_port_irq_stat,
649 mvs_read_port_irq_mask, 994 mvs_read_port_irq_mask,
650 mvs_write_port_irq_mask, 995 mvs_write_port_irq_mask,
651 mvs_get_sas_addr,
652 mvs_94xx_command_active, 996 mvs_94xx_command_active,
653 mvs_94xx_clear_srs_irq, 997 mvs_94xx_clear_srs_irq,
654 mvs_94xx_issue_stop, 998 mvs_94xx_issue_stop,
@@ -676,8 +1020,8 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
676 mvs_94xx_spi_buildcmd, 1020 mvs_94xx_spi_buildcmd,
677 mvs_94xx_spi_issuecmd, 1021 mvs_94xx_spi_issuecmd,
678 mvs_94xx_spi_waitdataready, 1022 mvs_94xx_spi_waitdataready,
679#ifndef DISABLE_HOTPLUG_DMA_FIX
680 mvs_94xx_fix_dma, 1023 mvs_94xx_fix_dma,
681#endif 1024 mvs_94xx_tune_interrupt,
1025 mvs_94xx_non_spec_ncq_error,
682}; 1026};
683 1027
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
index 8835befe2c0e..8f7eb4f21140 100644
--- a/drivers/scsi/mvsas/mv_94xx.h
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -30,6 +30,14 @@
30 30
31#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS 31#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS
32 32
33enum VANIR_REVISION_ID {
34 VANIR_A0_REV = 0xA0,
35 VANIR_B0_REV = 0x01,
36 VANIR_C0_REV = 0x02,
37 VANIR_C1_REV = 0x03,
38 VANIR_C2_REV = 0xC2,
39};
40
33enum hw_registers { 41enum hw_registers {
34 MVS_GBL_CTL = 0x04, /* global control */ 42 MVS_GBL_CTL = 0x04, /* global control */
35 MVS_GBL_INT_STAT = 0x00, /* global irq status */ 43 MVS_GBL_INT_STAT = 0x00, /* global irq status */
@@ -101,6 +109,7 @@ enum hw_registers {
101 MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */ 109 MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */
102 MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */ 110 MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */
103 MVS_PA_VSR_PORT = 0x294, /* All port VSR data */ 111 MVS_PA_VSR_PORT = 0x294, /* All port VSR data */
112 MVS_COMMAND_ACTIVE = 0x300,
104}; 113};
105 114
106enum pci_cfg_registers { 115enum pci_cfg_registers {
@@ -112,26 +121,29 @@ enum pci_cfg_registers {
112 121
113/* SAS/SATA Vendor Specific Port Registers */ 122/* SAS/SATA Vendor Specific Port Registers */
114enum sas_sata_vsp_regs { 123enum sas_sata_vsp_regs {
115 VSR_PHY_STAT = 0x00 * 4, /* Phy Status */ 124 VSR_PHY_STAT = 0x00 * 4, /* Phy Interrupt Status */
116 VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */ 125 VSR_PHY_MODE1 = 0x01 * 4, /* phy Interrupt Enable */
117 VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */ 126 VSR_PHY_MODE2 = 0x02 * 4, /* Phy Configuration */
118 VSR_PHY_MODE3 = 0x03 * 4, /* pll */ 127 VSR_PHY_MODE3 = 0x03 * 4, /* Phy Status */
119 VSR_PHY_MODE4 = 0x04 * 4, /* VCO */ 128 VSR_PHY_MODE4 = 0x04 * 4, /* Phy Counter 0 */
120 VSR_PHY_MODE5 = 0x05 * 4, /* Rx */ 129 VSR_PHY_MODE5 = 0x05 * 4, /* Phy Counter 1 */
121 VSR_PHY_MODE6 = 0x06 * 4, /* CDR */ 130 VSR_PHY_MODE6 = 0x06 * 4, /* Event Counter Control */
122 VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */ 131 VSR_PHY_MODE7 = 0x07 * 4, /* Event Counter Select */
123 VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */ 132 VSR_PHY_MODE8 = 0x08 * 4, /* Event Counter 0 */
124 VSR_PHY_MODE9 = 0x09 * 4, /* Test */ 133 VSR_PHY_MODE9 = 0x09 * 4, /* Event Counter 1 */
125 VSR_PHY_MODE10 = 0x0A * 4, /* Power */ 134 VSR_PHY_MODE10 = 0x0A * 4, /* Event Counter 2 */
126 VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */ 135 VSR_PHY_MODE11 = 0x0B * 4, /* Event Counter 3 */
127 VSR_PHY_VS0 = 0x0C * 4, /* Vednor Specific 0 */ 136 VSR_PHY_ACT_LED = 0x0C * 4, /* Activity LED control */
128 VSR_PHY_VS1 = 0x0D * 4, /* Vednor Specific 1 */ 137
138 VSR_PHY_FFE_CONTROL = 0x10C,
139 VSR_PHY_DFE_UPDATE_CRTL = 0x110,
140 VSR_REF_CLOCK_CRTL = 0x1A0,
129}; 141};
130 142
131enum chip_register_bits { 143enum chip_register_bits {
132 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8), 144 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
133 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8), 145 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 12),
134 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12), 146 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
135 PHY_NEG_SPP_PHYS_LINK_RATE_MASK = 147 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
136 (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), 148 (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
137}; 149};
@@ -169,22 +181,75 @@ enum pci_interrupt_cause {
169 IRQ_PCIE_ERR = (1 << 31), 181 IRQ_PCIE_ERR = (1 << 31),
170}; 182};
171 183
184union reg_phy_cfg {
185 u32 v;
186 struct {
187 u32 phy_reset:1;
188 u32 sas_support:1;
189 u32 sata_support:1;
190 u32 sata_host_mode:1;
191 /*
192 * bit 2: 6Gbps support
193 * bit 1: 3Gbps support
194 * bit 0: 1.5Gbps support
195 */
196 u32 speed_support:3;
197 u32 snw_3_support:1;
198 u32 tx_lnk_parity:1;
199 /*
200 * bit 5: G1 (1.5Gbps) Without SSC
201 * bit 4: G1 (1.5Gbps) with SSC
202 * bit 3: G2 (3.0Gbps) Without SSC
203 * bit 2: G2 (3.0Gbps) with SSC
204 * bit 1: G3 (6.0Gbps) without SSC
205 * bit 0: G3 (6.0Gbps) with SSC
206 */
207 u32 tx_spt_phs_lnk_rate:6;
208 /* 8h: 1.5Gbps 9h: 3Gbps Ah: 6Gbps */
209 u32 tx_lgcl_lnk_rate:4;
210 u32 tx_ssc_type:1;
211 u32 sata_spin_up_spt:1;
212 u32 sata_spin_up_en:1;
213 u32 bypass_oob:1;
214 u32 disable_phy:1;
215 u32 rsvd:8;
216 } u;
217};
218
172#define MAX_SG_ENTRY 255 219#define MAX_SG_ENTRY 255
173 220
174struct mvs_prd_imt { 221struct mvs_prd_imt {
222#ifndef __BIG_ENDIAN
175 __le32 len:22; 223 __le32 len:22;
176 u8 _r_a:2; 224 u8 _r_a:2;
177 u8 misc_ctl:4; 225 u8 misc_ctl:4;
178 u8 inter_sel:4; 226 u8 inter_sel:4;
227#else
228 u32 inter_sel:4;
229 u32 misc_ctl:4;
230 u32 _r_a:2;
231 u32 len:22;
232#endif
179}; 233};
180 234
181struct mvs_prd { 235struct mvs_prd {
182 /* 64-bit buffer address */ 236 /* 64-bit buffer address */
183 __le64 addr; 237 __le64 addr;
184 /* 22-bit length */ 238 /* 22-bit length */
185 struct mvs_prd_imt im_len; 239 __le32 im_len;
186} __attribute__ ((packed)); 240} __attribute__ ((packed));
187 241
242/*
243 * these registers are accessed through port vendor
244 * specific address/data registers
245 */
246enum sas_sata_phy_regs {
247 GENERATION_1_SETTING = 0x118,
248 GENERATION_1_2_SETTING = 0x11C,
249 GENERATION_2_3_SETTING = 0x120,
250 GENERATION_3_4_SETTING = 0x124,
251};
252
188#define SPI_CTRL_REG_94XX 0xc800 253#define SPI_CTRL_REG_94XX 0xc800
189#define SPI_ADDR_REG_94XX 0xc804 254#define SPI_ADDR_REG_94XX 0xc804
190#define SPI_WR_DATA_REG_94XX 0xc808 255#define SPI_WR_DATA_REG_94XX 0xc808
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
index 1753a6fc42d0..bcc408042cee 100644
--- a/drivers/scsi/mvsas/mv_chips.h
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -164,7 +164,6 @@ static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
164{ 164{
165 u32 tmp; 165 u32 tmp;
166 166
167 /* workaround for SATA R-ERR, to ignore phy glitch */
168 tmp = mvs_cr32(mvi, CMD_PHY_TIMER); 167 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
169 tmp &= ~(1 << 9); 168 tmp &= ~(1 << 9);
170 tmp |= (1 << 10); 169 tmp |= (1 << 10);
@@ -179,23 +178,10 @@ static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
179 tmp |= 0x3fff; 178 tmp |= 0x3fff;
180 mvs_cw32(mvi, CMD_SAS_CTL0, tmp); 179 mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
181 180
182 /* workaround for WDTIMEOUT , set to 550 ms */
183 mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000); 181 mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
184 182
185 /* not to halt for different port op during wideport link change */ 183 /* not to halt for different port op during wideport link change */
186 mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d); 184 mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
187
188 /* workaround for Seagate disk not-found OOB sequence, recv
189 * COMINIT before sending out COMWAKE */
190 tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
191 tmp &= 0x0000ffff;
192 tmp |= 0x00fa0000;
193 mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
194
195 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
196 tmp &= 0x1fffffff;
197 tmp |= (2U << 29); /* 8 ms retry */
198 mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
199} 185}
200 186
201static inline void mvs_int_sata(struct mvs_info *mvi) 187static inline void mvs_int_sata(struct mvs_info *mvi)
@@ -223,6 +209,9 @@ static inline void mvs_int_full(struct mvs_info *mvi)
223 mvs_int_port(mvi, i, tmp); 209 mvs_int_port(mvi, i, tmp);
224 } 210 }
225 211
212 if (stat & CINT_NON_SPEC_NCQ_ERROR)
213 MVS_CHIP_DISP->non_spec_ncq_error(mvi);
214
226 if (stat & CINT_SRS) 215 if (stat & CINT_SRS)
227 mvs_int_sata(mvi); 216 mvs_int_sata(mvi);
228 217
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index bc00c940743c..dec7cadb7485 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -43,7 +43,6 @@ enum chip_flavors {
43 43
44/* driver compile-time configuration */ 44/* driver compile-time configuration */
45enum driver_configuration { 45enum driver_configuration {
46 MVS_SLOTS = 512, /* command slots */
47 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ 46 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
48 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ 47 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
49 /* software requires power-of-2 48 /* software requires power-of-2
@@ -56,8 +55,7 @@ enum driver_configuration {
56 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ 55 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
57 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ 56 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
58 MVS_OAF_SZ = 64, /* Open address frame buffer size */ 57 MVS_OAF_SZ = 64, /* Open address frame buffer size */
59 MVS_QUEUE_SIZE = 32, /* Support Queue depth */ 58 MVS_QUEUE_SIZE = 64, /* Support Queue depth */
60 MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */
61 MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2, 59 MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2,
62}; 60};
63 61
@@ -144,6 +142,7 @@ enum hw_register_bits {
144 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */ 142 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
145 CINT_MEM = (1U << 26), /* int mem parity err */ 143 CINT_MEM = (1U << 26), /* int mem parity err */
146 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */ 144 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
145 CINT_NON_SPEC_NCQ_ERROR = (1U << 25), /* Non specific NCQ error */
147 CINT_SRS = (1U << 3), /* SRS event */ 146 CINT_SRS = (1U << 3), /* SRS event */
148 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */ 147 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
149 CINT_DONE = (1U << 0), /* cmd completion */ 148 CINT_DONE = (1U << 0), /* cmd completion */
@@ -161,7 +160,7 @@ enum hw_register_bits {
161 TXQ_CMD_SSP = 1, /* SSP protocol */ 160 TXQ_CMD_SSP = 1, /* SSP protocol */
162 TXQ_CMD_SMP = 2, /* SMP protocol */ 161 TXQ_CMD_SMP = 2, /* SMP protocol */
163 TXQ_CMD_STP = 3, /* STP/SATA protocol */ 162 TXQ_CMD_STP = 3, /* STP/SATA protocol */
164 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ 163 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP target free list */
165 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ 164 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
166 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ 165 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
167 TXQ_MODE_TARGET = 0, 166 TXQ_MODE_TARGET = 0,
@@ -391,15 +390,15 @@ enum sas_cmd_port_registers {
391}; 390};
392 391
393enum mvs_info_flags { 392enum mvs_info_flags {
394 MVF_MSI = (1U << 0), /* MSI is enabled */
395 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ 393 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
396 MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */ 394 MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */
397}; 395};
398 396
399enum mvs_event_flags { 397enum mvs_event_flags {
400 PHY_PLUG_EVENT = (3U), 398 PHY_PLUG_EVENT = (3U),
401 PHY_PLUG_IN = (1U << 0), /* phy plug in */ 399 PHY_PLUG_IN = (1U << 0), /* phy plug in */
402 PHY_PLUG_OUT = (1U << 1), /* phy plug out */ 400 PHY_PLUG_OUT = (1U << 1), /* phy plug out */
401 EXP_BRCT_CHG = (1U << 2), /* broadcast change */
403}; 402};
404 403
405enum mvs_port_type { 404enum mvs_port_type {
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 90b636611cde..4e9af66fd1d3 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -34,22 +34,25 @@ MODULE_PARM_DESC(collector, "\n"
34 "\tThe mvsas SAS LLDD supports both modes.\n" 34 "\tThe mvsas SAS LLDD supports both modes.\n"
35 "\tDefault: 1 (Direct Mode).\n"); 35 "\tDefault: 1 (Direct Mode).\n");
36 36
37int interrupt_coalescing = 0x80;
38
37static struct scsi_transport_template *mvs_stt; 39static struct scsi_transport_template *mvs_stt;
38struct kmem_cache *mvs_task_list_cache; 40struct kmem_cache *mvs_task_list_cache;
39static const struct mvs_chip_info mvs_chips[] = { 41static const struct mvs_chip_info mvs_chips[] = {
40 [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 42 [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
41 [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 43 [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
42 [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, }, 44 [chip_6485] = { 1, 8, 0x800, 33, 32, 6, 10, &mvs_64xx_dispatch, },
43 [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 45 [chip_9180] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
44 [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 46 [chip_9480] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
45 [chip_9445] = { 1, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, }, 47 [chip_9445] = { 1, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
46 [chip_9485] = { 2, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, }, 48 [chip_9485] = { 2, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
47 [chip_1300] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 49 [chip_1300] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
48 [chip_1320] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 50 [chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
49}; 51};
50 52
53struct device_attribute *mvst_host_attrs[];
54
51#define SOC_SAS_NUM 2 55#define SOC_SAS_NUM 2
52#define SG_MX 64
53 56
54static struct scsi_host_template mvs_sht = { 57static struct scsi_host_template mvs_sht = {
55 .module = THIS_MODULE, 58 .module = THIS_MODULE,
@@ -66,7 +69,7 @@ static struct scsi_host_template mvs_sht = {
66 .can_queue = 1, 69 .can_queue = 1,
67 .cmd_per_lun = 1, 70 .cmd_per_lun = 1,
68 .this_id = -1, 71 .this_id = -1,
69 .sg_tablesize = SG_MX, 72 .sg_tablesize = SG_ALL,
70 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 73 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
71 .use_clustering = ENABLE_CLUSTERING, 74 .use_clustering = ENABLE_CLUSTERING,
72 .eh_device_reset_handler = sas_eh_device_reset_handler, 75 .eh_device_reset_handler = sas_eh_device_reset_handler,
@@ -74,6 +77,7 @@ static struct scsi_host_template mvs_sht = {
74 .slave_alloc = mvs_slave_alloc, 77 .slave_alloc = mvs_slave_alloc,
75 .target_destroy = sas_target_destroy, 78 .target_destroy = sas_target_destroy,
76 .ioctl = sas_ioctl, 79 .ioctl = sas_ioctl,
80 .shost_attrs = mvst_host_attrs,
77}; 81};
78 82
79static struct sas_domain_function_template mvs_transport_ops = { 83static struct sas_domain_function_template mvs_transport_ops = {
@@ -100,6 +104,7 @@ static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
100 struct asd_sas_phy *sas_phy = &phy->sas_phy; 104 struct asd_sas_phy *sas_phy = &phy->sas_phy;
101 105
102 phy->mvi = mvi; 106 phy->mvi = mvi;
107 phy->port = NULL;
103 init_timer(&phy->timer); 108 init_timer(&phy->timer);
104 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0; 109 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
105 sas_phy->class = SAS; 110 sas_phy->class = SAS;
@@ -128,7 +133,7 @@ static void mvs_free(struct mvs_info *mvi)
128 if (mvi->flags & MVF_FLAG_SOC) 133 if (mvi->flags & MVF_FLAG_SOC)
129 slot_nr = MVS_SOC_SLOTS; 134 slot_nr = MVS_SOC_SLOTS;
130 else 135 else
131 slot_nr = MVS_SLOTS; 136 slot_nr = MVS_CHIP_SLOT_SZ;
132 137
133 if (mvi->dma_pool) 138 if (mvi->dma_pool)
134 pci_pool_destroy(mvi->dma_pool); 139 pci_pool_destroy(mvi->dma_pool);
@@ -148,25 +153,26 @@ static void mvs_free(struct mvs_info *mvi)
148 dma_free_coherent(mvi->dev, 153 dma_free_coherent(mvi->dev,
149 sizeof(*mvi->slot) * slot_nr, 154 sizeof(*mvi->slot) * slot_nr,
150 mvi->slot, mvi->slot_dma); 155 mvi->slot, mvi->slot_dma);
151#ifndef DISABLE_HOTPLUG_DMA_FIX 156
152 if (mvi->bulk_buffer) 157 if (mvi->bulk_buffer)
153 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE, 158 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
154 mvi->bulk_buffer, mvi->bulk_buffer_dma); 159 mvi->bulk_buffer, mvi->bulk_buffer_dma);
155#endif 160 if (mvi->bulk_buffer1)
161 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
162 mvi->bulk_buffer1, mvi->bulk_buffer_dma1);
156 163
157 MVS_CHIP_DISP->chip_iounmap(mvi); 164 MVS_CHIP_DISP->chip_iounmap(mvi);
158 if (mvi->shost) 165 if (mvi->shost)
159 scsi_host_put(mvi->shost); 166 scsi_host_put(mvi->shost);
160 list_for_each_entry(mwq, &mvi->wq_list, entry) 167 list_for_each_entry(mwq, &mvi->wq_list, entry)
161 cancel_delayed_work(&mwq->work_q); 168 cancel_delayed_work(&mwq->work_q);
169 kfree(mvi->tags);
162 kfree(mvi); 170 kfree(mvi);
163} 171}
164 172
165#ifdef MVS_USE_TASKLET 173#ifdef CONFIG_SCSI_MVSAS_TASKLET
166struct tasklet_struct mv_tasklet;
167static void mvs_tasklet(unsigned long opaque) 174static void mvs_tasklet(unsigned long opaque)
168{ 175{
169 unsigned long flags;
170 u32 stat; 176 u32 stat;
171 u16 core_nr, i = 0; 177 u16 core_nr, i = 0;
172 178
@@ -179,35 +185,49 @@ static void mvs_tasklet(unsigned long opaque)
179 if (unlikely(!mvi)) 185 if (unlikely(!mvi))
180 BUG_ON(1); 186 BUG_ON(1);
181 187
188 stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq);
189 if (!stat)
190 goto out;
191
182 for (i = 0; i < core_nr; i++) { 192 for (i = 0; i < core_nr; i++) {
183 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; 193 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
184 stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq); 194 MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat);
185 if (stat)
186 MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
187 } 195 }
196out:
197 MVS_CHIP_DISP->interrupt_enable(mvi);
188 198
189} 199}
190#endif 200#endif
191 201
192static irqreturn_t mvs_interrupt(int irq, void *opaque) 202static irqreturn_t mvs_interrupt(int irq, void *opaque)
193{ 203{
194 u32 core_nr, i = 0; 204 u32 core_nr;
195 u32 stat; 205 u32 stat;
196 struct mvs_info *mvi; 206 struct mvs_info *mvi;
197 struct sas_ha_struct *sha = opaque; 207 struct sas_ha_struct *sha = opaque;
208#ifndef CONFIG_SCSI_MVSAS_TASKLET
209 u32 i;
210#endif
198 211
199 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; 212 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
200 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; 213 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
201 214
202 if (unlikely(!mvi)) 215 if (unlikely(!mvi))
203 return IRQ_NONE; 216 return IRQ_NONE;
217#ifdef CONFIG_SCSI_MVSAS_TASKLET
218 MVS_CHIP_DISP->interrupt_disable(mvi);
219#endif
204 220
205 stat = MVS_CHIP_DISP->isr_status(mvi, irq); 221 stat = MVS_CHIP_DISP->isr_status(mvi, irq);
206 if (!stat) 222 if (!stat) {
223 #ifdef CONFIG_SCSI_MVSAS_TASKLET
224 MVS_CHIP_DISP->interrupt_enable(mvi);
225 #endif
207 return IRQ_NONE; 226 return IRQ_NONE;
227 }
208 228
209#ifdef MVS_USE_TASKLET 229#ifdef CONFIG_SCSI_MVSAS_TASKLET
210 tasklet_schedule(&mv_tasklet); 230 tasklet_schedule(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
211#else 231#else
212 for (i = 0; i < core_nr; i++) { 232 for (i = 0; i < core_nr; i++) {
213 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; 233 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
@@ -225,7 +245,7 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
225 if (mvi->flags & MVF_FLAG_SOC) 245 if (mvi->flags & MVF_FLAG_SOC)
226 slot_nr = MVS_SOC_SLOTS; 246 slot_nr = MVS_SOC_SLOTS;
227 else 247 else
228 slot_nr = MVS_SLOTS; 248 slot_nr = MVS_CHIP_SLOT_SZ;
229 249
230 spin_lock_init(&mvi->lock); 250 spin_lock_init(&mvi->lock);
231 for (i = 0; i < mvi->chip->n_phy; i++) { 251 for (i = 0; i < mvi->chip->n_phy; i++) {
@@ -273,13 +293,18 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
273 goto err_out; 293 goto err_out;
274 memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr); 294 memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
275 295
276#ifndef DISABLE_HOTPLUG_DMA_FIX
277 mvi->bulk_buffer = dma_alloc_coherent(mvi->dev, 296 mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
278 TRASH_BUCKET_SIZE, 297 TRASH_BUCKET_SIZE,
279 &mvi->bulk_buffer_dma, GFP_KERNEL); 298 &mvi->bulk_buffer_dma, GFP_KERNEL);
280 if (!mvi->bulk_buffer) 299 if (!mvi->bulk_buffer)
281 goto err_out; 300 goto err_out;
282#endif 301
302 mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev,
303 TRASH_BUCKET_SIZE,
304 &mvi->bulk_buffer_dma1, GFP_KERNEL);
305 if (!mvi->bulk_buffer1)
306 goto err_out;
307
283 sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id); 308 sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
284 mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0); 309 mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0);
285 if (!mvi->dma_pool) { 310 if (!mvi->dma_pool) {
@@ -354,11 +379,12 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
354 const struct pci_device_id *ent, 379 const struct pci_device_id *ent,
355 struct Scsi_Host *shost, unsigned int id) 380 struct Scsi_Host *shost, unsigned int id)
356{ 381{
357 struct mvs_info *mvi; 382 struct mvs_info *mvi = NULL;
358 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 383 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
359 384
360 mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info), 385 mvi = kzalloc(sizeof(*mvi) +
361 GFP_KERNEL); 386 (1L << mvs_chips[ent->driver_data].slot_width) *
387 sizeof(struct mvs_slot_info), GFP_KERNEL);
362 if (!mvi) 388 if (!mvi)
363 return NULL; 389 return NULL;
364 390
@@ -367,7 +393,6 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
367 mvi->chip_id = ent->driver_data; 393 mvi->chip_id = ent->driver_data;
368 mvi->chip = &mvs_chips[mvi->chip_id]; 394 mvi->chip = &mvs_chips[mvi->chip_id];
369 INIT_LIST_HEAD(&mvi->wq_list); 395 INIT_LIST_HEAD(&mvi->wq_list);
370 mvi->irq = pdev->irq;
371 396
372 ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi; 397 ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
373 ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy; 398 ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
@@ -375,9 +400,10 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
375 mvi->id = id; 400 mvi->id = id;
376 mvi->sas = sha; 401 mvi->sas = sha;
377 mvi->shost = shost; 402 mvi->shost = shost;
378#ifdef MVS_USE_TASKLET 403
379 tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha); 404 mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ>>3, GFP_KERNEL);
380#endif 405 if (!mvi->tags)
406 goto err_out;
381 407
382 if (MVS_CHIP_DISP->chip_ioremap(mvi)) 408 if (MVS_CHIP_DISP->chip_ioremap(mvi))
383 goto err_out; 409 goto err_out;
@@ -388,7 +414,6 @@ err_out:
388 return NULL; 414 return NULL;
389} 415}
390 416
391/* move to PCI layer or libata core? */
392static int pci_go_64(struct pci_dev *pdev) 417static int pci_go_64(struct pci_dev *pdev)
393{ 418{
394 int rc; 419 int rc;
@@ -450,7 +475,7 @@ static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
450 ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr; 475 ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
451 476
452 shost->transportt = mvs_stt; 477 shost->transportt = mvs_stt;
453 shost->max_id = 128; 478 shost->max_id = MVS_MAX_DEVICES;
454 shost->max_lun = ~0; 479 shost->max_lun = ~0;
455 shost->max_channel = 1; 480 shost->max_channel = 1;
456 shost->max_cmd_len = 16; 481 shost->max_cmd_len = 16;
@@ -493,11 +518,12 @@ static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
493 if (mvi->flags & MVF_FLAG_SOC) 518 if (mvi->flags & MVF_FLAG_SOC)
494 can_queue = MVS_SOC_CAN_QUEUE; 519 can_queue = MVS_SOC_CAN_QUEUE;
495 else 520 else
496 can_queue = MVS_CAN_QUEUE; 521 can_queue = MVS_CHIP_SLOT_SZ;
497 522
498 sha->lldd_queue_size = can_queue; 523 sha->lldd_queue_size = can_queue;
524 shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
499 shost->can_queue = can_queue; 525 shost->can_queue = can_queue;
500 mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys; 526 mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
501 sha->core.shost = mvi->shost; 527 sha->core.shost = mvi->shost;
502} 528}
503 529
@@ -518,6 +544,7 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
518{ 544{
519 unsigned int rc, nhost = 0; 545 unsigned int rc, nhost = 0;
520 struct mvs_info *mvi; 546 struct mvs_info *mvi;
547 struct mvs_prv_info *mpi;
521 irq_handler_t irq_handler = mvs_interrupt; 548 irq_handler_t irq_handler = mvs_interrupt;
522 struct Scsi_Host *shost = NULL; 549 struct Scsi_Host *shost = NULL;
523 const struct mvs_chip_info *chip; 550 const struct mvs_chip_info *chip;
@@ -569,6 +596,9 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
569 goto err_out_regions; 596 goto err_out_regions;
570 } 597 }
571 598
599 memset(&mvi->hba_info_param, 0xFF,
600 sizeof(struct hba_info_page));
601
572 mvs_init_sas_add(mvi); 602 mvs_init_sas_add(mvi);
573 603
574 mvi->instance = nhost; 604 mvi->instance = nhost;
@@ -579,8 +609,9 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
579 } 609 }
580 nhost++; 610 nhost++;
581 } while (nhost < chip->n_host); 611 } while (nhost < chip->n_host);
582#ifdef MVS_USE_TASKLET 612 mpi = (struct mvs_prv_info *)(SHOST_TO_SAS_HA(shost)->lldd_ha);
583 tasklet_init(&mv_tasklet, mvs_tasklet, 613#ifdef CONFIG_SCSI_MVSAS_TASKLET
614 tasklet_init(&(mpi->mv_tasklet), mvs_tasklet,
584 (unsigned long)SHOST_TO_SAS_HA(shost)); 615 (unsigned long)SHOST_TO_SAS_HA(shost));
585#endif 616#endif
586 617
@@ -625,8 +656,8 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev)
625 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; 656 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
626 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; 657 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
627 658
628#ifdef MVS_USE_TASKLET 659#ifdef CONFIG_SCSI_MVSAS_TASKLET
629 tasklet_kill(&mv_tasklet); 660 tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
630#endif 661#endif
631 662
632 pci_set_drvdata(pdev, NULL); 663 pci_set_drvdata(pdev, NULL);
@@ -635,7 +666,7 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev)
635 scsi_remove_host(mvi->shost); 666 scsi_remove_host(mvi->shost);
636 667
637 MVS_CHIP_DISP->interrupt_disable(mvi); 668 MVS_CHIP_DISP->interrupt_disable(mvi);
638 free_irq(mvi->irq, sha); 669 free_irq(mvi->pdev->irq, sha);
639 for (i = 0; i < core_nr; i++) { 670 for (i = 0; i < core_nr; i++) {
640 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; 671 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
641 mvs_free(mvi); 672 mvs_free(mvi);
@@ -703,6 +734,70 @@ static struct pci_driver mvs_pci_driver = {
703 .remove = __devexit_p(mvs_pci_remove), 734 .remove = __devexit_p(mvs_pci_remove),
704}; 735};
705 736
737static ssize_t
738mvs_show_driver_version(struct device *cdev,
739 struct device_attribute *attr, char *buffer)
740{
741 return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION);
742}
743
744static DEVICE_ATTR(driver_version,
745 S_IRUGO,
746 mvs_show_driver_version,
747 NULL);
748
749static ssize_t
750mvs_store_interrupt_coalescing(struct device *cdev,
751 struct device_attribute *attr,
752 const char *buffer, size_t size)
753{
754 int val = 0;
755 struct mvs_info *mvi = NULL;
756 struct Scsi_Host *shost = class_to_shost(cdev);
757 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
758 u8 i, core_nr;
759 if (buffer == NULL)
760 return size;
761
762 if (sscanf(buffer, "%d", &val) != 1)
763 return -EINVAL;
764
765 if (val >= 0x10000) {
766 mv_dprintk("interrupt coalescing timer %d us is"
767 "too long\n", val);
768 return strlen(buffer);
769 }
770
771 interrupt_coalescing = val;
772
773 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
774 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
775
776 if (unlikely(!mvi))
777 return -EINVAL;
778
779 for (i = 0; i < core_nr; i++) {
780 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
781 if (MVS_CHIP_DISP->tune_interrupt)
782 MVS_CHIP_DISP->tune_interrupt(mvi,
783 interrupt_coalescing);
784 }
785 mv_dprintk("set interrupt coalescing time to %d us\n",
786 interrupt_coalescing);
787 return strlen(buffer);
788}
789
790static ssize_t mvs_show_interrupt_coalescing(struct device *cdev,
791 struct device_attribute *attr, char *buffer)
792{
793 return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing);
794}
795
796static DEVICE_ATTR(interrupt_coalescing,
797 S_IRUGO|S_IWUSR,
798 mvs_show_interrupt_coalescing,
799 mvs_store_interrupt_coalescing);
800
706/* task handler */ 801/* task handler */
707struct task_struct *mvs_th; 802struct task_struct *mvs_th;
708static int __init mvs_init(void) 803static int __init mvs_init(void)
@@ -739,6 +834,12 @@ static void __exit mvs_exit(void)
739 kmem_cache_destroy(mvs_task_list_cache); 834 kmem_cache_destroy(mvs_task_list_cache);
740} 835}
741 836
837struct device_attribute *mvst_host_attrs[] = {
838 &dev_attr_driver_version,
839 &dev_attr_interrupt_coalescing,
840 NULL,
841};
842
742module_init(mvs_init); 843module_init(mvs_init);
743module_exit(mvs_exit); 844module_exit(mvs_exit);
744 845
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 0ef27425c447..4958fefff365 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -38,7 +38,7 @@ static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
38 38
39void mvs_tag_clear(struct mvs_info *mvi, u32 tag) 39void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
40{ 40{
41 void *bitmap = &mvi->tags; 41 void *bitmap = mvi->tags;
42 clear_bit(tag, bitmap); 42 clear_bit(tag, bitmap);
43} 43}
44 44
@@ -49,14 +49,14 @@ void mvs_tag_free(struct mvs_info *mvi, u32 tag)
49 49
50void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) 50void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
51{ 51{
52 void *bitmap = &mvi->tags; 52 void *bitmap = mvi->tags;
53 set_bit(tag, bitmap); 53 set_bit(tag, bitmap);
54} 54}
55 55
56inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) 56inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
57{ 57{
58 unsigned int index, tag; 58 unsigned int index, tag;
59 void *bitmap = &mvi->tags; 59 void *bitmap = mvi->tags;
60 60
61 index = find_first_zero_bit(bitmap, mvi->tags_num); 61 index = find_first_zero_bit(bitmap, mvi->tags_num);
62 tag = index; 62 tag = index;
@@ -74,126 +74,6 @@ void mvs_tag_init(struct mvs_info *mvi)
74 mvs_tag_clear(mvi, i); 74 mvs_tag_clear(mvi, i);
75} 75}
76 76
77void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
78{
79 u32 i;
80 u32 run;
81 u32 offset;
82
83 offset = 0;
84 while (size) {
85 printk(KERN_DEBUG"%08X : ", baseaddr + offset);
86 if (size >= 16)
87 run = 16;
88 else
89 run = size;
90 size -= run;
91 for (i = 0; i < 16; i++) {
92 if (i < run)
93 printk(KERN_DEBUG"%02X ", (u32)data[i]);
94 else
95 printk(KERN_DEBUG" ");
96 }
97 printk(KERN_DEBUG": ");
98 for (i = 0; i < run; i++)
99 printk(KERN_DEBUG"%c",
100 isalnum(data[i]) ? data[i] : '.');
101 printk(KERN_DEBUG"\n");
102 data = &data[16];
103 offset += run;
104 }
105 printk(KERN_DEBUG"\n");
106}
107
108#if (_MV_DUMP > 1)
109static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
110 enum sas_protocol proto)
111{
112 u32 offset;
113 struct mvs_slot_info *slot = &mvi->slot_info[tag];
114
115 offset = slot->cmd_size + MVS_OAF_SZ +
116 MVS_CHIP_DISP->prd_size() * slot->n_elem;
117 dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
118 tag);
119 mvs_hexdump(32, (u8 *) slot->response,
120 (u32) slot->buf_dma + offset);
121}
122#endif
123
124static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
125 enum sas_protocol proto)
126{
127#if (_MV_DUMP > 1)
128 u32 sz, w_ptr;
129 u64 addr;
130 struct mvs_slot_info *slot = &mvi->slot_info[tag];
131
132 /*Delivery Queue */
133 sz = MVS_CHIP_SLOT_SZ;
134 w_ptr = slot->tx;
135 addr = mvi->tx_dma;
136 dev_printk(KERN_DEBUG, mvi->dev,
137 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
138 dev_printk(KERN_DEBUG, mvi->dev,
139 "Delivery Queue Base Address=0x%llX (PA)"
140 "(tx_dma=0x%llX), Entry=%04d\n",
141 addr, (unsigned long long)mvi->tx_dma, w_ptr);
142 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
143 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
144 /*Command List */
145 addr = mvi->slot_dma;
146 dev_printk(KERN_DEBUG, mvi->dev,
147 "Command List Base Address=0x%llX (PA)"
148 "(slot_dma=0x%llX), Header=%03d\n",
149 addr, (unsigned long long)slot->buf_dma, tag);
150 dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
151 /*mvs_cmd_hdr */
152 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
153 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
154 /*1.command table area */
155 dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
156 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
157 /*2.open address frame area */
158 dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
159 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
160 (u32) slot->buf_dma + slot->cmd_size);
161 /*3.status buffer */
162 mvs_hba_sb_dump(mvi, tag, proto);
163 /*4.PRD table */
164 dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
165 mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
166 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
167 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
168#endif
169}
170
171static void mvs_hba_cq_dump(struct mvs_info *mvi)
172{
173#if (_MV_DUMP > 2)
174 u64 addr;
175 void __iomem *regs = mvi->regs;
176 u32 entry = mvi->rx_cons + 1;
177 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
178
179 /*Completion Queue */
180 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
181 dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
182 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
183 dev_printk(KERN_DEBUG, mvi->dev,
184 "Completion List Base Address=0x%llX (PA), "
185 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
186 addr, entry - 1, mvi->rx[0]);
187 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
188 mvi->rx_dma + sizeof(u32) * entry);
189#endif
190}
191
192void mvs_get_sas_addr(void *buf, u32 buflen)
193{
194 /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
195}
196
197struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) 77struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
198{ 78{
199 unsigned long i = 0, j = 0, hi = 0; 79 unsigned long i = 0, j = 0, hi = 0;
@@ -222,7 +102,6 @@ struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
222 102
223} 103}
224 104
225/* FIXME */
226int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) 105int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
227{ 106{
228 unsigned long i = 0, j = 0, n = 0, num = 0; 107 unsigned long i = 0, j = 0, n = 0, num = 0;
@@ -253,6 +132,20 @@ int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
253 return num; 132 return num;
254} 133}
255 134
135struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
136 u8 reg_set)
137{
138 u32 dev_no;
139 for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
140 if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
141 continue;
142
143 if (mvi->devices[dev_no].taskfileset == reg_set)
144 return &mvi->devices[dev_no];
145 }
146 return NULL;
147}
148
256static inline void mvs_free_reg_set(struct mvs_info *mvi, 149static inline void mvs_free_reg_set(struct mvs_info *mvi,
257 struct mvs_device *dev) 150 struct mvs_device *dev)
258{ 151{
@@ -283,7 +176,6 @@ void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
283 } 176 }
284} 177}
285 178
286/* FIXME: locking? */
287int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, 179int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
288 void *funcdata) 180 void *funcdata)
289{ 181{
@@ -309,12 +201,12 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
309 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id); 201 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
310 if (tmp & PHY_RST_HARD) 202 if (tmp & PHY_RST_HARD)
311 break; 203 break;
312 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1); 204 MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
313 break; 205 break;
314 206
315 case PHY_FUNC_LINK_RESET: 207 case PHY_FUNC_LINK_RESET:
316 MVS_CHIP_DISP->phy_enable(mvi, phy_id); 208 MVS_CHIP_DISP->phy_enable(mvi, phy_id);
317 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0); 209 MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
318 break; 210 break;
319 211
320 case PHY_FUNC_DISABLE: 212 case PHY_FUNC_DISABLE:
@@ -406,14 +298,10 @@ int mvs_slave_configure(struct scsi_device *sdev)
406 298
407 if (ret) 299 if (ret)
408 return ret; 300 return ret;
409 if (dev_is_sata(dev)) { 301 if (!dev_is_sata(dev)) {
410 /* may set PIO mode */ 302 sas_change_queue_depth(sdev,
411 #if MV_DISABLE_NCQ 303 MVS_QUEUE_SIZE,
412 struct ata_port *ap = dev->sata_dev.ap; 304 SCSI_QDEPTH_DEFAULT);
413 struct ata_device *adev = ap->link.device;
414 adev->flags |= ATA_DFLAG_NCQ_OFF;
415 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
416 #endif
417 } 305 }
418 return 0; 306 return 0;
419} 307}
@@ -424,6 +312,7 @@ void mvs_scan_start(struct Scsi_Host *shost)
424 unsigned short core_nr; 312 unsigned short core_nr;
425 struct mvs_info *mvi; 313 struct mvs_info *mvi;
426 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 314 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
315 struct mvs_prv_info *mvs_prv = sha->lldd_ha;
427 316
428 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; 317 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
429 318
@@ -432,15 +321,17 @@ void mvs_scan_start(struct Scsi_Host *shost)
432 for (i = 0; i < mvi->chip->n_phy; ++i) 321 for (i = 0; i < mvi->chip->n_phy; ++i)
433 mvs_bytes_dmaed(mvi, i); 322 mvs_bytes_dmaed(mvi, i);
434 } 323 }
324 mvs_prv->scan_finished = 1;
435} 325}
436 326
437int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) 327int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
438{ 328{
439 /* give the phy enabling interrupt event time to come in (1s 329 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
440 * is empirically about all it takes) */ 330 struct mvs_prv_info *mvs_prv = sha->lldd_ha;
441 if (time < HZ) 331
332 if (mvs_prv->scan_finished == 0)
442 return 0; 333 return 0;
443 /* Wait for discovery to finish */ 334
444 scsi_flush_work(shost); 335 scsi_flush_work(shost);
445 return 1; 336 return 1;
446} 337}
@@ -461,10 +352,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
461 void *buf_prd; 352 void *buf_prd;
462 struct mvs_slot_info *slot = &mvi->slot_info[tag]; 353 struct mvs_slot_info *slot = &mvi->slot_info[tag];
463 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); 354 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
464#if _MV_DUMP 355
465 u8 *buf_cmd;
466 void *from;
467#endif
468 /* 356 /*
469 * DMA-map SMP request, response buffers 357 * DMA-map SMP request, response buffers
470 */ 358 */
@@ -496,15 +384,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
496 buf_tmp = slot->buf; 384 buf_tmp = slot->buf;
497 buf_tmp_dma = slot->buf_dma; 385 buf_tmp_dma = slot->buf_dma;
498 386
499#if _MV_DUMP
500 buf_cmd = buf_tmp;
501 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
502 buf_tmp += req_len;
503 buf_tmp_dma += req_len;
504 slot->cmd_size = req_len;
505#else
506 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); 387 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
507#endif
508 388
509 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ 389 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
510 buf_oaf = buf_tmp; 390 buf_oaf = buf_tmp;
@@ -553,12 +433,6 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
553 /* fill in PRD (scatter/gather) table, if any */ 433 /* fill in PRD (scatter/gather) table, if any */
554 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); 434 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
555 435
556#if _MV_DUMP
557 /* copy cmd table */
558 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
559 memcpy(buf_cmd, from + sg_req->offset, req_len);
560 kunmap_atomic(from, KM_IRQ0);
561#endif
562 return 0; 436 return 0;
563 437
564err_out_2: 438err_out_2:
@@ -616,14 +490,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
616 (mvi_dev->taskfileset << TXQ_SRS_SHIFT); 490 (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
617 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); 491 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
618 492
619#ifndef DISABLE_HOTPLUG_DMA_FIX
620 if (task->data_dir == DMA_FROM_DEVICE) 493 if (task->data_dir == DMA_FROM_DEVICE)
621 flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT); 494 flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
622 else 495 else
623 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); 496 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
624#else 497
625 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
626#endif
627 if (task->ata_task.use_ncq) 498 if (task->ata_task.use_ncq)
628 flags |= MCH_FPDMA; 499 flags |= MCH_FPDMA;
629 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { 500 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
@@ -631,11 +502,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
631 flags |= MCH_ATAPI; 502 flags |= MCH_ATAPI;
632 } 503 }
633 504
634 /* FIXME: fill in port multiplier number */
635
636 hdr->flags = cpu_to_le32(flags); 505 hdr->flags = cpu_to_le32(flags);
637 506
638 /* FIXME: the low order order 5 bits for the TAG if enable NCQ */
639 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) 507 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
640 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); 508 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
641 else 509 else
@@ -657,9 +525,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
657 525
658 buf_tmp += MVS_ATA_CMD_SZ; 526 buf_tmp += MVS_ATA_CMD_SZ;
659 buf_tmp_dma += MVS_ATA_CMD_SZ; 527 buf_tmp_dma += MVS_ATA_CMD_SZ;
660#if _MV_DUMP
661 slot->cmd_size = MVS_ATA_CMD_SZ;
662#endif
663 528
664 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ 529 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
665 /* used for STP. unused for SATA? */ 530 /* used for STP. unused for SATA? */
@@ -682,9 +547,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
682 buf_tmp_dma += i; 547 buf_tmp_dma += i;
683 548
684 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ 549 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
685 /* FIXME: probably unused, for SATA. kept here just in case
686 * we get a STP/SATA error information record
687 */
688 slot->response = buf_tmp; 550 slot->response = buf_tmp;
689 hdr->status_buf = cpu_to_le64(buf_tmp_dma); 551 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
690 if (mvi->flags & MVF_FLAG_SOC) 552 if (mvi->flags & MVF_FLAG_SOC)
@@ -715,11 +577,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
715 577
716 /* fill in PRD (scatter/gather) table, if any */ 578 /* fill in PRD (scatter/gather) table, if any */
717 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); 579 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
718#ifndef DISABLE_HOTPLUG_DMA_FIX 580
719 if (task->data_dir == DMA_FROM_DEVICE) 581 if (task->data_dir == DMA_FROM_DEVICE)
720 MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma, 582 MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
721 TRASH_BUCKET_SIZE, tei->n_elem, buf_prd); 583 TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
722#endif 584
723 return 0; 585 return 0;
724} 586}
725 587
@@ -761,6 +623,9 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
761 } 623 }
762 if (is_tmf) 624 if (is_tmf)
763 flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT); 625 flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
626 else
627 flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
628
764 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT)); 629 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
765 hdr->tags = cpu_to_le32(tag); 630 hdr->tags = cpu_to_le32(tag);
766 hdr->data_len = cpu_to_le32(task->total_xfer_len); 631 hdr->data_len = cpu_to_le32(task->total_xfer_len);
@@ -777,9 +642,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
777 642
778 buf_tmp += MVS_SSP_CMD_SZ; 643 buf_tmp += MVS_SSP_CMD_SZ;
779 buf_tmp_dma += MVS_SSP_CMD_SZ; 644 buf_tmp_dma += MVS_SSP_CMD_SZ;
780#if _MV_DUMP
781 slot->cmd_size = MVS_SSP_CMD_SZ;
782#endif
783 645
784 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ 646 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
785 buf_oaf = buf_tmp; 647 buf_oaf = buf_tmp;
@@ -986,7 +848,6 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
986 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 848 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
987 spin_unlock(&task->task_state_lock); 849 spin_unlock(&task->task_state_lock);
988 850
989 mvs_hba_memory_dump(mvi, tag, task->task_proto);
990 mvi_dev->running_req++; 851 mvi_dev->running_req++;
991 ++(*pass); 852 ++(*pass);
992 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); 853 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
@@ -1189,9 +1050,9 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1189 mvs_slot_free(mvi, slot_idx); 1050 mvs_slot_free(mvi, slot_idx);
1190} 1051}
1191 1052
1192static void mvs_update_wideport(struct mvs_info *mvi, int i) 1053static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
1193{ 1054{
1194 struct mvs_phy *phy = &mvi->phy[i]; 1055 struct mvs_phy *phy = &mvi->phy[phy_no];
1195 struct mvs_port *port = phy->port; 1056 struct mvs_port *port = phy->port;
1196 int j, no; 1057 int j, no;
1197 1058
@@ -1246,18 +1107,17 @@ static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
1246 return NULL; 1107 return NULL;
1247 1108
1248 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); 1109 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
1249 s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1110 s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1250 1111
1251 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); 1112 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
1252 s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1113 s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1253 1114
1254 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); 1115 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
1255 s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1116 s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1256 1117
1257 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); 1118 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
1258 s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1119 s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1259 1120
1260 /* Workaround: take some ATAPI devices for ATA */
1261 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) 1121 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
1262 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); 1122 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
1263 1123
@@ -1269,6 +1129,13 @@ static u32 mvs_is_sig_fis_received(u32 irq_status)
1269 return irq_status & PHYEV_SIG_FIS; 1129 return irq_status & PHYEV_SIG_FIS;
1270} 1130}
1271 1131
1132static void mvs_sig_remove_timer(struct mvs_phy *phy)
1133{
1134 if (phy->timer.function)
1135 del_timer(&phy->timer);
1136 phy->timer.function = NULL;
1137}
1138
1272void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) 1139void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1273{ 1140{
1274 struct mvs_phy *phy = &mvi->phy[i]; 1141 struct mvs_phy *phy = &mvi->phy[i];
@@ -1291,6 +1158,7 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1291 if (phy->phy_type & PORT_TYPE_SATA) { 1158 if (phy->phy_type & PORT_TYPE_SATA) {
1292 phy->identify.target_port_protocols = SAS_PROTOCOL_STP; 1159 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
1293 if (mvs_is_sig_fis_received(phy->irq_status)) { 1160 if (mvs_is_sig_fis_received(phy->irq_status)) {
1161 mvs_sig_remove_timer(phy);
1294 phy->phy_attached = 1; 1162 phy->phy_attached = 1;
1295 phy->att_dev_sas_addr = 1163 phy->att_dev_sas_addr =
1296 i + mvi->id * mvi->chip->n_phy; 1164 i + mvi->id * mvi->chip->n_phy;
@@ -1308,7 +1176,6 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1308 tmp | PHYEV_SIG_FIS); 1176 tmp | PHYEV_SIG_FIS);
1309 phy->phy_attached = 0; 1177 phy->phy_attached = 0;
1310 phy->phy_type &= ~PORT_TYPE_SATA; 1178 phy->phy_type &= ~PORT_TYPE_SATA;
1311 MVS_CHIP_DISP->phy_reset(mvi, i, 0);
1312 goto out_done; 1179 goto out_done;
1313 } 1180 }
1314 } else if (phy->phy_type & PORT_TYPE_SAS 1181 } else if (phy->phy_type & PORT_TYPE_SAS
@@ -1334,9 +1201,9 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1334 if (MVS_CHIP_DISP->phy_work_around) 1201 if (MVS_CHIP_DISP->phy_work_around)
1335 MVS_CHIP_DISP->phy_work_around(mvi, i); 1202 MVS_CHIP_DISP->phy_work_around(mvi, i);
1336 } 1203 }
1337 mv_dprintk("port %d attach dev info is %x\n", 1204 mv_dprintk("phy %d attach dev info is %x\n",
1338 i + mvi->id * mvi->chip->n_phy, phy->att_dev_info); 1205 i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
1339 mv_dprintk("port %d attach sas addr is %llx\n", 1206 mv_dprintk("phy %d attach sas addr is %llx\n",
1340 i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr); 1207 i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
1341out_done: 1208out_done:
1342 if (get_st) 1209 if (get_st)
@@ -1361,10 +1228,10 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1361 } 1228 }
1362 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy; 1229 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
1363 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi]; 1230 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
1364 if (sas_port->id >= mvi->chip->n_phy) 1231 if (i >= mvi->chip->n_phy)
1365 port = &mvi->port[sas_port->id - mvi->chip->n_phy]; 1232 port = &mvi->port[i - mvi->chip->n_phy];
1366 else 1233 else
1367 port = &mvi->port[sas_port->id]; 1234 port = &mvi->port[i];
1368 if (lock) 1235 if (lock)
1369 spin_lock_irqsave(&mvi->lock, flags); 1236 spin_lock_irqsave(&mvi->lock, flags);
1370 port->port_attached = 1; 1237 port->port_attached = 1;
@@ -1393,7 +1260,7 @@ static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
1393 return; 1260 return;
1394 } 1261 }
1395 list_for_each_entry(dev, &port->dev_list, dev_list_node) 1262 list_for_each_entry(dev, &port->dev_list, dev_list_node)
1396 mvs_do_release_task(phy->mvi, phy_no, NULL); 1263 mvs_do_release_task(phy->mvi, phy_no, dev);
1397 1264
1398} 1265}
1399 1266
@@ -1457,6 +1324,7 @@ int mvs_dev_found_notify(struct domain_device *dev, int lock)
1457 mvi_device->dev_status = MVS_DEV_NORMAL; 1324 mvi_device->dev_status = MVS_DEV_NORMAL;
1458 mvi_device->dev_type = dev->dev_type; 1325 mvi_device->dev_type = dev->dev_type;
1459 mvi_device->mvi_info = mvi; 1326 mvi_device->mvi_info = mvi;
1327 mvi_device->sas_device = dev;
1460 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { 1328 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
1461 int phy_id; 1329 int phy_id;
1462 u8 phy_num = parent_dev->ex_dev.num_phys; 1330 u8 phy_num = parent_dev->ex_dev.num_phys;
@@ -1508,6 +1376,7 @@ void mvs_dev_gone_notify(struct domain_device *dev)
1508 mv_dprintk("found dev has gone.\n"); 1376 mv_dprintk("found dev has gone.\n");
1509 } 1377 }
1510 dev->lldd_dev = NULL; 1378 dev->lldd_dev = NULL;
1379 mvi_dev->sas_device = NULL;
1511 1380
1512 spin_unlock_irqrestore(&mvi->lock, flags); 1381 spin_unlock_irqrestore(&mvi->lock, flags);
1513} 1382}
@@ -1555,7 +1424,6 @@ static void mvs_tmf_timedout(unsigned long data)
1555 complete(&task->completion); 1424 complete(&task->completion);
1556} 1425}
1557 1426
1558/* XXX */
1559#define MVS_TASK_TIMEOUT 20 1427#define MVS_TASK_TIMEOUT 20
1560static int mvs_exec_internal_tmf_task(struct domain_device *dev, 1428static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1561 void *parameter, u32 para_len, struct mvs_tmf_task *tmf) 1429 void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
@@ -1588,7 +1456,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1588 } 1456 }
1589 1457
1590 wait_for_completion(&task->completion); 1458 wait_for_completion(&task->completion);
1591 res = -TMF_RESP_FUNC_FAILED; 1459 res = TMF_RESP_FUNC_FAILED;
1592 /* Even TMF timed out, return direct. */ 1460 /* Even TMF timed out, return direct. */
1593 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 1461 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1594 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1462 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
@@ -1638,11 +1506,10 @@ static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1638 u8 *lun, struct mvs_tmf_task *tmf) 1506 u8 *lun, struct mvs_tmf_task *tmf)
1639{ 1507{
1640 struct sas_ssp_task ssp_task; 1508 struct sas_ssp_task ssp_task;
1641 DECLARE_COMPLETION_ONSTACK(completion);
1642 if (!(dev->tproto & SAS_PROTOCOL_SSP)) 1509 if (!(dev->tproto & SAS_PROTOCOL_SSP))
1643 return TMF_RESP_FUNC_ESUPP; 1510 return TMF_RESP_FUNC_ESUPP;
1644 1511
1645 strncpy((u8 *)&ssp_task.LUN, lun, 8); 1512 memcpy(ssp_task.LUN, lun, 8);
1646 1513
1647 return mvs_exec_internal_tmf_task(dev, &ssp_task, 1514 return mvs_exec_internal_tmf_task(dev, &ssp_task,
1648 sizeof(ssp_task), tmf); 1515 sizeof(ssp_task), tmf);
@@ -1666,7 +1533,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1666int mvs_lu_reset(struct domain_device *dev, u8 *lun) 1533int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1667{ 1534{
1668 unsigned long flags; 1535 unsigned long flags;
1669 int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; 1536 int rc = TMF_RESP_FUNC_FAILED;
1670 struct mvs_tmf_task tmf_task; 1537 struct mvs_tmf_task tmf_task;
1671 struct mvs_device * mvi_dev = dev->lldd_dev; 1538 struct mvs_device * mvi_dev = dev->lldd_dev;
1672 struct mvs_info *mvi = mvi_dev->mvi_info; 1539 struct mvs_info *mvi = mvi_dev->mvi_info;
@@ -1675,10 +1542,8 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1675 mvi_dev->dev_status = MVS_DEV_EH; 1542 mvi_dev->dev_status = MVS_DEV_EH;
1676 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); 1543 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1677 if (rc == TMF_RESP_FUNC_COMPLETE) { 1544 if (rc == TMF_RESP_FUNC_COMPLETE) {
1678 num = mvs_find_dev_phyno(dev, phyno);
1679 spin_lock_irqsave(&mvi->lock, flags); 1545 spin_lock_irqsave(&mvi->lock, flags);
1680 for (i = 0; i < num; i++) 1546 mvs_release_task(mvi, dev);
1681 mvs_release_task(mvi, dev);
1682 spin_unlock_irqrestore(&mvi->lock, flags); 1547 spin_unlock_irqrestore(&mvi->lock, flags);
1683 } 1548 }
1684 /* If failed, fall-through I_T_Nexus reset */ 1549 /* If failed, fall-through I_T_Nexus reset */
@@ -1696,11 +1561,12 @@ int mvs_I_T_nexus_reset(struct domain_device *dev)
1696 1561
1697 if (mvi_dev->dev_status != MVS_DEV_EH) 1562 if (mvi_dev->dev_status != MVS_DEV_EH)
1698 return TMF_RESP_FUNC_COMPLETE; 1563 return TMF_RESP_FUNC_COMPLETE;
1564 else
1565 mvi_dev->dev_status = MVS_DEV_NORMAL;
1699 rc = mvs_debug_I_T_nexus_reset(dev); 1566 rc = mvs_debug_I_T_nexus_reset(dev);
1700 mv_printk("%s for device[%x]:rc= %d\n", 1567 mv_printk("%s for device[%x]:rc= %d\n",
1701 __func__, mvi_dev->device_id, rc); 1568 __func__, mvi_dev->device_id, rc);
1702 1569
1703 /* housekeeper */
1704 spin_lock_irqsave(&mvi->lock, flags); 1570 spin_lock_irqsave(&mvi->lock, flags);
1705 mvs_release_task(mvi, dev); 1571 mvs_release_task(mvi, dev);
1706 spin_unlock_irqrestore(&mvi->lock, flags); 1572 spin_unlock_irqrestore(&mvi->lock, flags);
@@ -1739,9 +1605,6 @@ int mvs_query_task(struct sas_task *task)
1739 case TMF_RESP_FUNC_FAILED: 1605 case TMF_RESP_FUNC_FAILED:
1740 case TMF_RESP_FUNC_COMPLETE: 1606 case TMF_RESP_FUNC_COMPLETE:
1741 break; 1607 break;
1742 default:
1743 rc = TMF_RESP_FUNC_COMPLETE;
1744 break;
1745 } 1608 }
1746 } 1609 }
1747 mv_printk("%s:rc= %d\n", __func__, rc); 1610 mv_printk("%s:rc= %d\n", __func__, rc);
@@ -1761,8 +1624,8 @@ int mvs_abort_task(struct sas_task *task)
1761 u32 tag; 1624 u32 tag;
1762 1625
1763 if (!mvi_dev) { 1626 if (!mvi_dev) {
1764 mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__); 1627 mv_printk("Device has removed\n");
1765 rc = TMF_RESP_FUNC_FAILED; 1628 return TMF_RESP_FUNC_FAILED;
1766 } 1629 }
1767 1630
1768 mvi = mvi_dev->mvi_info; 1631 mvi = mvi_dev->mvi_info;
@@ -1807,25 +1670,17 @@ int mvs_abort_task(struct sas_task *task)
1807 1670
1808 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1671 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1809 task->task_proto & SAS_PROTOCOL_STP) { 1672 task->task_proto & SAS_PROTOCOL_STP) {
1810 /* to do free register_set */
1811 if (SATA_DEV == dev->dev_type) { 1673 if (SATA_DEV == dev->dev_type) {
1812 struct mvs_slot_info *slot = task->lldd_task; 1674 struct mvs_slot_info *slot = task->lldd_task;
1813 struct task_status_struct *tstat;
1814 u32 slot_idx = (u32)(slot - mvi->slot_info); 1675 u32 slot_idx = (u32)(slot - mvi->slot_info);
1815 tstat = &task->task_status; 1676 mv_dprintk("mvs_abort_task() mvi=%p task=%p "
1816 mv_dprintk(KERN_DEBUG "mv_abort_task() mvi=%p task=%p "
1817 "slot=%p slot_idx=x%x\n", 1677 "slot=%p slot_idx=x%x\n",
1818 mvi, task, slot, slot_idx); 1678 mvi, task, slot, slot_idx);
1819 tstat->stat = SAS_ABORTED_TASK; 1679 mvs_tmf_timedout((unsigned long)task);
1820 if (mvi_dev && mvi_dev->running_req)
1821 mvi_dev->running_req--;
1822 if (sas_protocol_ata(task->task_proto))
1823 mvs_free_reg_set(mvi, mvi_dev);
1824 mvs_slot_task_free(mvi, task, slot, slot_idx); 1680 mvs_slot_task_free(mvi, task, slot, slot_idx);
1825 return -1; 1681 rc = TMF_RESP_FUNC_COMPLETE;
1682 goto out;
1826 } 1683 }
1827 } else {
1828 /* SMP */
1829 1684
1830 } 1685 }
1831out: 1686out:
@@ -1891,12 +1746,63 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1891 return stat; 1746 return stat;
1892} 1747}
1893 1748
1749void mvs_set_sense(u8 *buffer, int len, int d_sense,
1750 int key, int asc, int ascq)
1751{
1752 memset(buffer, 0, len);
1753
1754 if (d_sense) {
1755 /* Descriptor format */
1756 if (len < 4) {
1757 mv_printk("Length %d of sense buffer too small to "
1758 "fit sense %x:%x:%x", len, key, asc, ascq);
1759 }
1760
1761 buffer[0] = 0x72; /* Response Code */
1762 if (len > 1)
1763 buffer[1] = key; /* Sense Key */
1764 if (len > 2)
1765 buffer[2] = asc; /* ASC */
1766 if (len > 3)
1767 buffer[3] = ascq; /* ASCQ */
1768 } else {
1769 if (len < 14) {
1770 mv_printk("Length %d of sense buffer too small to "
1771 "fit sense %x:%x:%x", len, key, asc, ascq);
1772 }
1773
1774 buffer[0] = 0x70; /* Response Code */
1775 if (len > 2)
1776 buffer[2] = key; /* Sense Key */
1777 if (len > 7)
1778 buffer[7] = 0x0a; /* Additional Sense Length */
1779 if (len > 12)
1780 buffer[12] = asc; /* ASC */
1781 if (len > 13)
1782 buffer[13] = ascq; /* ASCQ */
1783 }
1784
1785 return;
1786}
1787
1788void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
1789 u8 key, u8 asc, u8 asc_q)
1790{
1791 iu->datapres = 2;
1792 iu->response_data_len = 0;
1793 iu->sense_data_len = 17;
1794 iu->status = 02;
1795 mvs_set_sense(iu->sense_data, 17, 0,
1796 key, asc, asc_q);
1797}
1798
1894static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, 1799static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1895 u32 slot_idx) 1800 u32 slot_idx)
1896{ 1801{
1897 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; 1802 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1898 int stat; 1803 int stat;
1899 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); 1804 u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
1805 u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
1900 u32 tfs = 0; 1806 u32 tfs = 0;
1901 enum mvs_port_type type = PORT_TYPE_SAS; 1807 enum mvs_port_type type = PORT_TYPE_SAS;
1902 1808
@@ -1908,8 +1814,19 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1908 stat = SAM_STAT_CHECK_CONDITION; 1814 stat = SAM_STAT_CHECK_CONDITION;
1909 switch (task->task_proto) { 1815 switch (task->task_proto) {
1910 case SAS_PROTOCOL_SSP: 1816 case SAS_PROTOCOL_SSP:
1817 {
1911 stat = SAS_ABORTED_TASK; 1818 stat = SAS_ABORTED_TASK;
1819 if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
1820 struct ssp_response_iu *iu = slot->response +
1821 sizeof(struct mvs_err_info);
1822 mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01);
1823 sas_ssp_task_response(mvi->dev, task, iu);
1824 stat = SAM_STAT_CHECK_CONDITION;
1825 }
1826 if (err_dw1 & bit(31))
1827 mv_printk("reuse same slot, retry command.\n");
1912 break; 1828 break;
1829 }
1913 case SAS_PROTOCOL_SMP: 1830 case SAS_PROTOCOL_SMP:
1914 stat = SAM_STAT_CHECK_CONDITION; 1831 stat = SAM_STAT_CHECK_CONDITION;
1915 break; 1832 break;
@@ -1918,10 +1835,8 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1918 case SAS_PROTOCOL_STP: 1835 case SAS_PROTOCOL_STP:
1919 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 1836 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1920 { 1837 {
1921 if (err_dw0 == 0x80400002)
1922 mv_printk("find reserved error, why?\n");
1923
1924 task->ata_task.use_ncq = 0; 1838 task->ata_task.use_ncq = 0;
1839 stat = SAS_PROTO_RESPONSE;
1925 mvs_sata_done(mvi, task, slot_idx, err_dw0); 1840 mvs_sata_done(mvi, task, slot_idx, err_dw0);
1926 } 1841 }
1927 break; 1842 break;
@@ -1945,8 +1860,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1945 void *to; 1860 void *to;
1946 enum exec_status sts; 1861 enum exec_status sts;
1947 1862
1948 if (mvi->exp_req)
1949 mvi->exp_req--;
1950 if (unlikely(!task || !task->lldd_task || !task->dev)) 1863 if (unlikely(!task || !task->lldd_task || !task->dev))
1951 return -1; 1864 return -1;
1952 1865
@@ -1954,8 +1867,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1954 dev = task->dev; 1867 dev = task->dev;
1955 mvi_dev = dev->lldd_dev; 1868 mvi_dev = dev->lldd_dev;
1956 1869
1957 mvs_hba_cq_dump(mvi);
1958
1959 spin_lock(&task->task_state_lock); 1870 spin_lock(&task->task_state_lock);
1960 task->task_state_flags &= 1871 task->task_state_flags &=
1961 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 1872 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
@@ -1978,6 +1889,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1978 return -1; 1889 return -1;
1979 } 1890 }
1980 1891
1892 /* when no device attaching, go ahead and complete by error handling*/
1981 if (unlikely(!mvi_dev || flags)) { 1893 if (unlikely(!mvi_dev || flags)) {
1982 if (!mvi_dev) 1894 if (!mvi_dev)
1983 mv_dprintk("port has not device.\n"); 1895 mv_dprintk("port has not device.\n");
@@ -1987,6 +1899,9 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1987 1899
1988 /* error info record present */ 1900 /* error info record present */
1989 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { 1901 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1902 mv_dprintk("port %d slot %d rx_desc %X has error info"
1903 "%016llX.\n", slot->port->sas_port.id, slot_idx,
1904 rx_desc, (u64)(*(u64 *)slot->response));
1990 tstat->stat = mvs_slot_err(mvi, task, slot_idx); 1905 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1991 tstat->resp = SAS_TASK_COMPLETE; 1906 tstat->resp = SAS_TASK_COMPLETE;
1992 goto out; 1907 goto out;
@@ -2048,8 +1963,7 @@ out:
2048 spin_unlock(&mvi->lock); 1963 spin_unlock(&mvi->lock);
2049 if (task->task_done) 1964 if (task->task_done)
2050 task->task_done(task); 1965 task->task_done(task);
2051 else 1966
2052 mv_dprintk("why has not task_done.\n");
2053 spin_lock(&mvi->lock); 1967 spin_lock(&mvi->lock);
2054 1968
2055 return sts; 1969 return sts;
@@ -2092,7 +2006,6 @@ void mvs_release_task(struct mvs_info *mvi,
2092 struct domain_device *dev) 2006 struct domain_device *dev)
2093{ 2007{
2094 int i, phyno[WIDE_PORT_MAX_PHY], num; 2008 int i, phyno[WIDE_PORT_MAX_PHY], num;
2095 /* housekeeper */
2096 num = mvs_find_dev_phyno(dev, phyno); 2009 num = mvs_find_dev_phyno(dev, phyno);
2097 for (i = 0; i < num; i++) 2010 for (i = 0; i < num; i++)
2098 mvs_do_release_task(mvi, phyno[i], dev); 2011 mvs_do_release_task(mvi, phyno[i], dev);
@@ -2111,13 +2024,13 @@ static void mvs_work_queue(struct work_struct *work)
2111 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q); 2024 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
2112 struct mvs_info *mvi = mwq->mvi; 2025 struct mvs_info *mvi = mwq->mvi;
2113 unsigned long flags; 2026 unsigned long flags;
2027 u32 phy_no = (unsigned long) mwq->data;
2028 struct sas_ha_struct *sas_ha = mvi->sas;
2029 struct mvs_phy *phy = &mvi->phy[phy_no];
2030 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2114 2031
2115 spin_lock_irqsave(&mvi->lock, flags); 2032 spin_lock_irqsave(&mvi->lock, flags);
2116 if (mwq->handler & PHY_PLUG_EVENT) { 2033 if (mwq->handler & PHY_PLUG_EVENT) {
2117 u32 phy_no = (unsigned long) mwq->data;
2118 struct sas_ha_struct *sas_ha = mvi->sas;
2119 struct mvs_phy *phy = &mvi->phy[phy_no];
2120 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2121 2034
2122 if (phy->phy_event & PHY_PLUG_OUT) { 2035 if (phy->phy_event & PHY_PLUG_OUT) {
2123 u32 tmp; 2036 u32 tmp;
@@ -2139,6 +2052,11 @@ static void mvs_work_queue(struct work_struct *work)
2139 mv_dprintk("phy%d Attached Device\n", phy_no); 2052 mv_dprintk("phy%d Attached Device\n", phy_no);
2140 } 2053 }
2141 } 2054 }
2055 } else if (mwq->handler & EXP_BRCT_CHG) {
2056 phy->phy_event &= ~EXP_BRCT_CHG;
2057 sas_ha->notify_port_event(sas_phy,
2058 PORTE_BROADCAST_RCVD);
2059 mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
2142 } 2060 }
2143 list_del(&mwq->entry); 2061 list_del(&mwq->entry);
2144 spin_unlock_irqrestore(&mvi->lock, flags); 2062 spin_unlock_irqrestore(&mvi->lock, flags);
@@ -2174,29 +2092,21 @@ static void mvs_sig_time_out(unsigned long tphy)
2174 if (&mvi->phy[phy_no] == phy) { 2092 if (&mvi->phy[phy_no] == phy) {
2175 mv_dprintk("Get signature time out, reset phy %d\n", 2093 mv_dprintk("Get signature time out, reset phy %d\n",
2176 phy_no+mvi->id*mvi->chip->n_phy); 2094 phy_no+mvi->id*mvi->chip->n_phy);
2177 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1); 2095 MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
2178 } 2096 }
2179 } 2097 }
2180} 2098}
2181 2099
2182static void mvs_sig_remove_timer(struct mvs_phy *phy)
2183{
2184 if (phy->timer.function)
2185 del_timer(&phy->timer);
2186 phy->timer.function = NULL;
2187}
2188
2189void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) 2100void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2190{ 2101{
2191 u32 tmp; 2102 u32 tmp;
2192 struct sas_ha_struct *sas_ha = mvi->sas;
2193 struct mvs_phy *phy = &mvi->phy[phy_no]; 2103 struct mvs_phy *phy = &mvi->phy[phy_no];
2194 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2195 2104
2196 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no); 2105 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
2197 mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy, 2106 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
2107 mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
2198 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no)); 2108 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
2199 mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy, 2109 mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
2200 phy->irq_status); 2110 phy->irq_status);
2201 2111
2202 /* 2112 /*
@@ -2205,11 +2115,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2205 */ 2115 */
2206 2116
2207 if (phy->irq_status & PHYEV_DCDR_ERR) { 2117 if (phy->irq_status & PHYEV_DCDR_ERR) {
2208 mv_dprintk("port %d STP decoding error.\n", 2118 mv_dprintk("phy %d STP decoding error.\n",
2209 phy_no + mvi->id*mvi->chip->n_phy); 2119 phy_no + mvi->id*mvi->chip->n_phy);
2210 } 2120 }
2211 2121
2212 if (phy->irq_status & PHYEV_POOF) { 2122 if (phy->irq_status & PHYEV_POOF) {
2123 mdelay(500);
2213 if (!(phy->phy_event & PHY_PLUG_OUT)) { 2124 if (!(phy->phy_event & PHY_PLUG_OUT)) {
2214 int dev_sata = phy->phy_type & PORT_TYPE_SATA; 2125 int dev_sata = phy->phy_type & PORT_TYPE_SATA;
2215 int ready; 2126 int ready;
@@ -2220,17 +2131,13 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2220 (void *)(unsigned long)phy_no, 2131 (void *)(unsigned long)phy_no,
2221 PHY_PLUG_EVENT); 2132 PHY_PLUG_EVENT);
2222 ready = mvs_is_phy_ready(mvi, phy_no); 2133 ready = mvs_is_phy_ready(mvi, phy_no);
2223 if (!ready)
2224 mv_dprintk("phy%d Unplug Notice\n",
2225 phy_no +
2226 mvi->id * mvi->chip->n_phy);
2227 if (ready || dev_sata) { 2134 if (ready || dev_sata) {
2228 if (MVS_CHIP_DISP->stp_reset) 2135 if (MVS_CHIP_DISP->stp_reset)
2229 MVS_CHIP_DISP->stp_reset(mvi, 2136 MVS_CHIP_DISP->stp_reset(mvi,
2230 phy_no); 2137 phy_no);
2231 else 2138 else
2232 MVS_CHIP_DISP->phy_reset(mvi, 2139 MVS_CHIP_DISP->phy_reset(mvi,
2233 phy_no, 0); 2140 phy_no, MVS_SOFT_RESET);
2234 return; 2141 return;
2235 } 2142 }
2236 } 2143 }
@@ -2243,13 +2150,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2243 if (phy->timer.function == NULL) { 2150 if (phy->timer.function == NULL) {
2244 phy->timer.data = (unsigned long)phy; 2151 phy->timer.data = (unsigned long)phy;
2245 phy->timer.function = mvs_sig_time_out; 2152 phy->timer.function = mvs_sig_time_out;
2246 phy->timer.expires = jiffies + 10*HZ; 2153 phy->timer.expires = jiffies + 5*HZ;
2247 add_timer(&phy->timer); 2154 add_timer(&phy->timer);
2248 } 2155 }
2249 } 2156 }
2250 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { 2157 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
2251 phy->phy_status = mvs_is_phy_ready(mvi, phy_no); 2158 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
2252 mvs_sig_remove_timer(phy);
2253 mv_dprintk("notify plug in on phy[%d]\n", phy_no); 2159 mv_dprintk("notify plug in on phy[%d]\n", phy_no);
2254 if (phy->phy_status) { 2160 if (phy->phy_status) {
2255 mdelay(10); 2161 mdelay(10);
@@ -2263,14 +2169,14 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2263 } 2169 }
2264 mvs_update_phyinfo(mvi, phy_no, 0); 2170 mvs_update_phyinfo(mvi, phy_no, 0);
2265 if (phy->phy_type & PORT_TYPE_SAS) { 2171 if (phy->phy_type & PORT_TYPE_SAS) {
2266 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2); 2172 MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
2267 mdelay(10); 2173 mdelay(10);
2268 } 2174 }
2269 2175
2270 mvs_bytes_dmaed(mvi, phy_no); 2176 mvs_bytes_dmaed(mvi, phy_no);
2271 /* whether driver is going to handle hot plug */ 2177 /* whether driver is going to handle hot plug */
2272 if (phy->phy_event & PHY_PLUG_OUT) { 2178 if (phy->phy_event & PHY_PLUG_OUT) {
2273 mvs_port_notify_formed(sas_phy, 0); 2179 mvs_port_notify_formed(&phy->sas_phy, 0);
2274 phy->phy_event &= ~PHY_PLUG_OUT; 2180 phy->phy_event &= ~PHY_PLUG_OUT;
2275 } 2181 }
2276 } else { 2182 } else {
@@ -2278,13 +2184,11 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2278 phy_no + mvi->id*mvi->chip->n_phy); 2184 phy_no + mvi->id*mvi->chip->n_phy);
2279 } 2185 }
2280 } else if (phy->irq_status & PHYEV_BROAD_CH) { 2186 } else if (phy->irq_status & PHYEV_BROAD_CH) {
2281 mv_dprintk("port %d broadcast change.\n", 2187 mv_dprintk("phy %d broadcast change.\n",
2282 phy_no + mvi->id*mvi->chip->n_phy); 2188 phy_no + mvi->id*mvi->chip->n_phy);
2283 /* exception for Samsung disk drive*/ 2189 mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
2284 mdelay(1000); 2190 EXP_BRCT_CHG);
2285 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
2286 } 2191 }
2287 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
2288} 2192}
2289 2193
2290int mvs_int_rx(struct mvs_info *mvi, bool self_clear) 2194int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 1367d8b9350d..44d7885a4a1d 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -48,12 +48,8 @@
48 48
49#define DRV_NAME "mvsas" 49#define DRV_NAME "mvsas"
50#define DRV_VERSION "0.8.2" 50#define DRV_VERSION "0.8.2"
51#define _MV_DUMP 0
52#define MVS_ID_NOT_MAPPED 0x7f 51#define MVS_ID_NOT_MAPPED 0x7f
53/* #define DISABLE_HOTPLUG_DMA_FIX */
54// #define MAX_EXP_RUNNING_REQ 2
55#define WIDE_PORT_MAX_PHY 4 52#define WIDE_PORT_MAX_PHY 4
56#define MV_DISABLE_NCQ 0
57#define mv_printk(fmt, arg ...) \ 53#define mv_printk(fmt, arg ...) \
58 printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg) 54 printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
59#ifdef MV_DEBUG 55#ifdef MV_DEBUG
@@ -64,6 +60,7 @@
64#endif 60#endif
65#define MV_MAX_U32 0xffffffff 61#define MV_MAX_U32 0xffffffff
66 62
63extern int interrupt_coalescing;
67extern struct mvs_tgt_initiator mvs_tgt; 64extern struct mvs_tgt_initiator mvs_tgt;
68extern struct mvs_info *tgt_mvi; 65extern struct mvs_info *tgt_mvi;
69extern const struct mvs_dispatch mvs_64xx_dispatch; 66extern const struct mvs_dispatch mvs_64xx_dispatch;
@@ -99,6 +96,11 @@ enum dev_status {
99 MVS_DEV_EH = 0x1, 96 MVS_DEV_EH = 0x1,
100}; 97};
101 98
99enum dev_reset {
100 MVS_SOFT_RESET = 0,
101 MVS_HARD_RESET = 1,
102 MVS_PHY_TUNE = 2,
103};
102 104
103struct mvs_info; 105struct mvs_info;
104 106
@@ -130,7 +132,6 @@ struct mvs_dispatch {
130 u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port); 132 u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
131 void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val); 133 void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
132 134
133 void (*get_sas_addr)(void *buf, u32 buflen);
134 void (*command_active)(struct mvs_info *mvi, u32 slot_idx); 135 void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
135 void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all); 136 void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all);
136 void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type, 137 void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
@@ -167,9 +168,10 @@ struct mvs_dispatch {
167 ); 168 );
168 int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd); 169 int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
169 int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout); 170 int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
170#ifndef DISABLE_HOTPLUG_DMA_FIX 171 void (*dma_fix)(struct mvs_info *mvi, u32 phy_mask,
171 void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd); 172 int buf_len, int from, void *prd);
172#endif 173 void (*tune_interrupt)(struct mvs_info *mvi, u32 time);
174 void (*non_spec_ncq_error)(struct mvs_info *mvi);
173 175
174}; 176};
175 177
@@ -179,9 +181,11 @@ struct mvs_chip_info {
179 u32 fis_offs; 181 u32 fis_offs;
180 u32 fis_count; 182 u32 fis_count;
181 u32 srs_sz; 183 u32 srs_sz;
184 u32 sg_width;
182 u32 slot_width; 185 u32 slot_width;
183 const struct mvs_dispatch *dispatch; 186 const struct mvs_dispatch *dispatch;
184}; 187};
188#define MVS_MAX_SG (1U << mvi->chip->sg_width)
185#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) 189#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
186#define MVS_RX_FISL_SZ \ 190#define MVS_RX_FISL_SZ \
187 (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100)) 191 (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
@@ -249,6 +253,73 @@ struct mvs_device {
249 u16 reserved; 253 u16 reserved;
250}; 254};
251 255
256/* Generate PHY tunning parameters */
257struct phy_tuning {
258 /* 1 bit, transmitter emphasis enable */
259 u8 trans_emp_en:1;
260 /* 4 bits, transmitter emphasis amplitude */
261 u8 trans_emp_amp:4;
262 /* 3 bits, reserved space */
263 u8 Reserved_2bit_1:3;
264 /* 5 bits, transmitter amplitude */
265 u8 trans_amp:5;
266 /* 2 bits, transmitter amplitude adjust */
267 u8 trans_amp_adj:2;
268 /* 1 bit, reserved space */
269 u8 resv_2bit_2:1;
270 /* 2 bytes, reserved space */
271 u8 reserved[2];
272};
273
274struct ffe_control {
275 /* 4 bits, FFE Capacitor Select (value range 0~F) */
276 u8 ffe_cap_sel:4;
277 /* 3 bits, FFE Resistor Select (value range 0~7) */
278 u8 ffe_rss_sel:3;
279 /* 1 bit reserve*/
280 u8 reserved:1;
281};
282
283/*
284 * HBA_Info_Page is saved in Flash/NVRAM, total 256 bytes.
285 * The data area is valid only Signature="MRVL".
286 * If any member fills with 0xFF, the member is invalid.
287 */
288struct hba_info_page {
289 /* Dword 0 */
290 /* 4 bytes, structure signature,should be "MRVL" at first initial */
291 u8 signature[4];
292
293 /* Dword 1-13 */
294 u32 reserved1[13];
295
296 /* Dword 14-29 */
297 /* 64 bytes, SAS address for each port */
298 u64 sas_addr[8];
299
300 /* Dword 30-31 */
301 /* 8 bytes for vanir 8 port PHY FFE seeting
302 * BIT 0~3 : FFE Capacitor select(value range 0~F)
303 * BIT 4~6 : FFE Resistor select(value range 0~7)
304 * BIT 7: reserve.
305 */
306
307 struct ffe_control ffe_ctl[8];
308 /* Dword 32 -43 */
309 u32 reserved2[12];
310
311 /* Dword 44-45 */
312 /* 8 bytes, 0: 1.5G, 1: 3.0G, should be 0x01 at first initial */
313 u8 phy_rate[8];
314
315 /* Dword 46-53 */
316 /* 32 bytes, PHY tuning parameters for each PHY*/
317 struct phy_tuning phy_tuning[8];
318
319 /* Dword 54-63 */
320 u32 reserved3[10];
321}; /* total 256 bytes */
322
252struct mvs_slot_info { 323struct mvs_slot_info {
253 struct list_head entry; 324 struct list_head entry;
254 union { 325 union {
@@ -264,9 +335,6 @@ struct mvs_slot_info {
264 */ 335 */
265 void *buf; 336 void *buf;
266 dma_addr_t buf_dma; 337 dma_addr_t buf_dma;
267#if _MV_DUMP
268 u32 cmd_size;
269#endif
270 void *response; 338 void *response;
271 struct mvs_port *port; 339 struct mvs_port *port;
272 struct mvs_device *device; 340 struct mvs_device *device;
@@ -320,12 +388,10 @@ struct mvs_info {
320 const struct mvs_chip_info *chip; 388 const struct mvs_chip_info *chip;
321 389
322 int tags_num; 390 int tags_num;
323 DECLARE_BITMAP(tags, MVS_SLOTS); 391 unsigned long *tags;
324 /* further per-slot information */ 392 /* further per-slot information */
325 struct mvs_phy phy[MVS_MAX_PHYS]; 393 struct mvs_phy phy[MVS_MAX_PHYS];
326 struct mvs_port port[MVS_MAX_PHYS]; 394 struct mvs_port port[MVS_MAX_PHYS];
327 u32 irq;
328 u32 exp_req;
329 u32 id; 395 u32 id;
330 u64 sata_reg_set; 396 u64 sata_reg_set;
331 struct list_head *hba_list; 397 struct list_head *hba_list;
@@ -337,12 +403,13 @@ struct mvs_info {
337 u32 flashsectSize; 403 u32 flashsectSize;
338 404
339 void *addon; 405 void *addon;
406 struct hba_info_page hba_info_param;
340 struct mvs_device devices[MVS_MAX_DEVICES]; 407 struct mvs_device devices[MVS_MAX_DEVICES];
341#ifndef DISABLE_HOTPLUG_DMA_FIX
342 void *bulk_buffer; 408 void *bulk_buffer;
343 dma_addr_t bulk_buffer_dma; 409 dma_addr_t bulk_buffer_dma;
410 void *bulk_buffer1;
411 dma_addr_t bulk_buffer_dma1;
344#define TRASH_BUCKET_SIZE 0x20000 412#define TRASH_BUCKET_SIZE 0x20000
345#endif
346 void *dma_pool; 413 void *dma_pool;
347 struct mvs_slot_info slot_info[0]; 414 struct mvs_slot_info slot_info[0];
348}; 415};
@@ -350,8 +417,10 @@ struct mvs_info {
350struct mvs_prv_info{ 417struct mvs_prv_info{
351 u8 n_host; 418 u8 n_host;
352 u8 n_phy; 419 u8 n_phy;
353 u16 reserve; 420 u8 scan_finished;
421 u8 reserve;
354 struct mvs_info *mvi[2]; 422 struct mvs_info *mvi[2];
423 struct tasklet_struct mv_tasklet;
355}; 424};
356 425
357struct mvs_wq { 426struct mvs_wq {
@@ -415,6 +484,6 @@ void mvs_do_release_task(struct mvs_info *mvi, int phy_no,
415void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events); 484void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
416void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); 485void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
417int mvs_int_rx(struct mvs_info *mvi, bool self_clear); 486int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
418void mvs_hexdump(u32 size, u8 *data, u32 baseaddr); 487struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi, u8 reg_set);
419#endif 488#endif
420 489
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index aa05e661d113..b97c8ab0c20e 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -54,7 +54,7 @@
54#include <scsi/libsas.h> 54#include <scsi/libsas.h>
55#include <scsi/scsi_tcq.h> 55#include <scsi/scsi_tcq.h>
56#include <scsi/sas_ata.h> 56#include <scsi/sas_ata.h>
57#include <asm/atomic.h> 57#include <linux/atomic.h>
58#include "pm8001_defs.h" 58#include "pm8001_defs.h"
59 59
60#define DRV_NAME "pm8001" 60#define DRV_NAME "pm8001"
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index fca6a8953070..d079f9a3c6b3 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3871,6 +3871,9 @@ static long pmcraid_ioctl_passthrough(
3871 pmcraid_err("couldn't build passthrough ioadls\n"); 3871 pmcraid_err("couldn't build passthrough ioadls\n");
3872 goto out_free_buffer; 3872 goto out_free_buffer;
3873 } 3873 }
3874 } else if (request_size < 0) {
3875 rc = -EINVAL;
3876 goto out_free_buffer;
3874 } 3877 }
3875 3878
3876 /* If data is being written into the device, copy the data from user 3879 /* If data is being written into the device, copy the data from user
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 532313e0725e..7836eb01c7fc 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -42,8 +42,8 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
42 int reading; 42 int reading;
43 43
44 if (IS_QLA82XX(ha)) { 44 if (IS_QLA82XX(ha)) {
45 DEBUG2(qla_printk(KERN_INFO, ha, 45 ql_dbg(ql_dbg_user, vha, 0x705b,
46 "Firmware dump not supported for ISP82xx\n")); 46 "Firmware dump not supported for ISP82xx\n");
47 return count; 47 return count;
48 } 48 }
49 49
@@ -56,7 +56,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
56 if (!ha->fw_dump_reading) 56 if (!ha->fw_dump_reading)
57 break; 57 break;
58 58
59 qla_printk(KERN_INFO, ha, 59 ql_log(ql_log_info, vha, 0x705d,
60 "Firmware dump cleared on (%ld).\n", vha->host_no); 60 "Firmware dump cleared on (%ld).\n", vha->host_no);
61 61
62 ha->fw_dump_reading = 0; 62 ha->fw_dump_reading = 0;
@@ -66,7 +66,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
66 if (ha->fw_dumped && !ha->fw_dump_reading) { 66 if (ha->fw_dumped && !ha->fw_dump_reading) {
67 ha->fw_dump_reading = 1; 67 ha->fw_dump_reading = 1;
68 68
69 qla_printk(KERN_INFO, ha, 69 ql_log(ql_log_info, vha, 0x705e,
70 "Raw firmware dump ready for read on (%ld).\n", 70 "Raw firmware dump ready for read on (%ld).\n",
71 vha->host_no); 71 vha->host_no);
72 } 72 }
@@ -148,7 +148,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
148 } 148 }
149 149
150 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 150 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
151 qla_printk(KERN_WARNING, ha, 151 ql_log(ql_log_warn, vha, 0x705f,
152 "HBA not online, failing NVRAM update.\n"); 152 "HBA not online, failing NVRAM update.\n");
153 return -EAGAIN; 153 return -EAGAIN;
154 } 154 }
@@ -158,6 +158,8 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
158 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base, 158 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
159 count); 159 count);
160 160
161 ql_dbg(ql_dbg_user, vha, 0x7060,
162 "Setting ISP_ABORT_NEEDED\n");
161 /* NVRAM settings take effect immediately. */ 163 /* NVRAM settings take effect immediately. */
162 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 164 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
163 qla2xxx_wake_dpc(vha); 165 qla2xxx_wake_dpc(vha);
@@ -255,9 +257,9 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
255 257
256 ha->optrom_state = QLA_SWAITING; 258 ha->optrom_state = QLA_SWAITING;
257 259
258 DEBUG2(qla_printk(KERN_INFO, ha, 260 ql_dbg(ql_dbg_user, vha, 0x7061,
259 "Freeing flash region allocation -- 0x%x bytes.\n", 261 "Freeing flash region allocation -- 0x%x bytes.\n",
260 ha->optrom_region_size)); 262 ha->optrom_region_size);
261 263
262 vfree(ha->optrom_buffer); 264 vfree(ha->optrom_buffer);
263 ha->optrom_buffer = NULL; 265 ha->optrom_buffer = NULL;
@@ -273,7 +275,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
273 ha->optrom_state = QLA_SREADING; 275 ha->optrom_state = QLA_SREADING;
274 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 276 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
275 if (ha->optrom_buffer == NULL) { 277 if (ha->optrom_buffer == NULL) {
276 qla_printk(KERN_WARNING, ha, 278 ql_log(ql_log_warn, vha, 0x7062,
277 "Unable to allocate memory for optrom retrieval " 279 "Unable to allocate memory for optrom retrieval "
278 "(%x).\n", ha->optrom_region_size); 280 "(%x).\n", ha->optrom_region_size);
279 281
@@ -282,14 +284,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
282 } 284 }
283 285
284 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 286 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
285 qla_printk(KERN_WARNING, ha, 287 ql_log(ql_log_warn, vha, 0x7063,
286 "HBA not online, failing NVRAM update.\n"); 288 "HBA not online, failing NVRAM update.\n");
287 return -EAGAIN; 289 return -EAGAIN;
288 } 290 }
289 291
290 DEBUG2(qla_printk(KERN_INFO, ha, 292 ql_dbg(ql_dbg_user, vha, 0x7064,
291 "Reading flash region -- 0x%x/0x%x.\n", 293 "Reading flash region -- 0x%x/0x%x.\n",
292 ha->optrom_region_start, ha->optrom_region_size)); 294 ha->optrom_region_start, ha->optrom_region_size);
293 295
294 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 296 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
295 ha->isp_ops->read_optrom(vha, ha->optrom_buffer, 297 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
@@ -328,7 +330,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
328 else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha)) 330 else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
329 valid = 1; 331 valid = 1;
330 if (!valid) { 332 if (!valid) {
331 qla_printk(KERN_WARNING, ha, 333 ql_log(ql_log_warn, vha, 0x7065,
332 "Invalid start region 0x%x/0x%x.\n", start, size); 334 "Invalid start region 0x%x/0x%x.\n", start, size);
333 return -EINVAL; 335 return -EINVAL;
334 } 336 }
@@ -340,17 +342,17 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
340 ha->optrom_state = QLA_SWRITING; 342 ha->optrom_state = QLA_SWRITING;
341 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 343 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
342 if (ha->optrom_buffer == NULL) { 344 if (ha->optrom_buffer == NULL) {
343 qla_printk(KERN_WARNING, ha, 345 ql_log(ql_log_warn, vha, 0x7066,
344 "Unable to allocate memory for optrom update " 346 "Unable to allocate memory for optrom update "
345 "(%x).\n", ha->optrom_region_size); 347 "(%x)\n", ha->optrom_region_size);
346 348
347 ha->optrom_state = QLA_SWAITING; 349 ha->optrom_state = QLA_SWAITING;
348 return count; 350 return count;
349 } 351 }
350 352
351 DEBUG2(qla_printk(KERN_INFO, ha, 353 ql_dbg(ql_dbg_user, vha, 0x7067,
352 "Staging flash region write -- 0x%x/0x%x.\n", 354 "Staging flash region write -- 0x%x/0x%x.\n",
353 ha->optrom_region_start, ha->optrom_region_size)); 355 ha->optrom_region_start, ha->optrom_region_size);
354 356
355 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 357 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
356 break; 358 break;
@@ -359,14 +361,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
359 break; 361 break;
360 362
361 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 363 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
362 qla_printk(KERN_WARNING, ha, 364 ql_log(ql_log_warn, vha, 0x7068,
363 "HBA not online, failing flash update.\n"); 365 "HBA not online, failing flash update.\n");
364 return -EAGAIN; 366 return -EAGAIN;
365 } 367 }
366 368
367 DEBUG2(qla_printk(KERN_INFO, ha, 369 ql_dbg(ql_dbg_user, vha, 0x7069,
368 "Writing flash region -- 0x%x/0x%x.\n", 370 "Writing flash region -- 0x%x/0x%x.\n",
369 ha->optrom_region_start, ha->optrom_region_size)); 371 ha->optrom_region_start, ha->optrom_region_size);
370 372
371 ha->isp_ops->write_optrom(vha, ha->optrom_buffer, 373 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
372 ha->optrom_region_start, ha->optrom_region_size); 374 ha->optrom_region_start, ha->optrom_region_size);
@@ -425,7 +427,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
425 return 0; 427 return 0;
426 428
427 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 429 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
428 qla_printk(KERN_WARNING, ha, 430 ql_log(ql_log_warn, vha, 0x706a,
429 "HBA not online, failing VPD update.\n"); 431 "HBA not online, failing VPD update.\n");
430 return -EAGAIN; 432 return -EAGAIN;
431 } 433 }
@@ -440,7 +442,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
440 442
441 tmp_data = vmalloc(256); 443 tmp_data = vmalloc(256);
442 if (!tmp_data) { 444 if (!tmp_data) {
443 qla_printk(KERN_WARNING, ha, 445 ql_log(ql_log_warn, vha, 0x706b,
444 "Unable to allocate memory for VPD information update.\n"); 446 "Unable to allocate memory for VPD information update.\n");
445 goto done; 447 goto done;
446 } 448 }
@@ -480,7 +482,7 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
480 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 482 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
481 &ha->sfp_data_dma); 483 &ha->sfp_data_dma);
482 if (!ha->sfp_data) { 484 if (!ha->sfp_data) {
483 qla_printk(KERN_WARNING, ha, 485 ql_log(ql_log_warn, vha, 0x706c,
484 "Unable to allocate memory for SFP read-data.\n"); 486 "Unable to allocate memory for SFP read-data.\n");
485 return 0; 487 return 0;
486 } 488 }
@@ -499,9 +501,10 @@ do_read:
499 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data, 501 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
500 addr, offset, SFP_BLOCK_SIZE, 0); 502 addr, offset, SFP_BLOCK_SIZE, 0);
501 if (rval != QLA_SUCCESS) { 503 if (rval != QLA_SUCCESS) {
502 qla_printk(KERN_WARNING, ha, 504 ql_log(ql_log_warn, vha, 0x706d,
503 "Unable to read SFP data (%x/%x/%x).\n", rval, 505 "Unable to read SFP data (%x/%x/%x).\n", rval,
504 addr, offset); 506 addr, offset);
507
505 count = 0; 508 count = 0;
506 break; 509 break;
507 } 510 }
@@ -538,8 +541,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
538 type = simple_strtol(buf, NULL, 10); 541 type = simple_strtol(buf, NULL, 10);
539 switch (type) { 542 switch (type) {
540 case 0x2025c: 543 case 0x2025c:
541 qla_printk(KERN_INFO, ha, 544 ql_log(ql_log_info, vha, 0x706e,
542 "Issuing ISP reset on (%ld).\n", vha->host_no); 545 "Issuing ISP reset.\n");
543 546
544 scsi_block_requests(vha->host); 547 scsi_block_requests(vha->host);
545 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 548 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -551,8 +554,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
551 if (!IS_QLA81XX(ha)) 554 if (!IS_QLA81XX(ha))
552 break; 555 break;
553 556
554 qla_printk(KERN_INFO, ha, 557 ql_log(ql_log_info, vha, 0x706f,
555 "Issuing MPI reset on (%ld).\n", vha->host_no); 558 "Issuing MPI reset.\n");
556 559
557 /* Make sure FC side is not in reset */ 560 /* Make sure FC side is not in reset */
558 qla2x00_wait_for_hba_online(vha); 561 qla2x00_wait_for_hba_online(vha);
@@ -560,20 +563,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
560 /* Issue MPI reset */ 563 /* Issue MPI reset */
561 scsi_block_requests(vha->host); 564 scsi_block_requests(vha->host);
562 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS) 565 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
563 qla_printk(KERN_WARNING, ha, 566 ql_log(ql_log_warn, vha, 0x7070,
564 "MPI reset failed on (%ld).\n", vha->host_no); 567 "MPI reset failed.\n");
565 scsi_unblock_requests(vha->host); 568 scsi_unblock_requests(vha->host);
566 break; 569 break;
567 case 0x2025e: 570 case 0x2025e:
568 if (!IS_QLA82XX(ha) || vha != base_vha) { 571 if (!IS_QLA82XX(ha) || vha != base_vha) {
569 qla_printk(KERN_INFO, ha, 572 ql_log(ql_log_info, vha, 0x7071,
570 "FCoE ctx reset not supported for host%ld.\n", 573 "FCoE ctx reset no supported.\n");
571 vha->host_no);
572 return count; 574 return count;
573 } 575 }
574 576
575 qla_printk(KERN_INFO, ha, 577 ql_log(ql_log_info, vha, 0x7072,
576 "Issuing FCoE CTX reset on host%ld.\n", vha->host_no); 578 "Issuing FCoE ctx reset.\n");
577 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 579 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
578 qla2xxx_wake_dpc(vha); 580 qla2xxx_wake_dpc(vha);
579 qla2x00_wait_for_fcoe_ctx_reset(vha); 581 qla2x00_wait_for_fcoe_ctx_reset(vha);
@@ -611,8 +613,8 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
611 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 613 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
612 &ha->edc_data_dma); 614 &ha->edc_data_dma);
613 if (!ha->edc_data) { 615 if (!ha->edc_data) {
614 DEBUG2(qla_printk(KERN_INFO, ha, 616 ql_log(ql_log_warn, vha, 0x7073,
615 "Unable to allocate memory for EDC write.\n")); 617 "Unable to allocate memory for EDC write.\n");
616 return 0; 618 return 0;
617 } 619 }
618 } 620 }
@@ -631,9 +633,9 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
631 rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data, 633 rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
632 dev, adr, len, opt); 634 dev, adr, len, opt);
633 if (rval != QLA_SUCCESS) { 635 if (rval != QLA_SUCCESS) {
634 DEBUG2(qla_printk(KERN_INFO, ha, 636 ql_log(ql_log_warn, vha, 0x7074,
635 "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n", 637 "Unable to write EDC (%x) %02x:%04x:%02x:%02x\n",
636 rval, dev, adr, opt, len, buf[8])); 638 rval, dev, adr, opt, len, buf[8]);
637 return 0; 639 return 0;
638 } 640 }
639 641
@@ -669,8 +671,8 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
669 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 671 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
670 &ha->edc_data_dma); 672 &ha->edc_data_dma);
671 if (!ha->edc_data) { 673 if (!ha->edc_data) {
672 DEBUG2(qla_printk(KERN_INFO, ha, 674 ql_log(ql_log_warn, vha, 0x708c,
673 "Unable to allocate memory for EDC status.\n")); 675 "Unable to allocate memory for EDC status.\n");
674 return 0; 676 return 0;
675 } 677 }
676 } 678 }
@@ -688,9 +690,9 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
688 rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data, 690 rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
689 dev, adr, len, opt); 691 dev, adr, len, opt);
690 if (rval != QLA_SUCCESS) { 692 if (rval != QLA_SUCCESS) {
691 DEBUG2(qla_printk(KERN_INFO, ha, 693 ql_log(ql_log_info, vha, 0x7075,
692 "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n", 694 "Unable to write EDC status (%x) %02x:%04x:%02x.\n",
693 rval, dev, adr, opt, len)); 695 rval, dev, adr, opt, len);
694 return 0; 696 return 0;
695 } 697 }
696 698
@@ -749,7 +751,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
749 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, 751 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
750 &ha->xgmac_data_dma, GFP_KERNEL); 752 &ha->xgmac_data_dma, GFP_KERNEL);
751 if (!ha->xgmac_data) { 753 if (!ha->xgmac_data) {
752 qla_printk(KERN_WARNING, ha, 754 ql_log(ql_log_warn, vha, 0x7076,
753 "Unable to allocate memory for XGMAC read-data.\n"); 755 "Unable to allocate memory for XGMAC read-data.\n");
754 return 0; 756 return 0;
755 } 757 }
@@ -761,7 +763,7 @@ do_read:
761 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma, 763 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
762 XGMAC_DATA_SIZE, &actual_size); 764 XGMAC_DATA_SIZE, &actual_size);
763 if (rval != QLA_SUCCESS) { 765 if (rval != QLA_SUCCESS) {
764 qla_printk(KERN_WARNING, ha, 766 ql_log(ql_log_warn, vha, 0x7077,
765 "Unable to read XGMAC data (%x).\n", rval); 767 "Unable to read XGMAC data (%x).\n", rval);
766 count = 0; 768 count = 0;
767 } 769 }
@@ -801,7 +803,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
801 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 803 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
802 &ha->dcbx_tlv_dma, GFP_KERNEL); 804 &ha->dcbx_tlv_dma, GFP_KERNEL);
803 if (!ha->dcbx_tlv) { 805 if (!ha->dcbx_tlv) {
804 qla_printk(KERN_WARNING, ha, 806 ql_log(ql_log_warn, vha, 0x7078,
805 "Unable to allocate memory for DCBX TLV read-data.\n"); 807 "Unable to allocate memory for DCBX TLV read-data.\n");
806 return 0; 808 return 0;
807 } 809 }
@@ -813,8 +815,8 @@ do_read:
813 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, 815 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
814 DCBX_TLV_DATA_SIZE); 816 DCBX_TLV_DATA_SIZE);
815 if (rval != QLA_SUCCESS) { 817 if (rval != QLA_SUCCESS) {
816 qla_printk(KERN_WARNING, ha, 818 ql_log(ql_log_warn, vha, 0x7079,
817 "Unable to read DCBX TLV data (%x).\n", rval); 819 "Unable to read DCBX TLV (%x).\n", rval);
818 count = 0; 820 count = 0;
819 } 821 }
820 822
@@ -869,9 +871,13 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
869 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 871 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
870 iter->attr); 872 iter->attr);
871 if (ret) 873 if (ret)
872 qla_printk(KERN_INFO, vha->hw, 874 ql_log(ql_log_warn, vha, 0x00f3,
873 "Unable to create sysfs %s binary attribute " 875 "Unable to create sysfs %s binary attribute (%d).\n",
874 "(%d).\n", iter->name, ret); 876 iter->name, ret);
877 else
878 ql_dbg(ql_dbg_init, vha, 0x00f4,
879 "Successfully created sysfs %s binary attribure.\n",
880 iter->name);
875 } 881 }
876} 882}
877 883
@@ -1126,7 +1132,7 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1126 return -EPERM; 1132 return -EPERM;
1127 1133
1128 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 1134 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
1129 qla_printk(KERN_WARNING, ha, 1135 ql_log(ql_log_warn, vha, 0x707a,
1130 "Abort ISP active -- ignoring beacon request.\n"); 1136 "Abort ISP active -- ignoring beacon request.\n");
1131 return -EBUSY; 1137 return -EBUSY;
1132 } 1138 }
@@ -1322,9 +1328,8 @@ qla2x00_thermal_temp_show(struct device *dev,
1322 temp = frac = 0; 1328 temp = frac = 0;
1323 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1329 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1324 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 1330 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1325 DEBUG2_3_11(printk(KERN_WARNING 1331 ql_log(ql_log_warn, vha, 0x707b,
1326 "%s(%ld): isp reset in progress.\n", 1332 "ISP reset active.\n");
1327 __func__, vha->host_no));
1328 else if (!vha->hw->flags.eeh_busy) 1333 else if (!vha->hw->flags.eeh_busy)
1329 rval = qla2x00_get_thermal_temp(vha, &temp, &frac); 1334 rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
1330 if (rval != QLA_SUCCESS) 1335 if (rval != QLA_SUCCESS)
@@ -1343,8 +1348,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1343 1348
1344 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1349 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1345 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 1350 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1346 DEBUG2_3_11(printk("%s(%ld): isp reset in progress.\n", 1351 ql_log(ql_log_warn, vha, 0x707c,
1347 __func__, vha->host_no)); 1352 "ISP reset active.\n");
1348 else if (!vha->hw->flags.eeh_busy) 1353 else if (!vha->hw->flags.eeh_busy)
1349 rval = qla2x00_get_firmware_state(vha, state); 1354 rval = qla2x00_get_firmware_state(vha, state);
1350 if (rval != QLA_SUCCESS) 1355 if (rval != QLA_SUCCESS)
@@ -1645,8 +1650,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1645 1650
1646 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); 1651 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1647 if (stats == NULL) { 1652 if (stats == NULL) {
1648 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", 1653 ql_log(ql_log_warn, vha, 0x707d,
1649 __func__, base_vha->host_no)); 1654 "Failed to allocate memory for stats.\n");
1650 goto done; 1655 goto done;
1651 } 1656 }
1652 memset(stats, 0, DMA_POOL_SIZE); 1657 memset(stats, 0, DMA_POOL_SIZE);
@@ -1746,15 +1751,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1746 1751
1747 ret = qla24xx_vport_create_req_sanity_check(fc_vport); 1752 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1748 if (ret) { 1753 if (ret) {
1749 DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, " 1754 ql_log(ql_log_warn, vha, 0x707e,
1750 "status %x\n", ret)); 1755 "Vport sanity check failed, status %x\n", ret);
1751 return (ret); 1756 return (ret);
1752 } 1757 }
1753 1758
1754 vha = qla24xx_create_vhost(fc_vport); 1759 vha = qla24xx_create_vhost(fc_vport);
1755 if (vha == NULL) { 1760 if (vha == NULL) {
1756 DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n", 1761 ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
1757 vha));
1758 return FC_VPORT_FAILED; 1762 return FC_VPORT_FAILED;
1759 } 1763 }
1760 if (disable) { 1764 if (disable) {
@@ -1764,8 +1768,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1764 atomic_set(&vha->vp_state, VP_FAILED); 1768 atomic_set(&vha->vp_state, VP_FAILED);
1765 1769
1766 /* ready to create vport */ 1770 /* ready to create vport */
1767 qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n", 1771 ql_log(ql_log_info, vha, 0x7080,
1768 vha->vp_idx); 1772 "VP entry id %d assigned.\n", vha->vp_idx);
1769 1773
1770 /* initialized vport states */ 1774 /* initialized vport states */
1771 atomic_set(&vha->loop_state, LOOP_DOWN); 1775 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1775,8 +1779,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1775 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN || 1779 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1776 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 1780 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1777 /* Don't retry or attempt login of this virtual port */ 1781 /* Don't retry or attempt login of this virtual port */
1778 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n", 1782 ql_dbg(ql_dbg_user, vha, 0x7081,
1779 base_vha->host_no)); 1783 "Vport loop state is not UP.\n");
1780 atomic_set(&vha->loop_state, LOOP_DEAD); 1784 atomic_set(&vha->loop_state, LOOP_DEAD);
1781 if (!disable) 1785 if (!disable)
1782 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); 1786 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
@@ -1785,9 +1789,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1785 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 1789 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
1786 if (ha->fw_attributes & BIT_4) { 1790 if (ha->fw_attributes & BIT_4) {
1787 vha->flags.difdix_supported = 1; 1791 vha->flags.difdix_supported = 1;
1788 DEBUG18(qla_printk(KERN_INFO, ha, 1792 ql_dbg(ql_dbg_user, vha, 0x7082,
1789 "Registering for DIF/DIX type 1 and 3" 1793 "Registered for DIF/DIX type 1 and 3 protection.\n");
1790 " protection.\n"));
1791 scsi_host_set_prot(vha->host, 1794 scsi_host_set_prot(vha->host,
1792 SHOST_DIF_TYPE1_PROTECTION 1795 SHOST_DIF_TYPE1_PROTECTION
1793 | SHOST_DIF_TYPE2_PROTECTION 1796 | SHOST_DIF_TYPE2_PROTECTION
@@ -1802,8 +1805,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1802 1805
1803 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev, 1806 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1804 &ha->pdev->dev)) { 1807 &ha->pdev->dev)) {
1805 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n", 1808 ql_dbg(ql_dbg_user, vha, 0x7083,
1806 vha->host_no, vha->vp_idx)); 1809 "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
1807 goto vport_create_failed_2; 1810 goto vport_create_failed_2;
1808 } 1811 }
1809 1812
@@ -1820,6 +1823,10 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1820 1823
1821 if (ha->flags.cpu_affinity_enabled) { 1824 if (ha->flags.cpu_affinity_enabled) {
1822 req = ha->req_q_map[1]; 1825 req = ha->req_q_map[1];
1826 ql_dbg(ql_dbg_multiq, vha, 0xc000,
1827 "Request queue %p attached with "
1828 "VP[%d], cpu affinity =%d\n",
1829 req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
1823 goto vport_queue; 1830 goto vport_queue;
1824 } else if (ql2xmaxqueues == 1 || !ha->npiv_info) 1831 } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1825 goto vport_queue; 1832 goto vport_queue;
@@ -1836,13 +1843,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1836 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0, 1843 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1837 qos); 1844 qos);
1838 if (!ret) 1845 if (!ret)
1839 qla_printk(KERN_WARNING, ha, 1846 ql_log(ql_log_warn, vha, 0x7084,
1840 "Can't create request queue for vp_idx:%d\n", 1847 "Can't create request queue for VP[%d]\n",
1841 vha->vp_idx); 1848 vha->vp_idx);
1842 else { 1849 else {
1843 DEBUG2(qla_printk(KERN_INFO, ha, 1850 ql_dbg(ql_dbg_multiq, vha, 0xc001,
1844 "Request Que:%d (QoS: %d) created for vp_idx:%d\n", 1851 "Request Que:%d Q0s: %d) created for VP[%d]\n",
1845 ret, qos, vha->vp_idx)); 1852 ret, qos, vha->vp_idx);
1853 ql_dbg(ql_dbg_user, vha, 0x7085,
1854 "Request Que:%d Q0s: %d) created for VP[%d]\n",
1855 ret, qos, vha->vp_idx);
1846 req = ha->req_q_map[ret]; 1856 req = ha->req_q_map[ret];
1847 } 1857 }
1848 } 1858 }
@@ -1882,12 +1892,13 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1882 1892
1883 if (vha->timer_active) { 1893 if (vha->timer_active) {
1884 qla2x00_vp_stop_timer(vha); 1894 qla2x00_vp_stop_timer(vha);
1885 DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]" 1895 ql_dbg(ql_dbg_user, vha, 0x7086,
1886 " = %p has stopped\n", vha->host_no, vha->vp_idx, vha)); 1896 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
1887 } 1897 }
1888 1898
1889 /* No pending activities shall be there on the vha now */ 1899 /* No pending activities shall be there on the vha now */
1890 DEBUG(msleep(random32()%10)); /* Just to see if something falls on 1900 if (ql2xextended_error_logging & ql_dbg_user)
1901 msleep(random32()%10); /* Just to see if something falls on
1891 * the net we have placed below */ 1902 * the net we have placed below */
1892 1903
1893 BUG_ON(atomic_read(&vha->vref_count)); 1904 BUG_ON(atomic_read(&vha->vref_count));
@@ -1901,12 +1912,12 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1901 1912
1902 if (vha->req->id && !ha->flags.cpu_affinity_enabled) { 1913 if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
1903 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) 1914 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1904 qla_printk(KERN_WARNING, ha, 1915 ql_log(ql_log_warn, vha, 0x7087,
1905 "Queue delete failed.\n"); 1916 "Queue delete failed.\n");
1906 } 1917 }
1907 1918
1908 scsi_host_put(vha->host); 1919 scsi_host_put(vha->host);
1909 qla_printk(KERN_INFO, ha, "vport %d deleted\n", id); 1920 ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
1910 return 0; 1921 return 0;
1911} 1922}
1912 1923
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 8c10e2c4928e..07d1767cd26b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -36,7 +36,8 @@ done:
36} 36}
37 37
38int 38int
39qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag) 39qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
40 struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
40{ 41{
41 int i, ret, num_valid; 42 int i, ret, num_valid;
42 uint8_t *bcode; 43 uint8_t *bcode;
@@ -51,18 +52,17 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
51 52
52 if (bcode_val == 0xFFFFFFFF) { 53 if (bcode_val == 0xFFFFFFFF) {
53 /* No FCP Priority config data in flash */ 54 /* No FCP Priority config data in flash */
54 DEBUG2(printk(KERN_INFO 55 ql_dbg(ql_dbg_user, vha, 0x7051,
55 "%s: No FCP priority config data.\n", 56 "No FCP Priority config data.\n");
56 __func__));
57 return 0; 57 return 0;
58 } 58 }
59 59
60 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' || 60 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
61 bcode[3] != 'S') { 61 bcode[3] != 'S') {
62 /* Invalid FCP priority data header*/ 62 /* Invalid FCP priority data header*/
63 DEBUG2(printk(KERN_ERR 63 ql_dbg(ql_dbg_user, vha, 0x7052,
64 "%s: Invalid FCP Priority data header. bcode=0x%x\n", 64 "Invalid FCP Priority data header. bcode=0x%x.\n",
65 __func__, bcode_val)); 65 bcode_val);
66 return 0; 66 return 0;
67 } 67 }
68 if (flag != 1) 68 if (flag != 1)
@@ -77,15 +77,14 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
77 77
78 if (num_valid == 0) { 78 if (num_valid == 0) {
79 /* No valid FCP priority data entries */ 79 /* No valid FCP priority data entries */
80 DEBUG2(printk(KERN_ERR 80 ql_dbg(ql_dbg_user, vha, 0x7053,
81 "%s: No valid FCP Priority data entries.\n", 81 "No valid FCP Priority data entries.\n");
82 __func__));
83 ret = 0; 82 ret = 0;
84 } else { 83 } else {
85 /* FCP priority data is valid */ 84 /* FCP priority data is valid */
86 DEBUG2(printk(KERN_INFO 85 ql_dbg(ql_dbg_user, vha, 0x7054,
87 "%s: Valid FCP priority data. num entries = %d\n", 86 "Valid FCP priority data. num entries = %d.\n",
88 __func__, num_valid)); 87 num_valid);
89 } 88 }
90 89
91 return ret; 90 return ret;
@@ -182,10 +181,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
182 if (!ha->fcp_prio_cfg) { 181 if (!ha->fcp_prio_cfg) {
183 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); 182 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
184 if (!ha->fcp_prio_cfg) { 183 if (!ha->fcp_prio_cfg) {
185 qla_printk(KERN_WARNING, ha, 184 ql_log(ql_log_warn, vha, 0x7050,
186 "Unable to allocate memory " 185 "Unable to allocate memory for fcp prio "
187 "for fcp prio config data (%x).\n", 186 "config data (%x).\n", FCP_PRIO_CFG_SIZE);
188 FCP_PRIO_CFG_SIZE);
189 bsg_job->reply->result = (DID_ERROR << 16); 187 bsg_job->reply->result = (DID_ERROR << 16);
190 ret = -ENOMEM; 188 ret = -ENOMEM;
191 goto exit_fcp_prio_cfg; 189 goto exit_fcp_prio_cfg;
@@ -198,9 +196,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
198 FCP_PRIO_CFG_SIZE); 196 FCP_PRIO_CFG_SIZE);
199 197
200 /* validate fcp priority data */ 198 /* validate fcp priority data */
201 if (!qla24xx_fcp_prio_cfg_valid( 199
202 (struct qla_fcp_prio_cfg *) 200 if (!qla24xx_fcp_prio_cfg_valid(vha,
203 ha->fcp_prio_cfg, 1)) { 201 (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
204 bsg_job->reply->result = (DID_ERROR << 16); 202 bsg_job->reply->result = (DID_ERROR << 16);
205 ret = -EINVAL; 203 ret = -EINVAL;
206 /* If buffer was invalidatic int 204 /* If buffer was invalidatic int
@@ -256,9 +254,8 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
256 254
257 /* pass through is supported only for ISP 4Gb or higher */ 255 /* pass through is supported only for ISP 4Gb or higher */
258 if (!IS_FWI2_CAPABLE(ha)) { 256 if (!IS_FWI2_CAPABLE(ha)) {
259 DEBUG2(qla_printk(KERN_INFO, ha, 257 ql_dbg(ql_dbg_user, vha, 0x7001,
260 "scsi(%ld):ELS passthru not supported for ISP23xx based " 258 "ELS passthru not supported for ISP23xx based adapters.\n");
261 "adapters\n", vha->host_no));
262 rval = -EPERM; 259 rval = -EPERM;
263 goto done; 260 goto done;
264 } 261 }
@@ -266,11 +263,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
266 /* Multiple SG's are not supported for ELS requests */ 263 /* Multiple SG's are not supported for ELS requests */
267 if (bsg_job->request_payload.sg_cnt > 1 || 264 if (bsg_job->request_payload.sg_cnt > 1 ||
268 bsg_job->reply_payload.sg_cnt > 1) { 265 bsg_job->reply_payload.sg_cnt > 1) {
269 DEBUG2(printk(KERN_INFO 266 ql_dbg(ql_dbg_user, vha, 0x7002,
270 "multiple SG's are not supported for ELS requests" 267 "Multiple SG's are not suppored for ELS requests, "
271 " [request_sg_cnt: %x reply_sg_cnt: %x]\n", 268 "request_sg_cnt=%x reply_sg_cnt=%x.\n",
272 bsg_job->request_payload.sg_cnt, 269 bsg_job->request_payload.sg_cnt,
273 bsg_job->reply_payload.sg_cnt)); 270 bsg_job->reply_payload.sg_cnt);
274 rval = -EPERM; 271 rval = -EPERM;
275 goto done; 272 goto done;
276 } 273 }
@@ -281,9 +278,9 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
281 * if not perform fabric login 278 * if not perform fabric login
282 */ 279 */
283 if (qla2x00_fabric_login(vha, fcport, &nextlid)) { 280 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
284 DEBUG2(qla_printk(KERN_WARNING, ha, 281 ql_dbg(ql_dbg_user, vha, 0x7003,
285 "failed to login port %06X for ELS passthru\n", 282 "Failed to login port %06X for ELS passthru.\n",
286 fcport->d_id.b24)); 283 fcport->d_id.b24);
287 rval = -EIO; 284 rval = -EIO;
288 goto done; 285 goto done;
289 } 286 }
@@ -314,8 +311,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
314 } 311 }
315 312
316 if (!vha->flags.online) { 313 if (!vha->flags.online) {
317 DEBUG2(qla_printk(KERN_WARNING, ha, 314 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
318 "host not online\n"));
319 rval = -EIO; 315 rval = -EIO;
320 goto done; 316 goto done;
321 } 317 }
@@ -337,12 +333,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
337 333
338 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 334 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
339 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 335 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
340 DEBUG2(printk(KERN_INFO 336 ql_log(ql_log_warn, vha, 0x7008,
341 "dma mapping resulted in different sg counts \ 337 "dma mapping resulted in different sg counts, "
342 [request_sg_cnt: %x dma_request_sg_cnt: %x\ 338 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
343 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", 339 "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
344 bsg_job->request_payload.sg_cnt, req_sg_cnt, 340 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
345 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
346 rval = -EAGAIN; 341 rval = -EAGAIN;
347 goto done_unmap_sg; 342 goto done_unmap_sg;
348 } 343 }
@@ -363,15 +358,16 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
363 "bsg_els_rpt" : "bsg_els_hst"); 358 "bsg_els_rpt" : "bsg_els_hst");
364 els->u.bsg_job = bsg_job; 359 els->u.bsg_job = bsg_job;
365 360
366 DEBUG2(qla_printk(KERN_INFO, ha, 361 ql_dbg(ql_dbg_user, vha, 0x700a,
367 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x " 362 "bsg rqst type: %s els type: %x - loop-id=%x "
368 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type, 363 "portid=%-2x%02x%02x.\n", type,
369 bsg_job->request->rqst_data.h_els.command_code, 364 bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
370 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 365 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
371 fcport->d_id.b.al_pa));
372 366
373 rval = qla2x00_start_sp(sp); 367 rval = qla2x00_start_sp(sp);
374 if (rval != QLA_SUCCESS) { 368 if (rval != QLA_SUCCESS) {
369 ql_log(ql_log_warn, vha, 0x700e,
370 "qla2x00_start_sp failed = %d\n", rval);
375 kfree(sp->ctx); 371 kfree(sp->ctx);
376 mempool_free(sp, ha->srb_mempool); 372 mempool_free(sp, ha->srb_mempool);
377 rval = -EIO; 373 rval = -EIO;
@@ -411,6 +407,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
411 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 407 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
412 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 408 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
413 if (!req_sg_cnt) { 409 if (!req_sg_cnt) {
410 ql_log(ql_log_warn, vha, 0x700f,
411 "dma_map_sg return %d for request\n", req_sg_cnt);
414 rval = -ENOMEM; 412 rval = -ENOMEM;
415 goto done; 413 goto done;
416 } 414 }
@@ -418,24 +416,25 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
418 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 416 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
419 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 417 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
420 if (!rsp_sg_cnt) { 418 if (!rsp_sg_cnt) {
419 ql_log(ql_log_warn, vha, 0x7010,
420 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
421 rval = -ENOMEM; 421 rval = -ENOMEM;
422 goto done; 422 goto done;
423 } 423 }
424 424
425 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 425 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
426 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 426 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
427 DEBUG2(qla_printk(KERN_WARNING, ha, 427 ql_log(ql_log_warn, vha, 0x7011,
428 "[request_sg_cnt: %x dma_request_sg_cnt: %x\ 428 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
429 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", 429 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
430 bsg_job->request_payload.sg_cnt, req_sg_cnt, 430 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
431 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
432 rval = -EAGAIN; 431 rval = -EAGAIN;
433 goto done_unmap_sg; 432 goto done_unmap_sg;
434 } 433 }
435 434
436 if (!vha->flags.online) { 435 if (!vha->flags.online) {
437 DEBUG2(qla_printk(KERN_WARNING, ha, 436 ql_log(ql_log_warn, vha, 0x7012,
438 "host not online\n")); 437 "Host is not online.\n");
439 rval = -EIO; 438 rval = -EIO;
440 goto done_unmap_sg; 439 goto done_unmap_sg;
441 } 440 }
@@ -451,8 +450,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
451 loop_id = vha->mgmt_svr_loop_id; 450 loop_id = vha->mgmt_svr_loop_id;
452 break; 451 break;
453 default: 452 default:
454 DEBUG2(qla_printk(KERN_INFO, ha, 453 ql_dbg(ql_dbg_user, vha, 0x7013,
455 "Unknown loop id: %x\n", loop_id)); 454 "Unknown loop id: %x.\n", loop_id);
456 rval = -EINVAL; 455 rval = -EINVAL;
457 goto done_unmap_sg; 456 goto done_unmap_sg;
458 } 457 }
@@ -464,6 +463,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
464 */ 463 */
465 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 464 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
466 if (!fcport) { 465 if (!fcport) {
466 ql_log(ql_log_warn, vha, 0x7014,
467 "Failed to allocate fcport.\n");
467 rval = -ENOMEM; 468 rval = -ENOMEM;
468 goto done_unmap_sg; 469 goto done_unmap_sg;
469 } 470 }
@@ -479,6 +480,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
479 /* Alloc SRB structure */ 480 /* Alloc SRB structure */
480 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx)); 481 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
481 if (!sp) { 482 if (!sp) {
483 ql_log(ql_log_warn, vha, 0x7015,
484 "qla2x00_get_ctx_bsg_sp failed.\n");
482 rval = -ENOMEM; 485 rval = -ENOMEM;
483 goto done_free_fcport; 486 goto done_free_fcport;
484 } 487 }
@@ -488,15 +491,17 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
488 ct->name = "bsg_ct"; 491 ct->name = "bsg_ct";
489 ct->u.bsg_job = bsg_job; 492 ct->u.bsg_job = bsg_job;
490 493
491 DEBUG2(qla_printk(KERN_INFO, ha, 494 ql_dbg(ql_dbg_user, vha, 0x7016,
492 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x " 495 "bsg rqst type: %s else type: %x - "
493 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type, 496 "loop-id=%x portid=%02x%02x%02x.\n", type,
494 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16), 497 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
495 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 498 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
496 fcport->d_id.b.al_pa)); 499 fcport->d_id.b.al_pa);
497 500
498 rval = qla2x00_start_sp(sp); 501 rval = qla2x00_start_sp(sp);
499 if (rval != QLA_SUCCESS) { 502 if (rval != QLA_SUCCESS) {
503 ql_log(ql_log_warn, vha, 0x7017,
504 "qla2x00_start_sp failed=%d.\n", rval);
500 kfree(sp->ctx); 505 kfree(sp->ctx);
501 mempool_free(sp, ha->srb_mempool); 506 mempool_free(sp, ha->srb_mempool);
502 rval = -EIO; 507 rval = -EIO;
@@ -535,9 +540,8 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
535 ha->notify_dcbx_comp = 1; 540 ha->notify_dcbx_comp = 1;
536 ret = qla81xx_set_port_config(vha, new_config); 541 ret = qla81xx_set_port_config(vha, new_config);
537 if (ret != QLA_SUCCESS) { 542 if (ret != QLA_SUCCESS) {
538 DEBUG2(printk(KERN_ERR 543 ql_log(ql_log_warn, vha, 0x7021,
539 "%s(%lu): Set port config failed\n", 544 "set port config failed.\n");
540 __func__, vha->host_no));
541 ha->notify_dcbx_comp = 0; 545 ha->notify_dcbx_comp = 0;
542 rval = -EINVAL; 546 rval = -EINVAL;
543 goto done_set_internal; 547 goto done_set_internal;
@@ -545,11 +549,11 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
545 549
546 /* Wait for DCBX complete event */ 550 /* Wait for DCBX complete event */
547 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) { 551 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
548 DEBUG2(qla_printk(KERN_WARNING, ha, 552 ql_dbg(ql_dbg_user, vha, 0x7022,
549 "State change notificaition not received.\n")); 553 "State change notification not received.\n");
550 } else 554 } else
551 DEBUG2(qla_printk(KERN_INFO, ha, 555 ql_dbg(ql_dbg_user, vha, 0x7023,
552 "State change RECEIVED\n")); 556 "State change received.\n");
553 557
554 ha->notify_dcbx_comp = 0; 558 ha->notify_dcbx_comp = 0;
555 559
@@ -581,9 +585,8 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
581 ha->notify_dcbx_comp = wait; 585 ha->notify_dcbx_comp = wait;
582 ret = qla81xx_set_port_config(vha, new_config); 586 ret = qla81xx_set_port_config(vha, new_config);
583 if (ret != QLA_SUCCESS) { 587 if (ret != QLA_SUCCESS) {
584 DEBUG2(printk(KERN_ERR 588 ql_log(ql_log_warn, vha, 0x7025,
585 "%s(%lu): Set port config failed\n", 589 "Set port config failed.\n");
586 __func__, vha->host_no));
587 ha->notify_dcbx_comp = 0; 590 ha->notify_dcbx_comp = 0;
588 rval = -EINVAL; 591 rval = -EINVAL;
589 goto done_reset_internal; 592 goto done_reset_internal;
@@ -592,14 +595,14 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
592 /* Wait for DCBX complete event */ 595 /* Wait for DCBX complete event */
593 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp, 596 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
594 (20 * HZ))) { 597 (20 * HZ))) {
595 DEBUG2(qla_printk(KERN_WARNING, ha, 598 ql_dbg(ql_dbg_user, vha, 0x7026,
596 "State change notificaition not received.\n")); 599 "State change notification not received.\n");
597 ha->notify_dcbx_comp = 0; 600 ha->notify_dcbx_comp = 0;
598 rval = -EINVAL; 601 rval = -EINVAL;
599 goto done_reset_internal; 602 goto done_reset_internal;
600 } else 603 } else
601 DEBUG2(qla_printk(KERN_INFO, ha, 604 ql_dbg(ql_dbg_user, vha, 0x7027,
602 "State change RECEIVED\n")); 605 "State change received.\n");
603 606
604 ha->notify_dcbx_comp = 0; 607 ha->notify_dcbx_comp = 0;
605 } 608 }
@@ -629,11 +632,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
629 632
630 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 633 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
631 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 634 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
632 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 635 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
636 ql_log(ql_log_warn, vha, 0x7018, "Abort active or needed.\n");
633 return -EBUSY; 637 return -EBUSY;
638 }
634 639
635 if (!vha->flags.online) { 640 if (!vha->flags.online) {
636 DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n")); 641 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
637 return -EIO; 642 return -EIO;
638 } 643 }
639 644
@@ -641,26 +646,31 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
641 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 646 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
642 DMA_TO_DEVICE); 647 DMA_TO_DEVICE);
643 648
644 if (!elreq.req_sg_cnt) 649 if (!elreq.req_sg_cnt) {
650 ql_log(ql_log_warn, vha, 0x701a,
651 "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
645 return -ENOMEM; 652 return -ENOMEM;
653 }
646 654
647 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, 655 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
648 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 656 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
649 DMA_FROM_DEVICE); 657 DMA_FROM_DEVICE);
650 658
651 if (!elreq.rsp_sg_cnt) { 659 if (!elreq.rsp_sg_cnt) {
660 ql_log(ql_log_warn, vha, 0x701b,
661 "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
652 rval = -ENOMEM; 662 rval = -ENOMEM;
653 goto done_unmap_req_sg; 663 goto done_unmap_req_sg;
654 } 664 }
655 665
656 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) || 666 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
657 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 667 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
658 DEBUG2(printk(KERN_INFO 668 ql_log(ql_log_warn, vha, 0x701c,
659 "dma mapping resulted in different sg counts " 669 "dma mapping resulted in different sg counts, "
660 "[request_sg_cnt: %x dma_request_sg_cnt: %x " 670 "request_sg_cnt: %x dma_request_sg_cnt: %x "
661 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", 671 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
662 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt, 672 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
663 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt)); 673 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
664 rval = -EAGAIN; 674 rval = -EAGAIN;
665 goto done_unmap_sg; 675 goto done_unmap_sg;
666 } 676 }
@@ -668,8 +678,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
668 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len, 678 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
669 &req_data_dma, GFP_KERNEL); 679 &req_data_dma, GFP_KERNEL);
670 if (!req_data) { 680 if (!req_data) {
671 DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data " 681 ql_log(ql_log_warn, vha, 0x701d,
672 "failed for host=%lu\n", __func__, vha->host_no)); 682 "dma alloc failed for req_data.\n");
673 rval = -ENOMEM; 683 rval = -ENOMEM;
674 goto done_unmap_sg; 684 goto done_unmap_sg;
675 } 685 }
@@ -677,8 +687,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
677 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len, 687 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
678 &rsp_data_dma, GFP_KERNEL); 688 &rsp_data_dma, GFP_KERNEL);
679 if (!rsp_data) { 689 if (!rsp_data) {
680 DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data " 690 ql_log(ql_log_warn, vha, 0x7004,
681 "failed for host=%lu\n", __func__, vha->host_no)); 691 "dma alloc failed for rsp_data.\n");
682 rval = -ENOMEM; 692 rval = -ENOMEM;
683 goto done_free_dma_req; 693 goto done_free_dma_req;
684 } 694 }
@@ -699,8 +709,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
699 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 709 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
700 elreq.options == EXTERNAL_LOOPBACK) { 710 elreq.options == EXTERNAL_LOOPBACK) {
701 type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; 711 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
702 DEBUG2(qla_printk(KERN_INFO, ha, 712 ql_dbg(ql_dbg_user, vha, 0x701e,
703 "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type)); 713 "BSG request type: %s.\n", type);
704 command_sent = INT_DEF_LB_ECHO_CMD; 714 command_sent = INT_DEF_LB_ECHO_CMD;
705 rval = qla2x00_echo_test(vha, &elreq, response); 715 rval = qla2x00_echo_test(vha, &elreq, response);
706 } else { 716 } else {
@@ -708,9 +718,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
708 memset(config, 0, sizeof(config)); 718 memset(config, 0, sizeof(config));
709 memset(new_config, 0, sizeof(new_config)); 719 memset(new_config, 0, sizeof(new_config));
710 if (qla81xx_get_port_config(vha, config)) { 720 if (qla81xx_get_port_config(vha, config)) {
711 DEBUG2(printk(KERN_ERR 721 ql_log(ql_log_warn, vha, 0x701f,
712 "%s(%lu): Get port config failed\n", 722 "Get port config failed.\n");
713 __func__, vha->host_no));
714 bsg_job->reply->reply_payload_rcv_len = 0; 723 bsg_job->reply->reply_payload_rcv_len = 0;
715 bsg_job->reply->result = (DID_ERROR << 16); 724 bsg_job->reply->result = (DID_ERROR << 16);
716 rval = -EPERM; 725 rval = -EPERM;
@@ -718,11 +727,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
718 } 727 }
719 728
720 if (elreq.options != EXTERNAL_LOOPBACK) { 729 if (elreq.options != EXTERNAL_LOOPBACK) {
721 DEBUG2(qla_printk(KERN_INFO, ha, 730 ql_dbg(ql_dbg_user, vha, 0x7020,
722 "Internal: current port config = %x\n", 731 "Internal: curent port config = %x\n",
723 config[0])); 732 config[0]);
724 if (qla81xx_set_internal_loopback(vha, config, 733 if (qla81xx_set_internal_loopback(vha, config,
725 new_config)) { 734 new_config)) {
735 ql_log(ql_log_warn, vha, 0x7024,
736 "Internal loopback failed.\n");
726 bsg_job->reply->reply_payload_rcv_len = 737 bsg_job->reply->reply_payload_rcv_len =
727 0; 738 0;
728 bsg_job->reply->result = 739 bsg_job->reply->result =
@@ -746,9 +757,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
746 } 757 }
747 758
748 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 759 type = "FC_BSG_HST_VENDOR_LOOPBACK";
749 DEBUG2(qla_printk(KERN_INFO, ha, 760 ql_dbg(ql_dbg_user, vha, 0x7028,
750 "scsi(%ld) bsg rqst type: %s\n", 761 "BSG request type: %s.\n", type);
751 vha->host_no, type));
752 762
753 command_sent = INT_DEF_LB_LOOPBACK_CMD; 763 command_sent = INT_DEF_LB_LOOPBACK_CMD;
754 rval = qla2x00_loopback_test(vha, &elreq, response); 764 rval = qla2x00_loopback_test(vha, &elreq, response);
@@ -763,17 +773,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
763 773
764 if (response[0] == MBS_COMMAND_ERROR && 774 if (response[0] == MBS_COMMAND_ERROR &&
765 response[1] == MBS_LB_RESET) { 775 response[1] == MBS_LB_RESET) {
766 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing " 776 ql_log(ql_log_warn, vha, 0x7029,
767 "ISP\n", __func__, vha->host_no)); 777 "MBX command error, Aborting ISP.\n");
768 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 778 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
769 qla2xxx_wake_dpc(vha); 779 qla2xxx_wake_dpc(vha);
770 qla2x00_wait_for_chip_reset(vha); 780 qla2x00_wait_for_chip_reset(vha);
771 /* Also reset the MPI */ 781 /* Also reset the MPI */
772 if (qla81xx_restart_mpi_firmware(vha) != 782 if (qla81xx_restart_mpi_firmware(vha) !=
773 QLA_SUCCESS) { 783 QLA_SUCCESS) {
774 qla_printk(KERN_INFO, ha, 784 ql_log(ql_log_warn, vha, 0x702a,
775 "MPI reset failed for host%ld.\n", 785 "MPI reset failed.\n");
776 vha->host_no);
777 } 786 }
778 787
779 bsg_job->reply->reply_payload_rcv_len = 0; 788 bsg_job->reply->reply_payload_rcv_len = 0;
@@ -783,17 +792,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
783 } 792 }
784 } else { 793 } else {
785 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 794 type = "FC_BSG_HST_VENDOR_LOOPBACK";
786 DEBUG2(qla_printk(KERN_INFO, ha, 795 ql_dbg(ql_dbg_user, vha, 0x702b,
787 "scsi(%ld) bsg rqst type: %s\n", 796 "BSG request type: %s.\n", type);
788 vha->host_no, type));
789 command_sent = INT_DEF_LB_LOOPBACK_CMD; 797 command_sent = INT_DEF_LB_LOOPBACK_CMD;
790 rval = qla2x00_loopback_test(vha, &elreq, response); 798 rval = qla2x00_loopback_test(vha, &elreq, response);
791 } 799 }
792 } 800 }
793 801
794 if (rval) { 802 if (rval) {
795 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 803 ql_log(ql_log_warn, vha, 0x702c,
796 "request %s failed\n", vha->host_no, type)); 804 "Vendor request %s failed.\n", type);
797 805
798 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + 806 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
799 sizeof(struct fc_bsg_reply); 807 sizeof(struct fc_bsg_reply);
@@ -805,8 +813,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
805 bsg_job->reply->reply_payload_rcv_len = 0; 813 bsg_job->reply->reply_payload_rcv_len = 0;
806 bsg_job->reply->result = (DID_ERROR << 16); 814 bsg_job->reply->result = (DID_ERROR << 16);
807 } else { 815 } else {
808 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 816 ql_dbg(ql_dbg_user, vha, 0x702d,
809 "request %s completed\n", vha->host_no, type)); 817 "Vendor request %s completed.\n", type);
810 818
811 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + 819 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
812 sizeof(response) + sizeof(uint8_t); 820 sizeof(response) + sizeof(uint8_t);
@@ -851,12 +859,13 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
851 859
852 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 860 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
853 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 861 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
854 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 862 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
863 ql_log(ql_log_warn, vha, 0x702e, "Abort active or needed.\n");
855 return -EBUSY; 864 return -EBUSY;
865 }
856 866
857 if (!IS_QLA84XX(ha)) { 867 if (!IS_QLA84XX(ha)) {
858 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, " 868 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
859 "exiting.\n", vha->host_no));
860 return -EINVAL; 869 return -EINVAL;
861 } 870 }
862 871
@@ -865,14 +874,14 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
865 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW); 874 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
866 875
867 if (rval) { 876 if (rval) {
868 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 877 ql_log(ql_log_warn, vha, 0x7030,
869 "request 84xx reset failed\n", vha->host_no)); 878 "Vendor request 84xx reset failed.\n");
870 rval = bsg_job->reply->reply_payload_rcv_len = 0; 879 rval = bsg_job->reply->reply_payload_rcv_len = 0;
871 bsg_job->reply->result = (DID_ERROR << 16); 880 bsg_job->reply->result = (DID_ERROR << 16);
872 881
873 } else { 882 } else {
874 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 883 ql_dbg(ql_dbg_user, vha, 0x7031,
875 "request 84xx reset completed\n", vha->host_no)); 884 "Vendor request 84xx reset completed.\n");
876 bsg_job->reply->result = DID_OK; 885 bsg_job->reply->result = DID_OK;
877 } 886 }
878 887
@@ -902,21 +911,24 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
902 return -EBUSY; 911 return -EBUSY;
903 912
904 if (!IS_QLA84XX(ha)) { 913 if (!IS_QLA84XX(ha)) {
905 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, " 914 ql_dbg(ql_dbg_user, vha, 0x7032,
906 "exiting.\n", vha->host_no)); 915 "Not 84xx, exiting.\n");
907 return -EINVAL; 916 return -EINVAL;
908 } 917 }
909 918
910 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 919 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
911 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 920 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
912 if (!sg_cnt) 921 if (!sg_cnt) {
922 ql_log(ql_log_warn, vha, 0x7033,
923 "dma_map_sg returned %d for request.\n", sg_cnt);
913 return -ENOMEM; 924 return -ENOMEM;
925 }
914 926
915 if (sg_cnt != bsg_job->request_payload.sg_cnt) { 927 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
916 DEBUG2(printk(KERN_INFO 928 ql_log(ql_log_warn, vha, 0x7034,
917 "dma mapping resulted in different sg counts " 929 "DMA mapping resulted in different sg counts, "
918 "request_sg_cnt: %x dma_request_sg_cnt: %x ", 930 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
919 bsg_job->request_payload.sg_cnt, sg_cnt)); 931 bsg_job->request_payload.sg_cnt, sg_cnt);
920 rval = -EAGAIN; 932 rval = -EAGAIN;
921 goto done_unmap_sg; 933 goto done_unmap_sg;
922 } 934 }
@@ -925,8 +937,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
925 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len, 937 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
926 &fw_dma, GFP_KERNEL); 938 &fw_dma, GFP_KERNEL);
927 if (!fw_buf) { 939 if (!fw_buf) {
928 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf " 940 ql_log(ql_log_warn, vha, 0x7035,
929 "failed for host=%lu\n", __func__, vha->host_no)); 941 "DMA alloc failed for fw_buf.\n");
930 rval = -ENOMEM; 942 rval = -ENOMEM;
931 goto done_unmap_sg; 943 goto done_unmap_sg;
932 } 944 }
@@ -936,8 +948,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
936 948
937 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 949 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
938 if (!mn) { 950 if (!mn) {
939 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer " 951 ql_log(ql_log_warn, vha, 0x7036,
940 "failed for host=%lu\n", __func__, vha->host_no)); 952 "DMA alloc failed for fw buffer.\n");
941 rval = -ENOMEM; 953 rval = -ENOMEM;
942 goto done_free_fw_buf; 954 goto done_free_fw_buf;
943 } 955 }
@@ -965,15 +977,15 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
965 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 977 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
966 978
967 if (rval) { 979 if (rval) {
968 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 980 ql_log(ql_log_warn, vha, 0x7037,
969 "request 84xx updatefw failed\n", vha->host_no)); 981 "Vendor request 84xx updatefw failed.\n");
970 982
971 rval = bsg_job->reply->reply_payload_rcv_len = 0; 983 rval = bsg_job->reply->reply_payload_rcv_len = 0;
972 bsg_job->reply->result = (DID_ERROR << 16); 984 bsg_job->reply->result = (DID_ERROR << 16);
973 985
974 } else { 986 } else {
975 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 987 ql_dbg(ql_dbg_user, vha, 0x7038,
976 "request 84xx updatefw completed\n", vha->host_no)); 988 "Vendor request 84xx updatefw completed.\n");
977 989
978 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 990 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
979 bsg_job->reply->result = DID_OK; 991 bsg_job->reply->result = DID_OK;
@@ -1009,27 +1021,30 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1009 1021
1010 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 1022 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1011 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1023 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1012 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 1024 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1025 ql_log(ql_log_warn, vha, 0x7039,
1026 "Abort active or needed.\n");
1013 return -EBUSY; 1027 return -EBUSY;
1028 }
1014 1029
1015 if (!IS_QLA84XX(ha)) { 1030 if (!IS_QLA84XX(ha)) {
1016 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, " 1031 ql_log(ql_log_warn, vha, 0x703a,
1017 "exiting.\n", vha->host_no)); 1032 "Not 84xx, exiting.\n");
1018 return -EINVAL; 1033 return -EINVAL;
1019 } 1034 }
1020 1035
1021 ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request + 1036 ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
1022 sizeof(struct fc_bsg_request)); 1037 sizeof(struct fc_bsg_request));
1023 if (!ql84_mgmt) { 1038 if (!ql84_mgmt) {
1024 DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n", 1039 ql_log(ql_log_warn, vha, 0x703b,
1025 __func__, vha->host_no)); 1040 "MGMT header not provided, exiting.\n");
1026 return -EINVAL; 1041 return -EINVAL;
1027 } 1042 }
1028 1043
1029 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 1044 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1030 if (!mn) { 1045 if (!mn) {
1031 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer " 1046 ql_log(ql_log_warn, vha, 0x703c,
1032 "failed for host=%lu\n", __func__, vha->host_no)); 1047 "DMA alloc failed for fw buffer.\n");
1033 return -ENOMEM; 1048 return -ENOMEM;
1034 } 1049 }
1035 1050
@@ -1044,6 +1059,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1044 bsg_job->reply_payload.sg_list, 1059 bsg_job->reply_payload.sg_list,
1045 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1060 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1046 if (!sg_cnt) { 1061 if (!sg_cnt) {
1062 ql_log(ql_log_warn, vha, 0x703d,
1063 "dma_map_sg returned %d for reply.\n", sg_cnt);
1047 rval = -ENOMEM; 1064 rval = -ENOMEM;
1048 goto exit_mgmt; 1065 goto exit_mgmt;
1049 } 1066 }
@@ -1051,10 +1068,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1051 dma_direction = DMA_FROM_DEVICE; 1068 dma_direction = DMA_FROM_DEVICE;
1052 1069
1053 if (sg_cnt != bsg_job->reply_payload.sg_cnt) { 1070 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1054 DEBUG2(printk(KERN_INFO 1071 ql_log(ql_log_warn, vha, 0x703e,
1055 "dma mapping resulted in different sg counts " 1072 "DMA mapping resulted in different sg counts, "
1056 "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n", 1073 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1057 bsg_job->reply_payload.sg_cnt, sg_cnt)); 1074 bsg_job->reply_payload.sg_cnt, sg_cnt);
1058 rval = -EAGAIN; 1075 rval = -EAGAIN;
1059 goto done_unmap_sg; 1076 goto done_unmap_sg;
1060 } 1077 }
@@ -1064,9 +1081,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1064 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, 1081 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1065 &mgmt_dma, GFP_KERNEL); 1082 &mgmt_dma, GFP_KERNEL);
1066 if (!mgmt_b) { 1083 if (!mgmt_b) {
1067 DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b " 1084 ql_log(ql_log_warn, vha, 0x703f,
1068 "failed for host=%lu\n", 1085 "DMA alloc failed for mgmt_b.\n");
1069 __func__, vha->host_no));
1070 rval = -ENOMEM; 1086 rval = -ENOMEM;
1071 goto done_unmap_sg; 1087 goto done_unmap_sg;
1072 } 1088 }
@@ -1094,6 +1110,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1094 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1110 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1095 1111
1096 if (!sg_cnt) { 1112 if (!sg_cnt) {
1113 ql_log(ql_log_warn, vha, 0x7040,
1114 "dma_map_sg returned %d.\n", sg_cnt);
1097 rval = -ENOMEM; 1115 rval = -ENOMEM;
1098 goto exit_mgmt; 1116 goto exit_mgmt;
1099 } 1117 }
@@ -1101,10 +1119,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1101 dma_direction = DMA_TO_DEVICE; 1119 dma_direction = DMA_TO_DEVICE;
1102 1120
1103 if (sg_cnt != bsg_job->request_payload.sg_cnt) { 1121 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1104 DEBUG2(printk(KERN_INFO 1122 ql_log(ql_log_warn, vha, 0x7041,
1105 "dma mapping resulted in different sg counts " 1123 "DMA mapping resulted in different sg counts, "
1106 "request_sg_cnt: %x dma_request_sg_cnt: %x ", 1124 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1107 bsg_job->request_payload.sg_cnt, sg_cnt)); 1125 bsg_job->request_payload.sg_cnt, sg_cnt);
1108 rval = -EAGAIN; 1126 rval = -EAGAIN;
1109 goto done_unmap_sg; 1127 goto done_unmap_sg;
1110 } 1128 }
@@ -1113,9 +1131,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1113 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, 1131 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1114 &mgmt_dma, GFP_KERNEL); 1132 &mgmt_dma, GFP_KERNEL);
1115 if (!mgmt_b) { 1133 if (!mgmt_b) {
1116 DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b " 1134 ql_log(ql_log_warn, vha, 0x7042,
1117 "failed for host=%lu\n", 1135 "DMA alloc failed for mgmt_b.\n");
1118 __func__, vha->host_no));
1119 rval = -ENOMEM; 1136 rval = -ENOMEM;
1120 goto done_unmap_sg; 1137 goto done_unmap_sg;
1121 } 1138 }
@@ -1156,15 +1173,15 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1156 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0); 1173 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1157 1174
1158 if (rval) { 1175 if (rval) {
1159 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 1176 ql_log(ql_log_warn, vha, 0x7043,
1160 "request 84xx mgmt failed\n", vha->host_no)); 1177 "Vendor request 84xx mgmt failed.\n");
1161 1178
1162 rval = bsg_job->reply->reply_payload_rcv_len = 0; 1179 rval = bsg_job->reply->reply_payload_rcv_len = 0;
1163 bsg_job->reply->result = (DID_ERROR << 16); 1180 bsg_job->reply->result = (DID_ERROR << 16);
1164 1181
1165 } else { 1182 } else {
1166 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 1183 ql_dbg(ql_dbg_user, vha, 0x7044,
1167 "request 84xx mgmt completed\n", vha->host_no)); 1184 "Vendor request 84xx mgmt completed.\n");
1168 1185
1169 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1186 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1170 bsg_job->reply->result = DID_OK; 1187 bsg_job->reply->result = DID_OK;
@@ -1204,7 +1221,6 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1204{ 1221{
1205 struct Scsi_Host *host = bsg_job->shost; 1222 struct Scsi_Host *host = bsg_job->shost;
1206 scsi_qla_host_t *vha = shost_priv(host); 1223 scsi_qla_host_t *vha = shost_priv(host);
1207 struct qla_hw_data *ha = vha->hw;
1208 int rval = 0; 1224 int rval = 0;
1209 struct qla_port_param *port_param = NULL; 1225 struct qla_port_param *port_param = NULL;
1210 fc_port_t *fcport = NULL; 1226 fc_port_t *fcport = NULL;
@@ -1215,26 +1231,27 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1215 1231
1216 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 1232 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1217 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1233 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1218 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 1234 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1235 ql_log(ql_log_warn, vha, 0x7045, "abort active or needed.\n");
1219 return -EBUSY; 1236 return -EBUSY;
1237 }
1220 1238
1221 if (!IS_IIDMA_CAPABLE(vha->hw)) { 1239 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1222 DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not " 1240 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1223 "supported\n", __func__, vha->host_no));
1224 return -EINVAL; 1241 return -EINVAL;
1225 } 1242 }
1226 1243
1227 port_param = (struct qla_port_param *)((char *)bsg_job->request + 1244 port_param = (struct qla_port_param *)((char *)bsg_job->request +
1228 sizeof(struct fc_bsg_request)); 1245 sizeof(struct fc_bsg_request));
1229 if (!port_param) { 1246 if (!port_param) {
1230 DEBUG2(printk("%s(%ld): port_param header not provided, " 1247 ql_log(ql_log_warn, vha, 0x7047,
1231 "exiting.\n", __func__, vha->host_no)); 1248 "port_param header not provided.\n");
1232 return -EINVAL; 1249 return -EINVAL;
1233 } 1250 }
1234 1251
1235 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) { 1252 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1236 DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n", 1253 ql_log(ql_log_warn, vha, 0x7048,
1237 __func__, vha->host_no)); 1254 "Invalid destination type.\n");
1238 return -EINVAL; 1255 return -EINVAL;
1239 } 1256 }
1240 1257
@@ -1249,21 +1266,20 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1249 } 1266 }
1250 1267
1251 if (!fcport) { 1268 if (!fcport) {
1252 DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n", 1269 ql_log(ql_log_warn, vha, 0x7049,
1253 __func__, vha->host_no)); 1270 "Failed to find port.\n");
1254 return -EINVAL; 1271 return -EINVAL;
1255 } 1272 }
1256 1273
1257 if (atomic_read(&fcport->state) != FCS_ONLINE) { 1274 if (atomic_read(&fcport->state) != FCS_ONLINE) {
1258 DEBUG2(printk(KERN_ERR "%s(%ld): Port not online\n", 1275 ql_log(ql_log_warn, vha, 0x704a,
1259 __func__, vha->host_no)); 1276 "Port is not online.\n");
1260 return -EINVAL; 1277 return -EINVAL;
1261 } 1278 }
1262 1279
1263 if (fcport->flags & FCF_LOGIN_NEEDED) { 1280 if (fcport->flags & FCF_LOGIN_NEEDED) {
1264 DEBUG2(printk(KERN_ERR "%s(%ld): Remote port not logged in, " 1281 ql_log(ql_log_warn, vha, 0x704b,
1265 "flags = 0x%x\n", 1282 "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1266 __func__, vha->host_no, fcport->flags));
1267 return -EINVAL; 1283 return -EINVAL;
1268 } 1284 }
1269 1285
@@ -1275,15 +1291,13 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1275 &port_param->speed, mb); 1291 &port_param->speed, mb);
1276 1292
1277 if (rval) { 1293 if (rval) {
1278 DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for " 1294 ql_log(ql_log_warn, vha, 0x704c,
1279 "%02x%02x%02x%02x%02x%02x%02x%02x -- " 1295 "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
1280 "%04x %x %04x %04x.\n", 1296 "%04x %x %04x %04x.\n", fcport->port_name[0],
1281 vha->host_no, fcport->port_name[0], 1297 fcport->port_name[1], fcport->port_name[2],
1282 fcport->port_name[1], 1298 fcport->port_name[3], fcport->port_name[4],
1283 fcport->port_name[2], fcport->port_name[3], 1299 fcport->port_name[5], fcport->port_name[6],
1284 fcport->port_name[4], fcport->port_name[5], 1300 fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
1285 fcport->port_name[6], fcport->port_name[7], rval,
1286 fcport->fp_speed, mb[0], mb[1]));
1287 rval = 0; 1301 rval = 0;
1288 bsg_job->reply->result = (DID_ERROR << 16); 1302 bsg_job->reply->result = (DID_ERROR << 16);
1289 1303
@@ -1307,11 +1321,12 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1307} 1321}
1308 1322
1309static int 1323static int
1310qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha, 1324qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1311 uint8_t is_update) 1325 uint8_t is_update)
1312{ 1326{
1313 uint32_t start = 0; 1327 uint32_t start = 0;
1314 int valid = 0; 1328 int valid = 0;
1329 struct qla_hw_data *ha = vha->hw;
1315 1330
1316 bsg_job->reply->reply_payload_rcv_len = 0; 1331 bsg_job->reply->reply_payload_rcv_len = 0;
1317 1332
@@ -1319,14 +1334,20 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
1319 return -EINVAL; 1334 return -EINVAL;
1320 1335
1321 start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 1336 start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1322 if (start > ha->optrom_size) 1337 if (start > ha->optrom_size) {
1338 ql_log(ql_log_warn, vha, 0x7055,
1339 "start %d > optrom_size %d.\n", start, ha->optrom_size);
1323 return -EINVAL; 1340 return -EINVAL;
1341 }
1324 1342
1325 if (ha->optrom_state != QLA_SWAITING) 1343 if (ha->optrom_state != QLA_SWAITING) {
1344 ql_log(ql_log_info, vha, 0x7056,
1345 "optrom_state %d.\n", ha->optrom_state);
1326 return -EBUSY; 1346 return -EBUSY;
1347 }
1327 1348
1328 ha->optrom_region_start = start; 1349 ha->optrom_region_start = start;
1329 1350 ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1330 if (is_update) { 1351 if (is_update) {
1331 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) 1352 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1332 valid = 1; 1353 valid = 1;
@@ -1337,9 +1358,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
1337 IS_QLA8XXX_TYPE(ha)) 1358 IS_QLA8XXX_TYPE(ha))
1338 valid = 1; 1359 valid = 1;
1339 if (!valid) { 1360 if (!valid) {
1340 qla_printk(KERN_WARNING, ha, 1361 ql_log(ql_log_warn, vha, 0x7058,
1341 "Invalid start region 0x%x/0x%x.\n", 1362 "Invalid start region 0x%x/0x%x.\n", start,
1342 start, bsg_job->request_payload.payload_len); 1363 bsg_job->request_payload.payload_len);
1343 return -EINVAL; 1364 return -EINVAL;
1344 } 1365 }
1345 1366
@@ -1358,9 +1379,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
1358 1379
1359 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 1380 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1360 if (!ha->optrom_buffer) { 1381 if (!ha->optrom_buffer) {
1361 qla_printk(KERN_WARNING, ha, 1382 ql_log(ql_log_warn, vha, 0x7059,
1362 "Read: Unable to allocate memory for optrom retrieval " 1383 "Read: Unable to allocate memory for optrom retrieval "
1363 "(%x).\n", ha->optrom_region_size); 1384 "(%x)\n", ha->optrom_region_size);
1364 1385
1365 ha->optrom_state = QLA_SWAITING; 1386 ha->optrom_state = QLA_SWAITING;
1366 return -ENOMEM; 1387 return -ENOMEM;
@@ -1378,7 +1399,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1378 struct qla_hw_data *ha = vha->hw; 1399 struct qla_hw_data *ha = vha->hw;
1379 int rval = 0; 1400 int rval = 0;
1380 1401
1381 rval = qla2x00_optrom_setup(bsg_job, ha, 0); 1402 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1382 if (rval) 1403 if (rval)
1383 return rval; 1404 return rval;
1384 1405
@@ -1406,7 +1427,7 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1406 struct qla_hw_data *ha = vha->hw; 1427 struct qla_hw_data *ha = vha->hw;
1407 int rval = 0; 1428 int rval = 0;
1408 1429
1409 rval = qla2x00_optrom_setup(bsg_job, ha, 1); 1430 rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1410 if (rval) 1431 if (rval)
1411 return rval; 1432 return rval;
1412 1433
@@ -1464,6 +1485,23 @@ int
1464qla24xx_bsg_request(struct fc_bsg_job *bsg_job) 1485qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1465{ 1486{
1466 int ret = -EINVAL; 1487 int ret = -EINVAL;
1488 struct fc_rport *rport;
1489 fc_port_t *fcport = NULL;
1490 struct Scsi_Host *host;
1491 scsi_qla_host_t *vha;
1492
1493 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1494 rport = bsg_job->rport;
1495 fcport = *(fc_port_t **) rport->dd_data;
1496 host = rport_to_shost(rport);
1497 vha = shost_priv(host);
1498 } else {
1499 host = bsg_job->shost;
1500 vha = shost_priv(host);
1501 }
1502
1503 ql_dbg(ql_dbg_user, vha, 0x7000,
1504 "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);
1467 1505
1468 switch (bsg_job->request->msgcode) { 1506 switch (bsg_job->request->msgcode) {
1469 case FC_BSG_RPT_ELS: 1507 case FC_BSG_RPT_ELS:
@@ -1480,7 +1518,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1480 case FC_BSG_HST_DEL_RPORT: 1518 case FC_BSG_HST_DEL_RPORT:
1481 case FC_BSG_RPT_CT: 1519 case FC_BSG_RPT_CT:
1482 default: 1520 default:
1483 DEBUG2(printk("qla2xxx: unsupported BSG request\n")); 1521 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
1484 break; 1522 break;
1485 } 1523 }
1486 return ret; 1524 return ret;
@@ -1514,17 +1552,15 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1514 && (sp_bsg->u.bsg_job == bsg_job)) { 1552 && (sp_bsg->u.bsg_job == bsg_job)) {
1515 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1553 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1516 if (ha->isp_ops->abort_command(sp)) { 1554 if (ha->isp_ops->abort_command(sp)) {
1517 DEBUG2(qla_printk(KERN_INFO, ha, 1555 ql_log(ql_log_warn, vha, 0x7089,
1518 "scsi(%ld): mbx " 1556 "mbx abort_command "
1519 "abort_command failed\n", 1557 "failed.\n");
1520 vha->host_no));
1521 bsg_job->req->errors = 1558 bsg_job->req->errors =
1522 bsg_job->reply->result = -EIO; 1559 bsg_job->reply->result = -EIO;
1523 } else { 1560 } else {
1524 DEBUG2(qla_printk(KERN_INFO, ha, 1561 ql_dbg(ql_dbg_user, vha, 0x708a,
1525 "scsi(%ld): mbx " 1562 "mbx abort_command "
1526 "abort_command success\n", 1563 "success.\n");
1527 vha->host_no));
1528 bsg_job->req->errors = 1564 bsg_job->req->errors =
1529 bsg_job->reply->result = 0; 1565 bsg_job->reply->result = 0;
1530 } 1566 }
@@ -1535,8 +1571,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1535 } 1571 }
1536 } 1572 }
1537 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1573 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1538 DEBUG2(qla_printk(KERN_INFO, ha, 1574 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
1539 "scsi(%ld) SRB not found to abort\n", vha->host_no));
1540 bsg_job->req->errors = bsg_job->reply->result = -ENXIO; 1575 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
1541 return 0; 1576 return 0;
1542 1577
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index c53719a9a747..2155071f3100 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -4,10 +4,36 @@
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7
8/*
9 * Table for showing the current message id in use for particular level
10 * Change this table for addition of log/debug messages.
11 * -----------------------------------------------------
12 * | Level | Last Value Used |
13 * -----------------------------------------------------
14 * | Module Init and Probe | 0x0116 |
15 * | Mailbox commands | 0x111e |
16 * | Device Discovery | 0x2083 |
17 * | Queue Command and IO tracing | 0x302e |
18 * | DPC Thread | 0x401c |
19 * | Async Events | 0x5059 |
20 * | Timer Routines | 0x600d |
21 * | User Space Interactions | 0x709c |
22 * | Task Management | 0x8043 |
23 * | AER/EEH | 0x900f |
24 * | Virtual Port | 0xa007 |
25 * | ISP82XX Specific | 0xb027 |
26 * | MultiQ | 0xc00b |
27 * | Misc | 0xd00b |
28 * -----------------------------------------------------
29 */
30
7#include "qla_def.h" 31#include "qla_def.h"
8 32
9#include <linux/delay.h> 33#include <linux/delay.h>
10 34
35static uint32_t ql_dbg_offset = 0x800;
36
11static inline void 37static inline void
12qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump) 38qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
13{ 39{
@@ -383,11 +409,11 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
383 struct qla_hw_data *ha = vha->hw; 409 struct qla_hw_data *ha = vha->hw;
384 410
385 if (rval != QLA_SUCCESS) { 411 if (rval != QLA_SUCCESS) {
386 qla_printk(KERN_WARNING, ha, 412 ql_log(ql_log_warn, vha, 0xd000,
387 "Failed to dump firmware (%x)!!!\n", rval); 413 "Failed to dump firmware (%x).\n", rval);
388 ha->fw_dumped = 0; 414 ha->fw_dumped = 0;
389 } else { 415 } else {
390 qla_printk(KERN_INFO, ha, 416 ql_log(ql_log_info, vha, 0xd001,
391 "Firmware dump saved to temp buffer (%ld/%p).\n", 417 "Firmware dump saved to temp buffer (%ld/%p).\n",
392 vha->host_no, ha->fw_dump); 418 vha->host_no, ha->fw_dump);
393 ha->fw_dumped = 1; 419 ha->fw_dumped = 1;
@@ -419,15 +445,16 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
419 spin_lock_irqsave(&ha->hardware_lock, flags); 445 spin_lock_irqsave(&ha->hardware_lock, flags);
420 446
421 if (!ha->fw_dump) { 447 if (!ha->fw_dump) {
422 qla_printk(KERN_WARNING, ha, 448 ql_log(ql_log_warn, vha, 0xd002,
423 "No buffer available for dump!!!\n"); 449 "No buffer available for dump.\n");
424 goto qla2300_fw_dump_failed; 450 goto qla2300_fw_dump_failed;
425 } 451 }
426 452
427 if (ha->fw_dumped) { 453 if (ha->fw_dumped) {
428 qla_printk(KERN_WARNING, ha, 454 ql_log(ql_log_warn, vha, 0xd003,
429 "Firmware has been previously dumped (%p) -- ignoring " 455 "Firmware has been previously dumped (%p) "
430 "request...\n", ha->fw_dump); 456 "-- ignoring request.\n",
457 ha->fw_dump);
431 goto qla2300_fw_dump_failed; 458 goto qla2300_fw_dump_failed;
432 } 459 }
433 fw = &ha->fw_dump->isp.isp23; 460 fw = &ha->fw_dump->isp.isp23;
@@ -582,15 +609,16 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
582 spin_lock_irqsave(&ha->hardware_lock, flags); 609 spin_lock_irqsave(&ha->hardware_lock, flags);
583 610
584 if (!ha->fw_dump) { 611 if (!ha->fw_dump) {
585 qla_printk(KERN_WARNING, ha, 612 ql_log(ql_log_warn, vha, 0xd004,
586 "No buffer available for dump!!!\n"); 613 "No buffer available for dump.\n");
587 goto qla2100_fw_dump_failed; 614 goto qla2100_fw_dump_failed;
588 } 615 }
589 616
590 if (ha->fw_dumped) { 617 if (ha->fw_dumped) {
591 qla_printk(KERN_WARNING, ha, 618 ql_log(ql_log_warn, vha, 0xd005,
592 "Firmware has been previously dumped (%p) -- ignoring " 619 "Firmware has been previously dumped (%p) "
593 "request...\n", ha->fw_dump); 620 "-- ignoring request.\n",
621 ha->fw_dump);
594 goto qla2100_fw_dump_failed; 622 goto qla2100_fw_dump_failed;
595 } 623 }
596 fw = &ha->fw_dump->isp.isp21; 624 fw = &ha->fw_dump->isp.isp21;
@@ -779,15 +807,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
779 spin_lock_irqsave(&ha->hardware_lock, flags); 807 spin_lock_irqsave(&ha->hardware_lock, flags);
780 808
781 if (!ha->fw_dump) { 809 if (!ha->fw_dump) {
782 qla_printk(KERN_WARNING, ha, 810 ql_log(ql_log_warn, vha, 0xd006,
783 "No buffer available for dump!!!\n"); 811 "No buffer available for dump.\n");
784 goto qla24xx_fw_dump_failed; 812 goto qla24xx_fw_dump_failed;
785 } 813 }
786 814
787 if (ha->fw_dumped) { 815 if (ha->fw_dumped) {
788 qla_printk(KERN_WARNING, ha, 816 ql_log(ql_log_warn, vha, 0xd007,
789 "Firmware has been previously dumped (%p) -- ignoring " 817 "Firmware has been previously dumped (%p) "
790 "request...\n", ha->fw_dump); 818 "-- ignoring request.\n",
819 ha->fw_dump);
791 goto qla24xx_fw_dump_failed; 820 goto qla24xx_fw_dump_failed;
792 } 821 }
793 fw = &ha->fw_dump->isp.isp24; 822 fw = &ha->fw_dump->isp.isp24;
@@ -1017,15 +1046,16 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1017 spin_lock_irqsave(&ha->hardware_lock, flags); 1046 spin_lock_irqsave(&ha->hardware_lock, flags);
1018 1047
1019 if (!ha->fw_dump) { 1048 if (!ha->fw_dump) {
1020 qla_printk(KERN_WARNING, ha, 1049 ql_log(ql_log_warn, vha, 0xd008,
1021 "No buffer available for dump!!!\n"); 1050 "No buffer available for dump.\n");
1022 goto qla25xx_fw_dump_failed; 1051 goto qla25xx_fw_dump_failed;
1023 } 1052 }
1024 1053
1025 if (ha->fw_dumped) { 1054 if (ha->fw_dumped) {
1026 qla_printk(KERN_WARNING, ha, 1055 ql_log(ql_log_warn, vha, 0xd009,
1027 "Firmware has been previously dumped (%p) -- ignoring " 1056 "Firmware has been previously dumped (%p) "
1028 "request...\n", ha->fw_dump); 1057 "-- ignoring request.\n",
1058 ha->fw_dump);
1029 goto qla25xx_fw_dump_failed; 1059 goto qla25xx_fw_dump_failed;
1030 } 1060 }
1031 fw = &ha->fw_dump->isp.isp25; 1061 fw = &ha->fw_dump->isp.isp25;
@@ -1328,15 +1358,16 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1328 spin_lock_irqsave(&ha->hardware_lock, flags); 1358 spin_lock_irqsave(&ha->hardware_lock, flags);
1329 1359
1330 if (!ha->fw_dump) { 1360 if (!ha->fw_dump) {
1331 qla_printk(KERN_WARNING, ha, 1361 ql_log(ql_log_warn, vha, 0xd00a,
1332 "No buffer available for dump!!!\n"); 1362 "No buffer available for dump.\n");
1333 goto qla81xx_fw_dump_failed; 1363 goto qla81xx_fw_dump_failed;
1334 } 1364 }
1335 1365
1336 if (ha->fw_dumped) { 1366 if (ha->fw_dumped) {
1337 qla_printk(KERN_WARNING, ha, 1367 ql_log(ql_log_warn, vha, 0xd00b,
1338 "Firmware has been previously dumped (%p) -- ignoring " 1368 "Firmware has been previously dumped (%p) "
1339 "request...\n", ha->fw_dump); 1369 "-- ignoring request.\n",
1370 ha->fw_dump);
1340 goto qla81xx_fw_dump_failed; 1371 goto qla81xx_fw_dump_failed;
1341 } 1372 }
1342 fw = &ha->fw_dump->isp.isp81; 1373 fw = &ha->fw_dump->isp.isp81;
@@ -1619,106 +1650,255 @@ qla81xx_fw_dump_failed:
1619/****************************************************************************/ 1650/****************************************************************************/
1620/* Driver Debug Functions. */ 1651/* Driver Debug Functions. */
1621/****************************************************************************/ 1652/****************************************************************************/
1622 1653/*
1654 * This function is for formatting and logging debug information.
1655 * It is to be used when vha is available. It formats the message
1656 * and logs it to the messages file.
1657 * parameters:
1658 * level: The level of the debug messages to be printed.
1659 * If ql2xextended_error_logging value is correctly set,
1660 * this message will appear in the messages file.
1661 * vha: Pointer to the scsi_qla_host_t.
1662 * id: This is a unique identifier for the level. It identifies the
1663 * part of the code from where the message originated.
1664 * msg: The message to be displayed.
1665 */
1623void 1666void
1624qla2x00_dump_regs(scsi_qla_host_t *vha) 1667ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
1625{ 1668
1626 int i; 1669 char pbuf[QL_DBG_BUF_LEN];
1627 struct qla_hw_data *ha = vha->hw; 1670 va_list ap;
1628 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1671 uint32_t len;
1629 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 1672 struct pci_dev *pdev = NULL;
1630 uint16_t __iomem *mbx_reg; 1673
1674 memset(pbuf, 0, QL_DBG_BUF_LEN);
1675
1676 va_start(ap, msg);
1677
1678 if ((level & ql2xextended_error_logging) == level) {
1679 if (vha != NULL) {
1680 pdev = vha->hw->pdev;
1681 /* <module-name> <pci-name> <msg-id>:<host> Message */
1682 sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
1683 dev_name(&(pdev->dev)), id + ql_dbg_offset,
1684 vha->host_no);
1685 } else
1686 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
1687 "0000:00:00.0", id + ql_dbg_offset);
1688
1689 len = strlen(pbuf);
1690 vsprintf(pbuf+len, msg, ap);
1691 pr_warning("%s", pbuf);
1692 }
1631 1693
1632 mbx_reg = IS_FWI2_CAPABLE(ha) ? &reg24->mailbox0: 1694 va_end(ap);
1633 MAILBOX_REG(ha, reg, 0);
1634 1695
1635 printk("Mailbox registers:\n");
1636 for (i = 0; i < 6; i++)
1637 printk("scsi(%ld): mbox %d 0x%04x \n", vha->host_no, i,
1638 RD_REG_WORD(mbx_reg++));
1639} 1696}
1640 1697
1641 1698/*
1699 * This function is for formatting and logging debug information.
1700 * It is to be used when vha is not available and pci is availble,
1701 * i.e., before host allocation. It formats the message and logs it
1702 * to the messages file.
1703 * parameters:
1704 * level: The level of the debug messages to be printed.
1705 * If ql2xextended_error_logging value is correctly set,
1706 * this message will appear in the messages file.
1707 * pdev: Pointer to the struct pci_dev.
1708 * id: This is a unique id for the level. It identifies the part
1709 * of the code from where the message originated.
1710 * msg: The message to be displayed.
1711 */
1642void 1712void
1643qla2x00_dump_buffer(uint8_t * b, uint32_t size) 1713ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
1644{
1645 uint32_t cnt;
1646 uint8_t c;
1647 1714
1648 printk(" 0 1 2 3 4 5 6 7 8 9 " 1715 char pbuf[QL_DBG_BUF_LEN];
1649 "Ah Bh Ch Dh Eh Fh\n"); 1716 va_list ap;
1650 printk("----------------------------------------" 1717 uint32_t len;
1651 "----------------------\n"); 1718
1652 1719 if (pdev == NULL)
1653 for (cnt = 0; cnt < size;) { 1720 return;
1654 c = *b++; 1721
1655 printk("%02x",(uint32_t) c); 1722 memset(pbuf, 0, QL_DBG_BUF_LEN);
1656 cnt++; 1723
1657 if (!(cnt % 16)) 1724 va_start(ap, msg);
1658 printk("\n"); 1725
1659 else 1726 if ((level & ql2xextended_error_logging) == level) {
1660 printk(" "); 1727 /* <module-name> <dev-name>:<msg-id> Message */
1728 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
1729 dev_name(&(pdev->dev)), id + ql_dbg_offset);
1730
1731 len = strlen(pbuf);
1732 vsprintf(pbuf+len, msg, ap);
1733 pr_warning("%s", pbuf);
1661 } 1734 }
1662 if (cnt % 16) 1735
1663 printk("\n"); 1736 va_end(ap);
1737
1664} 1738}
1665 1739
1740/*
1741 * This function is for formatting and logging log messages.
1742 * It is to be used when vha is available. It formats the message
1743 * and logs it to the messages file. All the messages will be logged
1744 * irrespective of value of ql2xextended_error_logging.
1745 * parameters:
1746 * level: The level of the log messages to be printed in the
1747 * messages file.
1748 * vha: Pointer to the scsi_qla_host_t
1749 * id: This is a unique id for the level. It identifies the
1750 * part of the code from where the message originated.
1751 * msg: The message to be displayed.
1752 */
1666void 1753void
1667qla2x00_dump_buffer_zipped(uint8_t *b, uint32_t size) 1754ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
1668{
1669 uint32_t cnt;
1670 uint8_t c;
1671 uint8_t last16[16], cur16[16];
1672 uint32_t lc = 0, num_same16 = 0, j;
1673 1755
1674 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 " 1756 char pbuf[QL_DBG_BUF_LEN];
1675 "Ah Bh Ch Dh Eh Fh\n"); 1757 va_list ap;
1676 printk(KERN_DEBUG "----------------------------------------" 1758 uint32_t len;
1677 "----------------------\n"); 1759 struct pci_dev *pdev = NULL;
1678 1760
1679 for (cnt = 0; cnt < size;) { 1761 memset(pbuf, 0, QL_DBG_BUF_LEN);
1680 c = *b++;
1681 1762
1682 cur16[lc++] = c; 1763 va_start(ap, msg);
1683 1764
1684 cnt++; 1765 if (level <= ql_errlev) {
1685 if (cnt % 16) 1766 if (vha != NULL) {
1686 continue; 1767 pdev = vha->hw->pdev;
1687 1768 /* <module-name> <msg-id>:<host> Message */
1688 /* We have 16 now */ 1769 sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
1689 lc = 0; 1770 dev_name(&(pdev->dev)), id, vha->host_no);
1690 if (num_same16 == 0) { 1771 } else
1691 memcpy(last16, cur16, 16); 1772 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
1692 num_same16++; 1773 "0000:00:00.0", id);
1693 continue; 1774
1775 len = strlen(pbuf);
1776 vsprintf(pbuf+len, msg, ap);
1777
1778 switch (level) {
1779 case 0: /* FATAL LOG */
1780 pr_crit("%s", pbuf);
1781 break;
1782 case 1:
1783 pr_err("%s", pbuf);
1784 break;
1785 case 2:
1786 pr_warn("%s", pbuf);
1787 break;
1788 default:
1789 pr_info("%s", pbuf);
1790 break;
1694 } 1791 }
1695 if (memcmp(cur16, last16, 16) == 0) { 1792 }
1696 num_same16++; 1793
1697 continue; 1794 va_end(ap);
1795}
1796
1797/*
1798 * This function is for formatting and logging log messages.
1799 * It is to be used when vha is not available and pci is availble,
1800 * i.e., before host allocation. It formats the message and logs
1801 * it to the messages file. All the messages are logged irrespective
1802 * of the value of ql2xextended_error_logging.
1803 * parameters:
1804 * level: The level of the log messages to be printed in the
1805 * messages file.
1806 * pdev: Pointer to the struct pci_dev.
1807 * id: This is a unique id for the level. It identifies the
1808 * part of the code from where the message originated.
1809 * msg: The message to be displayed.
1810 */
1811void
1812ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
1813
1814 char pbuf[QL_DBG_BUF_LEN];
1815 va_list ap;
1816 uint32_t len;
1817
1818 if (pdev == NULL)
1819 return;
1820
1821 memset(pbuf, 0, QL_DBG_BUF_LEN);
1822
1823 va_start(ap, msg);
1824
1825 if (level <= ql_errlev) {
1826 /* <module-name> <dev-name>:<msg-id> Message */
1827 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
1828 dev_name(&(pdev->dev)), id);
1829
1830 len = strlen(pbuf);
1831 vsprintf(pbuf+len, msg, ap);
1832 switch (level) {
1833 case 0: /* FATAL LOG */
1834 pr_crit("%s", pbuf);
1835 break;
1836 case 1:
1837 pr_err("%s", pbuf);
1838 break;
1839 case 2:
1840 pr_warn("%s", pbuf);
1841 break;
1842 default:
1843 pr_info("%s", pbuf);
1844 break;
1698 } 1845 }
1699 for (j = 0; j < 16; j++)
1700 printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
1701 printk(KERN_DEBUG "\n");
1702
1703 if (num_same16 > 1)
1704 printk(KERN_DEBUG "> prev pattern repeats (%u)"
1705 "more times\n", num_same16-1);
1706 memcpy(last16, cur16, 16);
1707 num_same16 = 1;
1708 } 1846 }
1709 1847
1710 if (num_same16) { 1848 va_end(ap);
1711 for (j = 0; j < 16; j++) 1849}
1712 printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
1713 printk(KERN_DEBUG "\n");
1714 1850
1715 if (num_same16 > 1) 1851void
1716 printk(KERN_DEBUG "> prev pattern repeats (%u)" 1852ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
1717 "more times\n", num_same16-1); 1853{
1854 int i;
1855 struct qla_hw_data *ha = vha->hw;
1856 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1857 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
1858 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1859 uint16_t __iomem *mbx_reg;
1860
1861 if ((level & ql2xextended_error_logging) == level) {
1862
1863 if (IS_QLA82XX(ha))
1864 mbx_reg = &reg82->mailbox_in[0];
1865 else if (IS_FWI2_CAPABLE(ha))
1866 mbx_reg = &reg24->mailbox0;
1867 else
1868 mbx_reg = MAILBOX_REG(ha, reg, 0);
1869
1870 ql_dbg(level, vha, id, "Mailbox registers:\n");
1871 for (i = 0; i < 6; i++)
1872 ql_dbg(level, vha, id,
1873 "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
1718 } 1874 }
1719 if (lc) { 1875}
1720 for (j = 0; j < lc; j++) 1876
1721 printk(KERN_DEBUG "%02x ", (uint32_t)cur16[j]); 1877
1722 printk(KERN_DEBUG "\n"); 1878void
1879ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
1880 uint8_t *b, uint32_t size)
1881{
1882 uint32_t cnt;
1883 uint8_t c;
1884 if ((level & ql2xextended_error_logging) == level) {
1885
1886 ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
1887 "9 Ah Bh Ch Dh Eh Fh\n");
1888 ql_dbg(level, vha, id, "----------------------------------"
1889 "----------------------------\n");
1890
1891 ql_dbg(level, vha, id, "");
1892 for (cnt = 0; cnt < size;) {
1893 c = *b++;
1894 printk("%02x", (uint32_t) c);
1895 cnt++;
1896 if (!(cnt % 16))
1897 printk("\n");
1898 else
1899 printk(" ");
1900 }
1901 if (cnt % 16)
1902 ql_dbg(level, vha, id, "\n");
1723 } 1903 }
1724} 1904}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 930414541ec6..98a377b99017 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -8,146 +8,6 @@
8#include "qla_def.h" 8#include "qla_def.h"
9 9
10/* 10/*
11 * Driver debug definitions.
12 */
13/* #define QL_DEBUG_LEVEL_1 */ /* Output register accesses to COM1 */
14/* #define QL_DEBUG_LEVEL_2 */ /* Output error msgs to COM1 */
15/* #define QL_DEBUG_LEVEL_3 */ /* Output function trace msgs to COM1 */
16/* #define QL_DEBUG_LEVEL_4 */ /* Output NVRAM trace msgs to COM1 */
17/* #define QL_DEBUG_LEVEL_5 */ /* Output ring trace msgs to COM1 */
18/* #define QL_DEBUG_LEVEL_6 */ /* Output WATCHDOG timer trace to COM1 */
19/* #define QL_DEBUG_LEVEL_7 */ /* Output RISC load trace msgs to COM1 */
20/* #define QL_DEBUG_LEVEL_8 */ /* Output ring saturation msgs to COM1 */
21/* #define QL_DEBUG_LEVEL_9 */ /* Output IOCTL trace msgs */
22/* #define QL_DEBUG_LEVEL_10 */ /* Output IOCTL error msgs */
23/* #define QL_DEBUG_LEVEL_11 */ /* Output Mbx Cmd trace msgs */
24/* #define QL_DEBUG_LEVEL_12 */ /* Output IP trace msgs */
25/* #define QL_DEBUG_LEVEL_13 */ /* Output fdmi function trace msgs */
26/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
27/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
28/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
29/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
30/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
31
32/*
33* Macros use for debugging the driver.
34*/
35
36#define DEBUG(x) do { if (ql2xextended_error_logging) { x; } } while (0)
37
38#if defined(QL_DEBUG_LEVEL_1)
39#define DEBUG1(x) do {x;} while (0)
40#else
41#define DEBUG1(x) do {} while (0)
42#endif
43
44#define DEBUG2(x) do { if (ql2xextended_error_logging) { x; } } while (0)
45#define DEBUG2_3(x) do { if (ql2xextended_error_logging) { x; } } while (0)
46#define DEBUG2_3_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
47#define DEBUG2_9_10(x) do { if (ql2xextended_error_logging) { x; } } while (0)
48#define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
49#define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0)
50#define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0)
51#define DEBUG2_17(x) do { if (ql2xextended_error_logging) { x; } } while (0)
52
53#if defined(QL_DEBUG_LEVEL_3)
54#define DEBUG3(x) do {x;} while (0)
55#define DEBUG3_11(x) do {x;} while (0)
56#else
57#define DEBUG3(x) do {} while (0)
58#endif
59
60#if defined(QL_DEBUG_LEVEL_4)
61#define DEBUG4(x) do {x;} while (0)
62#else
63#define DEBUG4(x) do {} while (0)
64#endif
65
66#if defined(QL_DEBUG_LEVEL_5)
67#define DEBUG5(x) do {x;} while (0)
68#else
69#define DEBUG5(x) do {} while (0)
70#endif
71
72#if defined(QL_DEBUG_LEVEL_7)
73#define DEBUG7(x) do {x;} while (0)
74#else
75#define DEBUG7(x) do {} while (0)
76#endif
77
78#if defined(QL_DEBUG_LEVEL_9)
79#define DEBUG9(x) do {x;} while (0)
80#define DEBUG9_10(x) do {x;} while (0)
81#else
82#define DEBUG9(x) do {} while (0)
83#endif
84
85#if defined(QL_DEBUG_LEVEL_10)
86#define DEBUG10(x) do {x;} while (0)
87#define DEBUG9_10(x) do {x;} while (0)
88#else
89#define DEBUG10(x) do {} while (0)
90 #if !defined(DEBUG9_10)
91 #define DEBUG9_10(x) do {} while (0)
92 #endif
93#endif
94
95#if defined(QL_DEBUG_LEVEL_11)
96#define DEBUG11(x) do{x;} while(0)
97#if !defined(DEBUG3_11)
98#define DEBUG3_11(x) do{x;} while(0)
99#endif
100#else
101#define DEBUG11(x) do{} while(0)
102 #if !defined(QL_DEBUG_LEVEL_3)
103 #define DEBUG3_11(x) do{} while(0)
104 #endif
105#endif
106
107#if defined(QL_DEBUG_LEVEL_12)
108#define DEBUG12(x) do {x;} while (0)
109#else
110#define DEBUG12(x) do {} while (0)
111#endif
112
113#if defined(QL_DEBUG_LEVEL_13)
114#define DEBUG13(x) do {x;} while (0)
115#else
116#define DEBUG13(x) do {} while (0)
117#endif
118
119#if defined(QL_DEBUG_LEVEL_14)
120#define DEBUG14(x) do {x;} while (0)
121#else
122#define DEBUG14(x) do {} while (0)
123#endif
124
125#if defined(QL_DEBUG_LEVEL_15)
126#define DEBUG15(x) do {x;} while (0)
127#else
128#define DEBUG15(x) do {} while (0)
129#endif
130
131#if defined(QL_DEBUG_LEVEL_16)
132#define DEBUG16(x) do {x;} while (0)
133#else
134#define DEBUG16(x) do {} while (0)
135#endif
136
137#if defined(QL_DEBUG_LEVEL_17)
138#define DEBUG17(x) do {x;} while (0)
139#else
140#define DEBUG17(x) do {} while (0)
141#endif
142
143#if defined(QL_DEBUG_LEVEL_18)
144#define DEBUG18(x) do {if (ql2xextended_error_logging) x; } while (0)
145#else
146#define DEBUG18(x) do {} while (0)
147#endif
148
149
150/*
151 * Firmware Dump structure definition 11 * Firmware Dump structure definition
152 */ 12 */
153 13
@@ -370,3 +230,50 @@ struct qla2xxx_fw_dump {
370 struct qla81xx_fw_dump isp81; 230 struct qla81xx_fw_dump isp81;
371 } isp; 231 } isp;
372}; 232};
233
234#define QL_MSGHDR "qla2xxx"
235
236#define ql_log_fatal 0 /* display fatal errors */
237#define ql_log_warn 1 /* display critical errors */
238#define ql_log_info 2 /* display all recovered errors */
239#define ql_log_all 3 /* This value is only used by ql_errlev.
240 * No messages will use this value.
241 * This should be always highest value
242 * as compared to other log levels.
243 */
244
245extern int ql_errlev;
246
247void
248ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
249void
250ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
251
252void
253ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
254void
255ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
256
257/* Debug Levels */
258/* The 0x40000000 is the max value any debug level can have
259 * as ql2xextended_error_logging is of type signed int
260 */
261#define ql_dbg_init 0x40000000 /* Init Debug */
262#define ql_dbg_mbx 0x20000000 /* MBX Debug */
263#define ql_dbg_disc 0x10000000 /* Device Discovery Debug */
264#define ql_dbg_io 0x08000000 /* IO Tracing Debug */
265#define ql_dbg_dpc 0x04000000 /* DPC Thead Debug */
266#define ql_dbg_async 0x02000000 /* Async events Debug */
267#define ql_dbg_timer 0x01000000 /* Timer Debug */
268#define ql_dbg_user 0x00800000 /* User Space Interations Debug */
269#define ql_dbg_taskm 0x00400000 /* Task Management Debug */
270#define ql_dbg_aer 0x00200000 /* AER/EEH Debug */
271#define ql_dbg_multiq 0x00100000 /* MultiQ Debug */
272#define ql_dbg_p3p 0x00080000 /* P3P specific Debug */
273#define ql_dbg_vport 0x00040000 /* Virtual Port Debug */
274#define ql_dbg_buffer 0x00020000 /* For dumping the buffer/regs */
275#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
276 * not covered by upper categories
277 */
278
279#define QL_DBG_BUF_LEN 512
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index a5a4e1275bf2..0b4c2b794c6f 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -64,7 +64,7 @@ qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
64 /* Pause tracing to flush FCE buffers. */ 64 /* Pause tracing to flush FCE buffers. */
65 rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd); 65 rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
66 if (rval) 66 if (rval)
67 qla_printk(KERN_WARNING, ha, 67 ql_dbg(ql_dbg_user, vha, 0x705c,
68 "DebugFS: Unable to disable FCE (%d).\n", rval); 68 "DebugFS: Unable to disable FCE (%d).\n", rval);
69 69
70 ha->flags.fce_enabled = 0; 70 ha->flags.fce_enabled = 0;
@@ -92,7 +92,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
92 rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs, 92 rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
93 ha->fce_mb, &ha->fce_bufs); 93 ha->fce_mb, &ha->fce_bufs);
94 if (rval) { 94 if (rval) {
95 qla_printk(KERN_WARNING, ha, 95 ql_dbg(ql_dbg_user, vha, 0x700d,
96 "DebugFS: Unable to reinitialize FCE (%d).\n", rval); 96 "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
97 ha->flags.fce_enabled = 0; 97 ha->flags.fce_enabled = 0;
98 } 98 }
@@ -125,8 +125,8 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
125 atomic_set(&qla2x00_dfs_root_count, 0); 125 atomic_set(&qla2x00_dfs_root_count, 0);
126 qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL); 126 qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
127 if (!qla2x00_dfs_root) { 127 if (!qla2x00_dfs_root) {
128 qla_printk(KERN_NOTICE, ha, 128 ql_log(ql_log_warn, vha, 0x00f7,
129 "DebugFS: Unable to create root directory.\n"); 129 "Unable to create debugfs root directory.\n");
130 goto out; 130 goto out;
131 } 131 }
132 132
@@ -137,8 +137,8 @@ create_dir:
137 mutex_init(&ha->fce_mutex); 137 mutex_init(&ha->fce_mutex);
138 ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root); 138 ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
139 if (!ha->dfs_dir) { 139 if (!ha->dfs_dir) {
140 qla_printk(KERN_NOTICE, ha, 140 ql_log(ql_log_warn, vha, 0x00f8,
141 "DebugFS: Unable to create ha directory.\n"); 141 "Unable to create debugfs ha directory.\n");
142 goto out; 142 goto out;
143 } 143 }
144 144
@@ -148,8 +148,8 @@ create_nodes:
148 ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, 148 ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
149 &dfs_fce_ops); 149 &dfs_fce_ops);
150 if (!ha->dfs_fce) { 150 if (!ha->dfs_fce) {
151 qla_printk(KERN_NOTICE, ha, 151 ql_log(ql_log_warn, vha, 0x00f9,
152 "DebugFS: Unable to fce node.\n"); 152 "Unable to create debugfs fce node.\n");
153 goto out; 153 goto out;
154 } 154 }
155out: 155out:
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b381224ae4b..29b1a3e28231 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -185,7 +185,7 @@ extern int qla24xx_start_scsi(srb_t *sp);
185int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, 185int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
186 uint16_t, uint16_t, uint8_t); 186 uint16_t, uint16_t, uint8_t);
187extern int qla2x00_start_sp(srb_t *); 187extern int qla2x00_start_sp(srb_t *);
188extern uint16_t qla24xx_calc_iocbs(uint16_t); 188extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
189extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); 189extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
190extern int qla24xx_dif_start_scsi(srb_t *); 190extern int qla24xx_dif_start_scsi(srb_t *);
191 191
@@ -439,6 +439,9 @@ extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
439extern void qla2x00_dump_regs(scsi_qla_host_t *); 439extern void qla2x00_dump_regs(scsi_qla_host_t *);
440extern void qla2x00_dump_buffer(uint8_t *, uint32_t); 440extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
441extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t); 441extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
442extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
443extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
444 uint8_t *, uint32_t);
442 445
443/* 446/*
444 * Global Function Prototypes in qla_gs.c source file. 447 * Global Function Prototypes in qla_gs.c source file.
@@ -478,7 +481,8 @@ extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16
478extern int qla2x00_echo_test(scsi_qla_host_t *, 481extern int qla2x00_echo_test(scsi_qla_host_t *,
479 struct msg_echo_lb *, uint16_t *); 482 struct msg_echo_lb *, uint16_t *);
480extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *); 483extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
481extern int qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *, uint8_t); 484extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
485 struct qla_fcp_prio_cfg *, uint8_t);
482 486
483/* 487/*
484 * Global Function Prototypes in qla_dfs.c source file. 488 * Global Function Prototypes in qla_dfs.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 8cd9066ad906..37937aa3c3b8 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -121,11 +121,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
121 121
122 rval = QLA_FUNCTION_FAILED; 122 rval = QLA_FUNCTION_FAILED;
123 if (ms_pkt->entry_status != 0) { 123 if (ms_pkt->entry_status != 0) {
124 DEBUG2_3(printk(KERN_WARNING "scsi(%ld): %s failed, error status " 124 ql_dbg(ql_dbg_disc, vha, 0x2031,
125 "(%x) on port_id: %02x%02x%02x.\n", 125 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
126 vha->host_no, routine, ms_pkt->entry_status, 126 routine, ms_pkt->entry_status, vha->d_id.b.domain,
127 vha->d_id.b.domain, vha->d_id.b.area, 127 vha->d_id.b.area, vha->d_id.b.al_pa);
128 vha->d_id.b.al_pa));
129 } else { 128 } else {
130 if (IS_FWI2_CAPABLE(ha)) 129 if (IS_FWI2_CAPABLE(ha))
131 comp_status = le16_to_cpu( 130 comp_status = le16_to_cpu(
@@ -138,24 +137,24 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
138 case CS_DATA_OVERRUN: /* Overrun? */ 137 case CS_DATA_OVERRUN: /* Overrun? */
139 if (ct_rsp->header.response != 138 if (ct_rsp->header.response !=
140 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { 139 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
141 DEBUG2_3(printk("scsi(%ld): %s failed, " 140 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
142 "rejected request on port_id: %02x%02x%02x\n", 141 "%s failed rejected request on port_id: "
143 vha->host_no, routine, 142 "%02x%02x%02x.\n", routine,
144 vha->d_id.b.domain, vha->d_id.b.area, 143 vha->d_id.b.domain, vha->d_id.b.area,
145 vha->d_id.b.al_pa)); 144 vha->d_id.b.al_pa);
146 DEBUG2_3(qla2x00_dump_buffer( 145 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
147 (uint8_t *)&ct_rsp->header, 146 0x2078, (uint8_t *)&ct_rsp->header,
148 sizeof(struct ct_rsp_hdr))); 147 sizeof(struct ct_rsp_hdr));
149 rval = QLA_INVALID_COMMAND; 148 rval = QLA_INVALID_COMMAND;
150 } else 149 } else
151 rval = QLA_SUCCESS; 150 rval = QLA_SUCCESS;
152 break; 151 break;
153 default: 152 default:
154 DEBUG2_3(printk("scsi(%ld): %s failed, completion " 153 ql_dbg(ql_dbg_disc, vha, 0x2033,
155 "status (%x) on port_id: %02x%02x%02x.\n", 154 "%s failed, completion status (%x) on port_id: "
156 vha->host_no, routine, comp_status, 155 "%02x%02x%02x.\n", routine, comp_status,
157 vha->d_id.b.domain, vha->d_id.b.area, 156 vha->d_id.b.domain, vha->d_id.b.area,
158 vha->d_id.b.al_pa)); 157 vha->d_id.b.al_pa);
159 break; 158 break;
160 } 159 }
161 } 160 }
@@ -202,8 +201,8 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
202 sizeof(ms_iocb_entry_t)); 201 sizeof(ms_iocb_entry_t));
203 if (rval != QLA_SUCCESS) { 202 if (rval != QLA_SUCCESS) {
204 /*EMPTY*/ 203 /*EMPTY*/
205 DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n", 204 ql_dbg(ql_dbg_disc, vha, 0x2062,
206 vha->host_no, rval)); 205 "GA_NXT issue IOCB failed (%d).\n", rval);
207 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") != 206 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
208 QLA_SUCCESS) { 207 QLA_SUCCESS) {
209 rval = QLA_FUNCTION_FAILED; 208 rval = QLA_FUNCTION_FAILED;
@@ -222,11 +221,10 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
222 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) 221 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
223 fcport->d_id.b.domain = 0xf0; 222 fcport->d_id.b.domain = 0xf0;
224 223
225 DEBUG2_3(printk("scsi(%ld): GA_NXT entry - " 224 ql_dbg(ql_dbg_disc, vha, 0x2063,
226 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 225 "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
227 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 226 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
228 "portid=%02x%02x%02x.\n", 227 "port_id=%02x%02x%02x.\n",
229 vha->host_no,
230 fcport->node_name[0], fcport->node_name[1], 228 fcport->node_name[0], fcport->node_name[1],
231 fcport->node_name[2], fcport->node_name[3], 229 fcport->node_name[2], fcport->node_name[3],
232 fcport->node_name[4], fcport->node_name[5], 230 fcport->node_name[4], fcport->node_name[5],
@@ -236,7 +234,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
236 fcport->port_name[4], fcport->port_name[5], 234 fcport->port_name[4], fcport->port_name[5],
237 fcport->port_name[6], fcport->port_name[7], 235 fcport->port_name[6], fcport->port_name[7],
238 fcport->d_id.b.domain, fcport->d_id.b.area, 236 fcport->d_id.b.domain, fcport->d_id.b.area,
239 fcport->d_id.b.al_pa)); 237 fcport->d_id.b.al_pa);
240 } 238 }
241 239
242 return (rval); 240 return (rval);
@@ -287,8 +285,8 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
287 sizeof(ms_iocb_entry_t)); 285 sizeof(ms_iocb_entry_t));
288 if (rval != QLA_SUCCESS) { 286 if (rval != QLA_SUCCESS) {
289 /*EMPTY*/ 287 /*EMPTY*/
290 DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n", 288 ql_dbg(ql_dbg_disc, vha, 0x2055,
291 vha->host_no, rval)); 289 "GID_PT issue IOCB failed (%d).\n", rval);
292 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") != 290 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
293 QLA_SUCCESS) { 291 QLA_SUCCESS) {
294 rval = QLA_FUNCTION_FAILED; 292 rval = QLA_FUNCTION_FAILED;
@@ -364,8 +362,8 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
364 sizeof(ms_iocb_entry_t)); 362 sizeof(ms_iocb_entry_t));
365 if (rval != QLA_SUCCESS) { 363 if (rval != QLA_SUCCESS) {
366 /*EMPTY*/ 364 /*EMPTY*/
367 DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed " 365 ql_dbg(ql_dbg_disc, vha, 0x2056,
368 "(%d).\n", vha->host_no, rval)); 366 "GPN_ID issue IOCB failed (%d).\n", rval);
369 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 367 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
370 "GPN_ID") != QLA_SUCCESS) { 368 "GPN_ID") != QLA_SUCCESS) {
371 rval = QLA_FUNCTION_FAILED; 369 rval = QLA_FUNCTION_FAILED;
@@ -424,8 +422,8 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
424 sizeof(ms_iocb_entry_t)); 422 sizeof(ms_iocb_entry_t));
425 if (rval != QLA_SUCCESS) { 423 if (rval != QLA_SUCCESS) {
426 /*EMPTY*/ 424 /*EMPTY*/
427 DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed " 425 ql_dbg(ql_dbg_disc, vha, 0x2057,
428 "(%d).\n", vha->host_no, rval)); 426 "GNN_ID issue IOCB failed (%d).\n", rval);
429 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 427 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
430 "GNN_ID") != QLA_SUCCESS) { 428 "GNN_ID") != QLA_SUCCESS) {
431 rval = QLA_FUNCTION_FAILED; 429 rval = QLA_FUNCTION_FAILED;
@@ -434,11 +432,10 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
434 memcpy(list[i].node_name, 432 memcpy(list[i].node_name,
435 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE); 433 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
436 434
437 DEBUG2_3(printk("scsi(%ld): GID_PT entry - " 435 ql_dbg(ql_dbg_disc, vha, 0x2058,
438 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 436 "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x "
439 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 437 "pn %02x%02x%02x%02x%02x%02x%02X%02x "
440 "portid=%02x%02x%02x.\n", 438 "portid=%02x%02x%02x.\n",
441 vha->host_no,
442 list[i].node_name[0], list[i].node_name[1], 439 list[i].node_name[0], list[i].node_name[1],
443 list[i].node_name[2], list[i].node_name[3], 440 list[i].node_name[2], list[i].node_name[3],
444 list[i].node_name[4], list[i].node_name[5], 441 list[i].node_name[4], list[i].node_name[5],
@@ -448,7 +445,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
448 list[i].port_name[4], list[i].port_name[5], 445 list[i].port_name[4], list[i].port_name[5],
449 list[i].port_name[6], list[i].port_name[7], 446 list[i].port_name[6], list[i].port_name[7],
450 list[i].d_id.b.domain, list[i].d_id.b.area, 447 list[i].d_id.b.domain, list[i].d_id.b.area,
451 list[i].d_id.b.al_pa)); 448 list[i].d_id.b.al_pa);
452 } 449 }
453 450
454 /* Last device exit. */ 451 /* Last device exit. */
@@ -499,14 +496,14 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
499 sizeof(ms_iocb_entry_t)); 496 sizeof(ms_iocb_entry_t));
500 if (rval != QLA_SUCCESS) { 497 if (rval != QLA_SUCCESS) {
501 /*EMPTY*/ 498 /*EMPTY*/
502 DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n", 499 ql_dbg(ql_dbg_disc, vha, 0x2043,
503 vha->host_no, rval)); 500 "RFT_ID issue IOCB failed (%d).\n", rval);
504 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") != 501 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
505 QLA_SUCCESS) { 502 QLA_SUCCESS) {
506 rval = QLA_FUNCTION_FAILED; 503 rval = QLA_FUNCTION_FAILED;
507 } else { 504 } else {
508 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n", 505 ql_dbg(ql_dbg_disc, vha, 0x2044,
509 vha->host_no)); 506 "RFT_ID exiting normally.\n");
510 } 507 }
511 508
512 return (rval); 509 return (rval);
@@ -528,8 +525,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
528 struct ct_sns_rsp *ct_rsp; 525 struct ct_sns_rsp *ct_rsp;
529 526
530 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 527 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
531 DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on " 528 ql_dbg(ql_dbg_disc, vha, 0x2046,
532 "ISP2100/ISP2200.\n", vha->host_no)); 529 "RFF_ID call not supported on ISP2100/ISP2200.\n");
533 return (QLA_SUCCESS); 530 return (QLA_SUCCESS);
534 } 531 }
535 532
@@ -556,14 +553,14 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
556 sizeof(ms_iocb_entry_t)); 553 sizeof(ms_iocb_entry_t));
557 if (rval != QLA_SUCCESS) { 554 if (rval != QLA_SUCCESS) {
558 /*EMPTY*/ 555 /*EMPTY*/
559 DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n", 556 ql_dbg(ql_dbg_disc, vha, 0x2047,
560 vha->host_no, rval)); 557 "RFF_ID issue IOCB failed (%d).\n", rval);
561 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") != 558 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
562 QLA_SUCCESS) { 559 QLA_SUCCESS) {
563 rval = QLA_FUNCTION_FAILED; 560 rval = QLA_FUNCTION_FAILED;
564 } else { 561 } else {
565 DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n", 562 ql_dbg(ql_dbg_disc, vha, 0x2048,
566 vha->host_no)); 563 "RFF_ID exiting normally.\n");
567 } 564 }
568 565
569 return (rval); 566 return (rval);
@@ -609,14 +606,14 @@ qla2x00_rnn_id(scsi_qla_host_t *vha)
609 sizeof(ms_iocb_entry_t)); 606 sizeof(ms_iocb_entry_t));
610 if (rval != QLA_SUCCESS) { 607 if (rval != QLA_SUCCESS) {
611 /*EMPTY*/ 608 /*EMPTY*/
612 DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n", 609 ql_dbg(ql_dbg_disc, vha, 0x204d,
613 vha->host_no, rval)); 610 "RNN_ID issue IOCB failed (%d).\n", rval);
614 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") != 611 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
615 QLA_SUCCESS) { 612 QLA_SUCCESS) {
616 rval = QLA_FUNCTION_FAILED; 613 rval = QLA_FUNCTION_FAILED;
617 } else { 614 } else {
618 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n", 615 ql_dbg(ql_dbg_disc, vha, 0x204e,
619 vha->host_no)); 616 "RNN_ID exiting normally.\n");
620 } 617 }
621 618
622 return (rval); 619 return (rval);
@@ -647,8 +644,8 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
647 struct ct_sns_rsp *ct_rsp; 644 struct ct_sns_rsp *ct_rsp;
648 645
649 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 646 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
650 DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on " 647 ql_dbg(ql_dbg_disc, vha, 0x2050,
651 "ISP2100/ISP2200.\n", vha->host_no)); 648 "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
652 return (QLA_SUCCESS); 649 return (QLA_SUCCESS);
653 } 650 }
654 651
@@ -682,14 +679,14 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
682 sizeof(ms_iocb_entry_t)); 679 sizeof(ms_iocb_entry_t));
683 if (rval != QLA_SUCCESS) { 680 if (rval != QLA_SUCCESS) {
684 /*EMPTY*/ 681 /*EMPTY*/
685 DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n", 682 ql_dbg(ql_dbg_disc, vha, 0x2051,
686 vha->host_no, rval)); 683 "RSNN_NN issue IOCB failed (%d).\n", rval);
687 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") != 684 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
688 QLA_SUCCESS) { 685 QLA_SUCCESS) {
689 rval = QLA_FUNCTION_FAILED; 686 rval = QLA_FUNCTION_FAILED;
690 } else { 687 } else {
691 DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n", 688 ql_dbg(ql_dbg_disc, vha, 0x2052,
692 vha->host_no)); 689 "RSNN_NN exiting normally.\n");
693 } 690 }
694 691
695 return (rval); 692 return (rval);
@@ -757,13 +754,14 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
757 sizeof(struct sns_cmd_pkt)); 754 sizeof(struct sns_cmd_pkt));
758 if (rval != QLA_SUCCESS) { 755 if (rval != QLA_SUCCESS) {
759 /*EMPTY*/ 756 /*EMPTY*/
760 DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n", 757 ql_dbg(ql_dbg_disc, vha, 0x205f,
761 vha->host_no, rval)); 758 "GA_NXT Send SNS failed (%d).\n", rval);
762 } else if (sns_cmd->p.gan_data[8] != 0x80 || 759 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
763 sns_cmd->p.gan_data[9] != 0x02) { 760 sns_cmd->p.gan_data[9] != 0x02) {
764 DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, " 761 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207d,
765 "ga_nxt_rsp:\n", vha->host_no)); 762 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
766 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16)); 763 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
764 sns_cmd->p.gan_data, 16);
767 rval = QLA_FUNCTION_FAILED; 765 rval = QLA_FUNCTION_FAILED;
768 } else { 766 } else {
769 /* Populate fc_port_t entry. */ 767 /* Populate fc_port_t entry. */
@@ -778,11 +776,10 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
778 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE) 776 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
779 fcport->d_id.b.domain = 0xf0; 777 fcport->d_id.b.domain = 0xf0;
780 778
781 DEBUG2_3(printk("scsi(%ld): GA_NXT entry - " 779 ql_dbg(ql_dbg_disc, vha, 0x2061,
782 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 780 "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
783 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 781 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
784 "portid=%02x%02x%02x.\n", 782 "port_id=%02x%02x%02x.\n",
785 vha->host_no,
786 fcport->node_name[0], fcport->node_name[1], 783 fcport->node_name[0], fcport->node_name[1],
787 fcport->node_name[2], fcport->node_name[3], 784 fcport->node_name[2], fcport->node_name[3],
788 fcport->node_name[4], fcport->node_name[5], 785 fcport->node_name[4], fcport->node_name[5],
@@ -792,7 +789,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
792 fcport->port_name[4], fcport->port_name[5], 789 fcport->port_name[4], fcport->port_name[5],
793 fcport->port_name[6], fcport->port_name[7], 790 fcport->port_name[6], fcport->port_name[7],
794 fcport->d_id.b.domain, fcport->d_id.b.area, 791 fcport->d_id.b.domain, fcport->d_id.b.area,
795 fcport->d_id.b.al_pa)); 792 fcport->d_id.b.al_pa);
796 } 793 }
797 794
798 return (rval); 795 return (rval);
@@ -831,13 +828,14 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
831 sizeof(struct sns_cmd_pkt)); 828 sizeof(struct sns_cmd_pkt));
832 if (rval != QLA_SUCCESS) { 829 if (rval != QLA_SUCCESS) {
833 /*EMPTY*/ 830 /*EMPTY*/
834 DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n", 831 ql_dbg(ql_dbg_disc, vha, 0x206d,
835 vha->host_no, rval)); 832 "GID_PT Send SNS failed (%d).\n", rval);
836 } else if (sns_cmd->p.gid_data[8] != 0x80 || 833 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
837 sns_cmd->p.gid_data[9] != 0x02) { 834 sns_cmd->p.gid_data[9] != 0x02) {
838 DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, " 835 ql_dbg(ql_dbg_disc, vha, 0x202f,
839 "gid_rsp:\n", vha->host_no)); 836 "GID_PT failed, rejected request, gid_rsp:\n");
840 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16)); 837 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
838 sns_cmd->p.gid_data, 16);
841 rval = QLA_FUNCTION_FAILED; 839 rval = QLA_FUNCTION_FAILED;
842 } else { 840 } else {
843 /* Set port IDs in switch info list. */ 841 /* Set port IDs in switch info list. */
@@ -900,13 +898,14 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
900 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); 898 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
901 if (rval != QLA_SUCCESS) { 899 if (rval != QLA_SUCCESS) {
902 /*EMPTY*/ 900 /*EMPTY*/
903 DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed " 901 ql_dbg(ql_dbg_disc, vha, 0x2032,
904 "(%d).\n", vha->host_no, rval)); 902 "GPN_ID Send SNS failed (%d).\n", rval);
905 } else if (sns_cmd->p.gpn_data[8] != 0x80 || 903 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
906 sns_cmd->p.gpn_data[9] != 0x02) { 904 sns_cmd->p.gpn_data[9] != 0x02) {
907 DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected " 905 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
908 "request, gpn_rsp:\n", vha->host_no)); 906 "GPN_ID failed, rejected request, gpn_rsp:\n");
909 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16)); 907 ql_dump_buffer(ql_dbg_disc, vha, 0x207f,
908 sns_cmd->p.gpn_data, 16);
910 rval = QLA_FUNCTION_FAILED; 909 rval = QLA_FUNCTION_FAILED;
911 } else { 910 } else {
912 /* Save portname */ 911 /* Save portname */
@@ -955,24 +954,24 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
955 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); 954 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
956 if (rval != QLA_SUCCESS) { 955 if (rval != QLA_SUCCESS) {
957 /*EMPTY*/ 956 /*EMPTY*/
958 DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed " 957 ql_dbg(ql_dbg_disc, vha, 0x203f,
959 "(%d).\n", vha->host_no, rval)); 958 "GNN_ID Send SNS failed (%d).\n", rval);
960 } else if (sns_cmd->p.gnn_data[8] != 0x80 || 959 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
961 sns_cmd->p.gnn_data[9] != 0x02) { 960 sns_cmd->p.gnn_data[9] != 0x02) {
962 DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected " 961 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
963 "request, gnn_rsp:\n", vha->host_no)); 962 "GNN_ID failed, rejected request, gnn_rsp:\n");
964 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16)); 963 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
964 sns_cmd->p.gnn_data, 16);
965 rval = QLA_FUNCTION_FAILED; 965 rval = QLA_FUNCTION_FAILED;
966 } else { 966 } else {
967 /* Save nodename */ 967 /* Save nodename */
968 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16], 968 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
969 WWN_SIZE); 969 WWN_SIZE);
970 970
971 DEBUG2_3(printk("scsi(%ld): GID_PT entry - " 971 ql_dbg(ql_dbg_disc, vha, 0x206e,
972 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 972 "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
973 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 973 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
974 "portid=%02x%02x%02x.\n", 974 "port_id=%02x%02x%02x.\n",
975 vha->host_no,
976 list[i].node_name[0], list[i].node_name[1], 975 list[i].node_name[0], list[i].node_name[1],
977 list[i].node_name[2], list[i].node_name[3], 976 list[i].node_name[2], list[i].node_name[3],
978 list[i].node_name[4], list[i].node_name[5], 977 list[i].node_name[4], list[i].node_name[5],
@@ -982,7 +981,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
982 list[i].port_name[4], list[i].port_name[5], 981 list[i].port_name[4], list[i].port_name[5],
983 list[i].port_name[6], list[i].port_name[7], 982 list[i].port_name[6], list[i].port_name[7],
984 list[i].d_id.b.domain, list[i].d_id.b.area, 983 list[i].d_id.b.domain, list[i].d_id.b.area,
985 list[i].d_id.b.al_pa)); 984 list[i].d_id.b.al_pa);
986 } 985 }
987 986
988 /* Last device exit. */ 987 /* Last device exit. */
@@ -1025,17 +1024,18 @@ qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1025 sizeof(struct sns_cmd_pkt)); 1024 sizeof(struct sns_cmd_pkt));
1026 if (rval != QLA_SUCCESS) { 1025 if (rval != QLA_SUCCESS) {
1027 /*EMPTY*/ 1026 /*EMPTY*/
1028 DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n", 1027 ql_dbg(ql_dbg_disc, vha, 0x2060,
1029 vha->host_no, rval)); 1028 "RFT_ID Send SNS failed (%d).\n", rval);
1030 } else if (sns_cmd->p.rft_data[8] != 0x80 || 1029 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1031 sns_cmd->p.rft_data[9] != 0x02) { 1030 sns_cmd->p.rft_data[9] != 0x02) {
1032 DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, " 1031 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1033 "rft_rsp:\n", vha->host_no)); 1032 "RFT_ID failed, rejected request rft_rsp:\n");
1034 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16)); 1033 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1034 sns_cmd->p.rft_data, 16);
1035 rval = QLA_FUNCTION_FAILED; 1035 rval = QLA_FUNCTION_FAILED;
1036 } else { 1036 } else {
1037 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n", 1037 ql_dbg(ql_dbg_disc, vha, 0x2073,
1038 vha->host_no)); 1038 "RFT_ID exiting normally.\n");
1039 } 1039 }
1040 1040
1041 return (rval); 1041 return (rval);
@@ -1081,17 +1081,18 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1081 sizeof(struct sns_cmd_pkt)); 1081 sizeof(struct sns_cmd_pkt));
1082 if (rval != QLA_SUCCESS) { 1082 if (rval != QLA_SUCCESS) {
1083 /*EMPTY*/ 1083 /*EMPTY*/
1084 DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n", 1084 ql_dbg(ql_dbg_disc, vha, 0x204a,
1085 vha->host_no, rval)); 1085 "RNN_ID Send SNS failed (%d).\n", rval);
1086 } else if (sns_cmd->p.rnn_data[8] != 0x80 || 1086 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1087 sns_cmd->p.rnn_data[9] != 0x02) { 1087 sns_cmd->p.rnn_data[9] != 0x02) {
1088 DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, " 1088 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1089 "rnn_rsp:\n", vha->host_no)); 1089 "RNN_ID failed, rejected request, rnn_rsp:\n");
1090 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16)); 1090 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1091 sns_cmd->p.rnn_data, 16);
1091 rval = QLA_FUNCTION_FAILED; 1092 rval = QLA_FUNCTION_FAILED;
1092 } else { 1093 } else {
1093 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n", 1094 ql_dbg(ql_dbg_disc, vha, 0x204c,
1094 vha->host_no)); 1095 "RNN_ID exiting normally.\n");
1095 } 1096 }
1096 1097
1097 return (rval); 1098 return (rval);
@@ -1116,10 +1117,10 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1116 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, 1117 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
1117 mb, BIT_1|BIT_0); 1118 mb, BIT_1|BIT_0);
1118 if (mb[0] != MBS_COMMAND_COMPLETE) { 1119 if (mb[0] != MBS_COMMAND_COMPLETE) {
1119 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " 1120 ql_dbg(ql_dbg_disc, vha, 0x2024,
1120 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n", 1121 "Failed management_server login: loopid=%x mb[0]=%x "
1121 __func__, vha->host_no, vha->mgmt_svr_loop_id, mb[0], mb[1], 1122 "mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1122 mb[2], mb[6], mb[7])); 1123 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]);
1123 ret = QLA_FUNCTION_FAILED; 1124 ret = QLA_FUNCTION_FAILED;
1124 } else 1125 } else
1125 vha->flags.management_server_logged_in = 1; 1126 vha->flags.management_server_logged_in = 1;
@@ -1292,11 +1293,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1292 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE); 1293 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1293 size += 4 + WWN_SIZE; 1294 size += 4 + WWN_SIZE;
1294 1295
1295 DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n", 1296 ql_dbg(ql_dbg_disc, vha, 0x2025,
1296 __func__, vha->host_no, 1297 "NodeName = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
1297 eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2], 1298 eiter->a.node_name[0], eiter->a.node_name[1],
1298 eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5], 1299 eiter->a.node_name[2], eiter->a.node_name[3],
1299 eiter->a.node_name[6], eiter->a.node_name[7])); 1300 eiter->a.node_name[4], eiter->a.node_name[5],
1301 eiter->a.node_name[6], eiter->a.node_name[7]);
1300 1302
1301 /* Manufacturer. */ 1303 /* Manufacturer. */
1302 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1304 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1307,8 +1309,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1307 eiter->len = cpu_to_be16(4 + alen); 1309 eiter->len = cpu_to_be16(4 + alen);
1308 size += 4 + alen; 1310 size += 4 + alen;
1309 1311
1310 DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, vha->host_no, 1312 ql_dbg(ql_dbg_disc, vha, 0x2026,
1311 eiter->a.manufacturer)); 1313 "Manufacturer = %s.\n", eiter->a.manufacturer);
1312 1314
1313 /* Serial number. */ 1315 /* Serial number. */
1314 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1316 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1320,8 +1322,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1320 eiter->len = cpu_to_be16(4 + alen); 1322 eiter->len = cpu_to_be16(4 + alen);
1321 size += 4 + alen; 1323 size += 4 + alen;
1322 1324
1323 DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, vha->host_no, 1325 ql_dbg(ql_dbg_disc, vha, 0x2027,
1324 eiter->a.serial_num)); 1326 "Serial no. = %s.\n", eiter->a.serial_num);
1325 1327
1326 /* Model name. */ 1328 /* Model name. */
1327 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1329 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1332,8 +1334,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1332 eiter->len = cpu_to_be16(4 + alen); 1334 eiter->len = cpu_to_be16(4 + alen);
1333 size += 4 + alen; 1335 size += 4 + alen;
1334 1336
1335 DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, vha->host_no, 1337 ql_dbg(ql_dbg_disc, vha, 0x2028,
1336 eiter->a.model)); 1338 "Model Name = %s.\n", eiter->a.model);
1337 1339
1338 /* Model description. */ 1340 /* Model description. */
1339 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1341 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1345,8 +1347,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1345 eiter->len = cpu_to_be16(4 + alen); 1347 eiter->len = cpu_to_be16(4 + alen);
1346 size += 4 + alen; 1348 size += 4 + alen;
1347 1349
1348 DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, vha->host_no, 1350 ql_dbg(ql_dbg_disc, vha, 0x2029,
1349 eiter->a.model_desc)); 1351 "Model Desc = %s.\n", eiter->a.model_desc);
1350 1352
1351 /* Hardware version. */ 1353 /* Hardware version. */
1352 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1354 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1357,8 +1359,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1357 eiter->len = cpu_to_be16(4 + alen); 1359 eiter->len = cpu_to_be16(4 + alen);
1358 size += 4 + alen; 1360 size += 4 + alen;
1359 1361
1360 DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, vha->host_no, 1362 ql_dbg(ql_dbg_disc, vha, 0x202a,
1361 eiter->a.hw_version)); 1363 "Hardware ver = %s.\n", eiter->a.hw_version);
1362 1364
1363 /* Driver version. */ 1365 /* Driver version. */
1364 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1366 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1369,8 +1371,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1369 eiter->len = cpu_to_be16(4 + alen); 1371 eiter->len = cpu_to_be16(4 + alen);
1370 size += 4 + alen; 1372 size += 4 + alen;
1371 1373
1372 DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, vha->host_no, 1374 ql_dbg(ql_dbg_disc, vha, 0x202b,
1373 eiter->a.driver_version)); 1375 "Driver ver = %s.\n", eiter->a.driver_version);
1374 1376
1375 /* Option ROM version. */ 1377 /* Option ROM version. */
1376 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1378 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1381,8 +1383,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1381 eiter->len = cpu_to_be16(4 + alen); 1383 eiter->len = cpu_to_be16(4 + alen);
1382 size += 4 + alen; 1384 size += 4 + alen;
1383 1385
1384 DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, vha->host_no, 1386 ql_dbg(ql_dbg_disc, vha , 0x202c,
1385 eiter->a.orom_version)); 1387 "Optrom vers = %s.\n", eiter->a.orom_version);
1386 1388
1387 /* Firmware version */ 1389 /* Firmware version */
1388 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1390 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1393,44 +1395,46 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1393 eiter->len = cpu_to_be16(4 + alen); 1395 eiter->len = cpu_to_be16(4 + alen);
1394 size += 4 + alen; 1396 size += 4 + alen;
1395 1397
1396 DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, vha->host_no, 1398 ql_dbg(ql_dbg_disc, vha, 0x202d,
1397 eiter->a.fw_version)); 1399 "Firmware vers = %s.\n", eiter->a.fw_version);
1398 1400
1399 /* Update MS request size. */ 1401 /* Update MS request size. */
1400 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 1402 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1401 1403
1402 DEBUG13(printk("%s(%ld): RHBA identifier=" 1404 ql_dbg(ql_dbg_disc, vha, 0x202e,
1403 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__, 1405 "RHBA identifier = "
1404 vha->host_no, ct_req->req.rhba.hba_identifier[0], 1406 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n",
1407 ct_req->req.rhba.hba_identifier[0],
1405 ct_req->req.rhba.hba_identifier[1], 1408 ct_req->req.rhba.hba_identifier[1],
1406 ct_req->req.rhba.hba_identifier[2], 1409 ct_req->req.rhba.hba_identifier[2],
1407 ct_req->req.rhba.hba_identifier[3], 1410 ct_req->req.rhba.hba_identifier[3],
1408 ct_req->req.rhba.hba_identifier[4], 1411 ct_req->req.rhba.hba_identifier[4],
1409 ct_req->req.rhba.hba_identifier[5], 1412 ct_req->req.rhba.hba_identifier[5],
1410 ct_req->req.rhba.hba_identifier[6], 1413 ct_req->req.rhba.hba_identifier[6],
1411 ct_req->req.rhba.hba_identifier[7], size)); 1414 ct_req->req.rhba.hba_identifier[7], size);
1412 DEBUG13(qla2x00_dump_buffer(entries, size)); 1415 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
1416 entries, size);
1413 1417
1414 /* Execute MS IOCB */ 1418 /* Execute MS IOCB */
1415 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 1419 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1416 sizeof(ms_iocb_entry_t)); 1420 sizeof(ms_iocb_entry_t));
1417 if (rval != QLA_SUCCESS) { 1421 if (rval != QLA_SUCCESS) {
1418 /*EMPTY*/ 1422 /*EMPTY*/
1419 DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n", 1423 ql_dbg(ql_dbg_disc, vha, 0x2030,
1420 vha->host_no, rval)); 1424 "RHBA issue IOCB failed (%d).\n", rval);
1421 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") != 1425 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1422 QLA_SUCCESS) { 1426 QLA_SUCCESS) {
1423 rval = QLA_FUNCTION_FAILED; 1427 rval = QLA_FUNCTION_FAILED;
1424 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && 1428 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1425 ct_rsp->header.explanation_code == 1429 ct_rsp->header.explanation_code ==
1426 CT_EXPL_ALREADY_REGISTERED) { 1430 CT_EXPL_ALREADY_REGISTERED) {
1427 DEBUG2_13(printk("%s(%ld): HBA already registered.\n", 1431 ql_dbg(ql_dbg_disc, vha, 0x2034,
1428 __func__, vha->host_no)); 1432 "HBA already registered.\n");
1429 rval = QLA_ALREADY_REGISTERED; 1433 rval = QLA_ALREADY_REGISTERED;
1430 } 1434 }
1431 } else { 1435 } else {
1432 DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n", 1436 ql_dbg(ql_dbg_disc, vha, 0x2035,
1433 vha->host_no)); 1437 "RHBA exiting normally.\n");
1434 } 1438 }
1435 1439
1436 return rval; 1440 return rval;
@@ -1464,26 +1468,26 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
1464 /* Prepare FDMI command arguments -- portname. */ 1468 /* Prepare FDMI command arguments -- portname. */
1465 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE); 1469 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
1466 1470
1467 DEBUG13(printk("%s(%ld): DHBA portname=" 1471 ql_dbg(ql_dbg_disc, vha, 0x2036,
1468 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, vha->host_no, 1472 "DHBA portname = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
1469 ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1], 1473 ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
1470 ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3], 1474 ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
1471 ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5], 1475 ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
1472 ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7])); 1476 ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]);
1473 1477
1474 /* Execute MS IOCB */ 1478 /* Execute MS IOCB */
1475 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 1479 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1476 sizeof(ms_iocb_entry_t)); 1480 sizeof(ms_iocb_entry_t));
1477 if (rval != QLA_SUCCESS) { 1481 if (rval != QLA_SUCCESS) {
1478 /*EMPTY*/ 1482 /*EMPTY*/
1479 DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n", 1483 ql_dbg(ql_dbg_disc, vha, 0x2037,
1480 vha->host_no, rval)); 1484 "DHBA issue IOCB failed (%d).\n", rval);
1481 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") != 1485 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
1482 QLA_SUCCESS) { 1486 QLA_SUCCESS) {
1483 rval = QLA_FUNCTION_FAILED; 1487 rval = QLA_FUNCTION_FAILED;
1484 } else { 1488 } else {
1485 DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n", 1489 ql_dbg(ql_dbg_disc, vha, 0x2038,
1486 vha->host_no)); 1490 "DHBA exiting normally.\n");
1487 } 1491 }
1488 1492
1489 return rval; 1493 return rval;
@@ -1534,9 +1538,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1534 eiter->a.fc4_types[2] = 0x01; 1538 eiter->a.fc4_types[2] = 0x01;
1535 size += 4 + 32; 1539 size += 4 + 32;
1536 1540
1537 DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__, 1541 ql_dbg(ql_dbg_disc, vha, 0x2039,
1538 vha->host_no, eiter->a.fc4_types[2], 1542 "FC4_TYPES=%02x %02x.\n",
1539 eiter->a.fc4_types[1])); 1543 eiter->a.fc4_types[2],
1544 eiter->a.fc4_types[1]);
1540 1545
1541 /* Supported speed. */ 1546 /* Supported speed. */
1542 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1547 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1561,8 +1566,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1561 FDMI_PORT_SPEED_1GB); 1566 FDMI_PORT_SPEED_1GB);
1562 size += 4 + 4; 1567 size += 4 + 4;
1563 1568
1564 DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, vha->host_no, 1569 ql_dbg(ql_dbg_disc, vha, 0x203a,
1565 eiter->a.sup_speed)); 1570 "Supported_Speed=%x.\n", eiter->a.sup_speed);
1566 1571
1567 /* Current speed. */ 1572 /* Current speed. */
1568 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1573 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1596,8 +1601,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1596 } 1601 }
1597 size += 4 + 4; 1602 size += 4 + 4;
1598 1603
1599 DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, vha->host_no, 1604 ql_dbg(ql_dbg_disc, vha, 0x203b,
1600 eiter->a.cur_speed)); 1605 "Current_Speed=%x.\n", eiter->a.cur_speed);
1601 1606
1602 /* Max frame size. */ 1607 /* Max frame size. */
1603 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1608 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1609,8 +1614,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1609 eiter->a.max_frame_size = cpu_to_be32(max_frame_size); 1614 eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
1610 size += 4 + 4; 1615 size += 4 + 4;
1611 1616
1612 DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, vha->host_no, 1617 ql_dbg(ql_dbg_disc, vha, 0x203c,
1613 eiter->a.max_frame_size)); 1618 "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
1614 1619
1615 /* OS device name. */ 1620 /* OS device name. */
1616 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1621 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1621,8 +1626,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1621 eiter->len = cpu_to_be16(4 + alen); 1626 eiter->len = cpu_to_be16(4 + alen);
1622 size += 4 + alen; 1627 size += 4 + alen;
1623 1628
1624 DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, vha->host_no, 1629 ql_dbg(ql_dbg_disc, vha, 0x204b,
1625 eiter->a.os_dev_name)); 1630 "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
1626 1631
1627 /* Hostname. */ 1632 /* Hostname. */
1628 if (strlen(fc_host_system_hostname(vha->host))) { 1633 if (strlen(fc_host_system_hostname(vha->host))) {
@@ -1637,35 +1642,36 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1637 eiter->len = cpu_to_be16(4 + alen); 1642 eiter->len = cpu_to_be16(4 + alen);
1638 size += 4 + alen; 1643 size += 4 + alen;
1639 1644
1640 DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__, 1645 ql_dbg(ql_dbg_disc, vha, 0x203d,
1641 vha->host_no, eiter->a.host_name)); 1646 "HostName=%s.\n", eiter->a.host_name);
1642 } 1647 }
1643 1648
1644 /* Update MS request size. */ 1649 /* Update MS request size. */
1645 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 1650 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1646 1651
1647 DEBUG13(printk("%s(%ld): RPA portname=" 1652 ql_dbg(ql_dbg_disc, vha, 0x203e,
1648 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__, 1653 "RPA portname= %02x%02x%02x%02x%02X%02x%02x%02x size=%d.\n",
1649 vha->host_no, ct_req->req.rpa.port_name[0], 1654 ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1],
1650 ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2], 1655 ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3],
1651 ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4], 1656 ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5],
1652 ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6], 1657 ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7],
1653 ct_req->req.rpa.port_name[7], size)); 1658 size);
1654 DEBUG13(qla2x00_dump_buffer(entries, size)); 1659 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
1660 entries, size);
1655 1661
1656 /* Execute MS IOCB */ 1662 /* Execute MS IOCB */
1657 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 1663 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1658 sizeof(ms_iocb_entry_t)); 1664 sizeof(ms_iocb_entry_t));
1659 if (rval != QLA_SUCCESS) { 1665 if (rval != QLA_SUCCESS) {
1660 /*EMPTY*/ 1666 /*EMPTY*/
1661 DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n", 1667 ql_dbg(ql_dbg_disc, vha, 0x2040,
1662 vha->host_no, rval)); 1668 "RPA issue IOCB failed (%d).\n", rval);
1663 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") != 1669 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1664 QLA_SUCCESS) { 1670 QLA_SUCCESS) {
1665 rval = QLA_FUNCTION_FAILED; 1671 rval = QLA_FUNCTION_FAILED;
1666 } else { 1672 } else {
1667 DEBUG2(printk("scsi(%ld): RPA exiting normally.\n", 1673 ql_dbg(ql_dbg_disc, vha, 0x2041,
1668 vha->host_no)); 1674 "RPA exiting nornally.\n");
1669 } 1675 }
1670 1676
1671 return rval; 1677 return rval;
@@ -1749,8 +1755,8 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1749 sizeof(ms_iocb_entry_t)); 1755 sizeof(ms_iocb_entry_t));
1750 if (rval != QLA_SUCCESS) { 1756 if (rval != QLA_SUCCESS) {
1751 /*EMPTY*/ 1757 /*EMPTY*/
1752 DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB " 1758 ql_dbg(ql_dbg_disc, vha, 0x2023,
1753 "failed (%d).\n", vha->host_no, rval)); 1759 "GFPN_ID issue IOCB failed (%d).\n", rval);
1754 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 1760 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1755 "GFPN_ID") != QLA_SUCCESS) { 1761 "GFPN_ID") != QLA_SUCCESS) {
1756 rval = QLA_FUNCTION_FAILED; 1762 rval = QLA_FUNCTION_FAILED;
@@ -1860,8 +1866,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1860 sizeof(ms_iocb_entry_t)); 1866 sizeof(ms_iocb_entry_t));
1861 if (rval != QLA_SUCCESS) { 1867 if (rval != QLA_SUCCESS) {
1862 /*EMPTY*/ 1868 /*EMPTY*/
1863 DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB " 1869 ql_dbg(ql_dbg_disc, vha, 0x2059,
1864 "failed (%d).\n", vha->host_no, rval)); 1870 "GPSC issue IOCB failed (%d).\n", rval);
1865 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 1871 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1866 "GPSC")) != QLA_SUCCESS) { 1872 "GPSC")) != QLA_SUCCESS) {
1867 /* FM command unsupported? */ 1873 /* FM command unsupported? */
@@ -1870,9 +1876,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1870 CT_REASON_INVALID_COMMAND_CODE || 1876 CT_REASON_INVALID_COMMAND_CODE ||
1871 ct_rsp->header.reason_code == 1877 ct_rsp->header.reason_code ==
1872 CT_REASON_COMMAND_UNSUPPORTED)) { 1878 CT_REASON_COMMAND_UNSUPPORTED)) {
1873 DEBUG2(printk("scsi(%ld): GPSC command " 1879 ql_dbg(ql_dbg_disc, vha, 0x205a,
1874 "unsupported, disabling query...\n", 1880 "GPSC command unsupported, disabling "
1875 vha->host_no)); 1881 "query.\n");
1876 ha->flags.gpsc_supported = 0; 1882 ha->flags.gpsc_supported = 0;
1877 rval = QLA_FUNCTION_FAILED; 1883 rval = QLA_FUNCTION_FAILED;
1878 break; 1884 break;
@@ -1898,9 +1904,10 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1898 break; 1904 break;
1899 } 1905 }
1900 1906
1901 DEBUG2_3(printk("scsi(%ld): GPSC ext entry - " 1907 ql_dbg(ql_dbg_disc, vha, 0x205b,
1902 "fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x " 1908 "GPSC ext entry - fpn "
1903 "speed=%04x.\n", vha->host_no, 1909 "%02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
1910 "speed=%04x.\n",
1904 list[i].fabric_port_name[0], 1911 list[i].fabric_port_name[0],
1905 list[i].fabric_port_name[1], 1912 list[i].fabric_port_name[1],
1906 list[i].fabric_port_name[2], 1913 list[i].fabric_port_name[2],
@@ -1910,7 +1917,7 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1910 list[i].fabric_port_name[6], 1917 list[i].fabric_port_name[6],
1911 list[i].fabric_port_name[7], 1918 list[i].fabric_port_name[7],
1912 be16_to_cpu(ct_rsp->rsp.gpsc.speeds), 1919 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
1913 be16_to_cpu(ct_rsp->rsp.gpsc.speed))); 1920 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
1914 } 1921 }
1915 1922
1916 /* Last device exit. */ 1923 /* Last device exit. */
@@ -1968,14 +1975,12 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
1968 sizeof(ms_iocb_entry_t)); 1975 sizeof(ms_iocb_entry_t));
1969 1976
1970 if (rval != QLA_SUCCESS) { 1977 if (rval != QLA_SUCCESS) {
1971 DEBUG2_3(printk(KERN_INFO 1978 ql_dbg(ql_dbg_disc, vha, 0x205c,
1972 "scsi(%ld): GFF_ID issue IOCB failed " 1979 "GFF_ID issue IOCB failed (%d).\n", rval);
1973 "(%d).\n", vha->host_no, rval));
1974 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 1980 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1975 "GFF_ID") != QLA_SUCCESS) { 1981 "GFF_ID") != QLA_SUCCESS) {
1976 DEBUG2_3(printk(KERN_INFO 1982 ql_dbg(ql_dbg_disc, vha, 0x205d,
1977 "scsi(%ld): GFF_ID IOCB status had a " 1983 "GFF_ID IOCB status had a failure status code.\n");
1978 "failure status code\n", vha->host_no));
1979 } else { 1984 } else {
1980 fcp_scsi_features = 1985 fcp_scsi_features =
1981 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; 1986 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 920b76bfbb93..def694271bf7 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -153,11 +153,10 @@ qla2x00_async_iocb_timeout(srb_t *sp)
153 fc_port_t *fcport = sp->fcport; 153 fc_port_t *fcport = sp->fcport;
154 struct srb_ctx *ctx = sp->ctx; 154 struct srb_ctx *ctx = sp->ctx;
155 155
156 DEBUG2(printk(KERN_WARNING 156 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
157 "scsi(%ld:%x): Async-%s timeout - portid=%02x%02x%02x.\n", 157 "Async-%s timeout - portid=%02x%02x%02x.\n",
158 fcport->vha->host_no, sp->handle, 158 ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area,
159 ctx->name, fcport->d_id.b.domain, 159 fcport->d_id.b.al_pa);
160 fcport->d_id.b.area, fcport->d_id.b.al_pa));
161 160
162 fcport->flags &= ~FCF_ASYNC_SENT; 161 fcport->flags &= ~FCF_ASYNC_SENT;
163 if (ctx->type == SRB_LOGIN_CMD) { 162 if (ctx->type == SRB_LOGIN_CMD) {
@@ -211,11 +210,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
211 if (rval != QLA_SUCCESS) 210 if (rval != QLA_SUCCESS)
212 goto done_free_sp; 211 goto done_free_sp;
213 212
214 DEBUG2(printk(KERN_DEBUG 213 ql_dbg(ql_dbg_disc, vha, 0x2072,
215 "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x " 214 "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n",
216 "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id, 215 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
217 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 216 fcport->d_id.b.al_pa, fcport->login_retry);
218 fcport->login_retry));
219 return rval; 217 return rval;
220 218
221done_free_sp: 219done_free_sp:
@@ -259,10 +257,10 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
259 if (rval != QLA_SUCCESS) 257 if (rval != QLA_SUCCESS)
260 goto done_free_sp; 258 goto done_free_sp;
261 259
262 DEBUG2(printk(KERN_DEBUG 260 ql_dbg(ql_dbg_disc, vha, 0x2070,
263 "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n", 261 "Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
264 fcport->vha->host_no, sp->handle, fcport->loop_id, 262 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
265 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); 263 fcport->d_id.b.al_pa);
266 return rval; 264 return rval;
267 265
268done_free_sp: 266done_free_sp:
@@ -309,11 +307,10 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
309 if (rval != QLA_SUCCESS) 307 if (rval != QLA_SUCCESS)
310 goto done_free_sp; 308 goto done_free_sp;
311 309
312 DEBUG2(printk(KERN_DEBUG 310 ql_dbg(ql_dbg_disc, vha, 0x206f,
313 "scsi(%ld:%x): Async-adisc - loop-id=%x portid=%02x%02x%02x.\n", 311 "Async-adisc - loopid=%x portid=%02x%02x%02x.\n",
314 fcport->vha->host_no, sp->handle, fcport->loop_id, 312 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
315 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); 313 fcport->d_id.b.al_pa);
316
317 return rval; 314 return rval;
318 315
319done_free_sp: 316done_free_sp:
@@ -362,11 +359,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
362 if (rval != QLA_SUCCESS) 359 if (rval != QLA_SUCCESS)
363 goto done_free_sp; 360 goto done_free_sp;
364 361
365 DEBUG2(printk(KERN_DEBUG 362 ql_dbg(ql_dbg_taskm, vha, 0x802f,
366 "scsi(%ld:%x): Async-tmf - loop-id=%x portid=%02x%02x%02x.\n", 363 "Async-tmf loop-id=%x portid=%02x%02x%02x.\n",
367 fcport->vha->host_no, sp->handle, fcport->loop_id, 364 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
368 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); 365 fcport->d_id.b.al_pa);
369
370 return rval; 366 return rval;
371 367
372done_free_sp: 368done_free_sp:
@@ -471,9 +467,8 @@ qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
471 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 467 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
472 468
473 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) { 469 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
474 DEBUG2_3_11(printk(KERN_WARNING 470 ql_dbg(ql_dbg_taskm, vha, 0x8030,
475 "%s(%ld): TM IOCB failed (%x).\n", 471 "TM IOCB failed (%x).\n", rval);
476 __func__, vha->host_no, rval));
477 } 472 }
478 473
479 return; 474 return;
@@ -519,11 +514,12 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
519 set_bit(0, ha->req_qid_map); 514 set_bit(0, ha->req_qid_map);
520 set_bit(0, ha->rsp_qid_map); 515 set_bit(0, ha->rsp_qid_map);
521 516
522 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); 517 ql_log(ql_log_info, vha, 0x0040,
518 "Configuring PCI space...\n");
523 rval = ha->isp_ops->pci_config(vha); 519 rval = ha->isp_ops->pci_config(vha);
524 if (rval) { 520 if (rval) {
525 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n", 521 ql_log(ql_log_warn, vha, 0x0044,
526 vha->host_no)); 522 "Unable to configure PCI space.\n");
527 return (rval); 523 return (rval);
528 } 524 }
529 525
@@ -531,20 +527,21 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
531 527
532 rval = qla2xxx_get_flash_info(vha); 528 rval = qla2xxx_get_flash_info(vha);
533 if (rval) { 529 if (rval) {
534 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n", 530 ql_log(ql_log_fatal, vha, 0x004f,
535 vha->host_no)); 531 "Unable to validate FLASH data.\n");
536 return (rval); 532 return (rval);
537 } 533 }
538 534
539 ha->isp_ops->get_flash_version(vha, req->ring); 535 ha->isp_ops->get_flash_version(vha, req->ring);
540 536 ql_log(ql_log_info, vha, 0x0061,
541 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 537 "Configure NVRAM parameters...\n");
542 538
543 ha->isp_ops->nvram_config(vha); 539 ha->isp_ops->nvram_config(vha);
544 540
545 if (ha->flags.disable_serdes) { 541 if (ha->flags.disable_serdes) {
546 /* Mask HBA via NVRAM settings? */ 542 /* Mask HBA via NVRAM settings? */
547 qla_printk(KERN_INFO, ha, "Masking HBA WWPN " 543 ql_log(ql_log_info, vha, 0x0077,
544 "Masking HBA WWPN "
548 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n", 545 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
549 vha->port_name[0], vha->port_name[1], 546 vha->port_name[0], vha->port_name[1],
550 vha->port_name[2], vha->port_name[3], 547 vha->port_name[2], vha->port_name[3],
@@ -553,7 +550,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
553 return QLA_FUNCTION_FAILED; 550 return QLA_FUNCTION_FAILED;
554 } 551 }
555 552
556 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n"); 553 ql_log(ql_log_info, vha, 0x0078,
554 "Verifying loaded RISC code...\n");
557 555
558 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) { 556 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
559 rval = ha->isp_ops->chip_diag(vha); 557 rval = ha->isp_ops->chip_diag(vha);
@@ -567,7 +565,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
567 if (IS_QLA84XX(ha)) { 565 if (IS_QLA84XX(ha)) {
568 ha->cs84xx = qla84xx_get_chip(vha); 566 ha->cs84xx = qla84xx_get_chip(vha);
569 if (!ha->cs84xx) { 567 if (!ha->cs84xx) {
570 qla_printk(KERN_ERR, ha, 568 ql_log(ql_log_warn, vha, 0x00d0,
571 "Unable to configure ISP84XX.\n"); 569 "Unable to configure ISP84XX.\n");
572 return QLA_FUNCTION_FAILED; 570 return QLA_FUNCTION_FAILED;
573 } 571 }
@@ -579,8 +577,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
579 /* Issue verify 84xx FW IOCB to complete 84xx initialization */ 577 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
580 rval = qla84xx_init_chip(vha); 578 rval = qla84xx_init_chip(vha);
581 if (rval != QLA_SUCCESS) { 579 if (rval != QLA_SUCCESS) {
582 qla_printk(KERN_ERR, ha, 580 ql_log(ql_log_warn, vha, 0x00d4,
583 "Unable to initialize ISP84XX.\n"); 581 "Unable to initialize ISP84XX.\n");
584 qla84xx_put_chip(vha); 582 qla84xx_put_chip(vha);
585 } 583 }
586 } 584 }
@@ -797,9 +795,7 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha)
797 rval = QLA_FUNCTION_FAILED; 795 rval = QLA_FUNCTION_FAILED;
798 796
799 if (ha->flags.disable_risc_code_load) { 797 if (ha->flags.disable_risc_code_load) {
800 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n", 798 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
801 vha->host_no));
802 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
803 799
804 /* Verify checksum of loaded RISC code. */ 800 /* Verify checksum of loaded RISC code. */
805 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address); 801 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
@@ -810,10 +806,9 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha)
810 } 806 }
811 } 807 }
812 808
813 if (rval) { 809 if (rval)
814 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n", 810 ql_dbg(ql_dbg_init, vha, 0x007a,
815 vha->host_no)); 811 "**** Load RISC code ****.\n");
816 }
817 812
818 return (rval); 813 return (rval);
819} 814}
@@ -1105,8 +1100,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1105 /* Assume a failed state */ 1100 /* Assume a failed state */
1106 rval = QLA_FUNCTION_FAILED; 1101 rval = QLA_FUNCTION_FAILED;
1107 1102
1108 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n", 1103 ql_dbg(ql_dbg_init, vha, 0x007b,
1109 vha->host_no, (u_long)&reg->flash_address)); 1104 "Testing device at %lx.\n", (u_long)&reg->flash_address);
1110 1105
1111 spin_lock_irqsave(&ha->hardware_lock, flags); 1106 spin_lock_irqsave(&ha->hardware_lock, flags);
1112 1107
@@ -1128,8 +1123,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1128 if (!cnt) 1123 if (!cnt)
1129 goto chip_diag_failed; 1124 goto chip_diag_failed;
1130 1125
1131 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n", 1126 ql_dbg(ql_dbg_init, vha, 0x007c,
1132 vha->host_no)); 1127 "Reset register cleared by chip reset.\n");
1133 1128
1134 /* Reset RISC processor. */ 1129 /* Reset RISC processor. */
1135 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 1130 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -1150,7 +1145,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1150 goto chip_diag_failed; 1145 goto chip_diag_failed;
1151 1146
1152 /* Check product ID of chip */ 1147 /* Check product ID of chip */
1153 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no)); 1148 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product Id of chip.\n");
1154 1149
1155 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 1150 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
1156 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 1151 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
@@ -1158,8 +1153,9 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1158 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4)); 1153 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
1159 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) || 1154 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
1160 mb[3] != PROD_ID_3) { 1155 mb[3] != PROD_ID_3) {
1161 qla_printk(KERN_WARNING, ha, 1156 ql_log(ql_log_warn, vha, 0x0062,
1162 "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]); 1157 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
1158 mb[1], mb[2], mb[3]);
1163 1159
1164 goto chip_diag_failed; 1160 goto chip_diag_failed;
1165 } 1161 }
@@ -1178,8 +1174,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1178 if (IS_QLA2200(ha) && 1174 if (IS_QLA2200(ha) &&
1179 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { 1175 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
1180 /* Limit firmware transfer size with a 2200A */ 1176 /* Limit firmware transfer size with a 2200A */
1181 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n", 1177 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
1182 vha->host_no));
1183 1178
1184 ha->device_type |= DT_ISP2200A; 1179 ha->device_type |= DT_ISP2200A;
1185 ha->fw_transfer_size = 128; 1180 ha->fw_transfer_size = 128;
@@ -1188,24 +1183,20 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1188 /* Wrap Incoming Mailboxes Test. */ 1183 /* Wrap Incoming Mailboxes Test. */
1189 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1184 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1190 1185
1191 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no)); 1186 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
1192 rval = qla2x00_mbx_reg_test(vha); 1187 rval = qla2x00_mbx_reg_test(vha);
1193 if (rval) { 1188 if (rval)
1194 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 1189 ql_log(ql_log_warn, vha, 0x0080,
1195 vha->host_no)); 1190 "Failed mailbox send register test.\n");
1196 qla_printk(KERN_WARNING, ha, 1191 else
1197 "Failed mailbox send register test\n");
1198 }
1199 else {
1200 /* Flag a successful rval */ 1192 /* Flag a successful rval */
1201 rval = QLA_SUCCESS; 1193 rval = QLA_SUCCESS;
1202 }
1203 spin_lock_irqsave(&ha->hardware_lock, flags); 1194 spin_lock_irqsave(&ha->hardware_lock, flags);
1204 1195
1205chip_diag_failed: 1196chip_diag_failed:
1206 if (rval) 1197 if (rval)
1207 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED " 1198 ql_log(ql_log_info, vha, 0x0081,
1208 "****\n", vha->host_no)); 1199 "Chip diagnostics **** FAILED ****.\n");
1209 1200
1210 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1201 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1211 1202
@@ -1232,10 +1223,8 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
1232 1223
1233 rval = qla2x00_mbx_reg_test(vha); 1224 rval = qla2x00_mbx_reg_test(vha);
1234 if (rval) { 1225 if (rval) {
1235 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 1226 ql_log(ql_log_warn, vha, 0x0082,
1236 vha->host_no)); 1227 "Failed mailbox send register test.\n");
1237 qla_printk(KERN_WARNING, ha,
1238 "Failed mailbox send register test\n");
1239 } else { 1228 } else {
1240 /* Flag a successful rval */ 1229 /* Flag a successful rval */
1241 rval = QLA_SUCCESS; 1230 rval = QLA_SUCCESS;
@@ -1257,8 +1246,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1257 struct rsp_que *rsp = ha->rsp_q_map[0]; 1246 struct rsp_que *rsp = ha->rsp_q_map[0];
1258 1247
1259 if (ha->fw_dump) { 1248 if (ha->fw_dump) {
1260 qla_printk(KERN_WARNING, ha, 1249 ql_dbg(ql_dbg_init, vha, 0x00bd,
1261 "Firmware dump previously allocated.\n"); 1250 "Firmware dump already allocated.\n");
1262 return; 1251 return;
1263 } 1252 }
1264 1253
@@ -1288,8 +1277,9 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1288 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 1277 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
1289 GFP_KERNEL); 1278 GFP_KERNEL);
1290 if (!tc) { 1279 if (!tc) {
1291 qla_printk(KERN_WARNING, ha, "Unable to allocate " 1280 ql_log(ql_log_warn, vha, 0x00be,
1292 "(%d KB) for FCE.\n", FCE_SIZE / 1024); 1281 "Unable to allocate (%d KB) for FCE.\n",
1282 FCE_SIZE / 1024);
1293 goto try_eft; 1283 goto try_eft;
1294 } 1284 }
1295 1285
@@ -1297,16 +1287,15 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1297 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, 1287 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
1298 ha->fce_mb, &ha->fce_bufs); 1288 ha->fce_mb, &ha->fce_bufs);
1299 if (rval) { 1289 if (rval) {
1300 qla_printk(KERN_WARNING, ha, "Unable to initialize " 1290 ql_log(ql_log_warn, vha, 0x00bf,
1301 "FCE (%d).\n", rval); 1291 "Unable to initialize FCE (%d).\n", rval);
1302 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, 1292 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
1303 tc_dma); 1293 tc_dma);
1304 ha->flags.fce_enabled = 0; 1294 ha->flags.fce_enabled = 0;
1305 goto try_eft; 1295 goto try_eft;
1306 } 1296 }
1307 1297 ql_log(ql_log_info, vha, 0x00c0,
1308 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n", 1298 "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
1309 FCE_SIZE / 1024);
1310 1299
1311 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; 1300 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
1312 ha->flags.fce_enabled = 1; 1301 ha->flags.fce_enabled = 1;
@@ -1317,23 +1306,23 @@ try_eft:
1317 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 1306 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
1318 GFP_KERNEL); 1307 GFP_KERNEL);
1319 if (!tc) { 1308 if (!tc) {
1320 qla_printk(KERN_WARNING, ha, "Unable to allocate " 1309 ql_log(ql_log_warn, vha, 0x00c1,
1321 "(%d KB) for EFT.\n", EFT_SIZE / 1024); 1310 "Unable to allocate (%d KB) for EFT.\n",
1311 EFT_SIZE / 1024);
1322 goto cont_alloc; 1312 goto cont_alloc;
1323 } 1313 }
1324 1314
1325 memset(tc, 0, EFT_SIZE); 1315 memset(tc, 0, EFT_SIZE);
1326 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); 1316 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
1327 if (rval) { 1317 if (rval) {
1328 qla_printk(KERN_WARNING, ha, "Unable to initialize " 1318 ql_log(ql_log_warn, vha, 0x00c2,
1329 "EFT (%d).\n", rval); 1319 "Unable to initialize EFT (%d).\n", rval);
1330 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, 1320 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
1331 tc_dma); 1321 tc_dma);
1332 goto cont_alloc; 1322 goto cont_alloc;
1333 } 1323 }
1334 1324 ql_log(ql_log_info, vha, 0x00c3,
1335 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n", 1325 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
1336 EFT_SIZE / 1024);
1337 1326
1338 eft_size = EFT_SIZE; 1327 eft_size = EFT_SIZE;
1339 ha->eft_dma = tc_dma; 1328 ha->eft_dma = tc_dma;
@@ -1350,8 +1339,9 @@ cont_alloc:
1350 1339
1351 ha->fw_dump = vmalloc(dump_size); 1340 ha->fw_dump = vmalloc(dump_size);
1352 if (!ha->fw_dump) { 1341 if (!ha->fw_dump) {
1353 qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for " 1342 ql_log(ql_log_warn, vha, 0x00c4,
1354 "firmware dump!!!\n", dump_size / 1024); 1343 "Unable to allocate (%d KB) for firmware dump.\n",
1344 dump_size / 1024);
1355 1345
1356 if (ha->fce) { 1346 if (ha->fce) {
1357 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 1347 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
@@ -1368,8 +1358,8 @@ cont_alloc:
1368 } 1358 }
1369 return; 1359 return;
1370 } 1360 }
1371 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n", 1361 ql_log(ql_log_info, vha, 0x00c5,
1372 dump_size / 1024); 1362 "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
1373 1363
1374 ha->fw_dump_len = dump_size; 1364 ha->fw_dump_len = dump_size;
1375 ha->fw_dump->signature[0] = 'Q'; 1365 ha->fw_dump->signature[0] = 'Q';
@@ -1398,23 +1388,21 @@ qla81xx_mpi_sync(scsi_qla_host_t *vha)
1398 int rval; 1388 int rval;
1399 uint16_t dc; 1389 uint16_t dc;
1400 uint32_t dw; 1390 uint32_t dw;
1401 struct qla_hw_data *ha = vha->hw;
1402 1391
1403 if (!IS_QLA81XX(vha->hw)) 1392 if (!IS_QLA81XX(vha->hw))
1404 return QLA_SUCCESS; 1393 return QLA_SUCCESS;
1405 1394
1406 rval = qla2x00_write_ram_word(vha, 0x7c00, 1); 1395 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
1407 if (rval != QLA_SUCCESS) { 1396 if (rval != QLA_SUCCESS) {
1408 DEBUG2(qla_printk(KERN_WARNING, ha, 1397 ql_log(ql_log_warn, vha, 0x0105,
1409 "Sync-MPI: Unable to acquire semaphore.\n")); 1398 "Unable to acquire semaphore.\n");
1410 goto done; 1399 goto done;
1411 } 1400 }
1412 1401
1413 pci_read_config_word(vha->hw->pdev, 0x54, &dc); 1402 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
1414 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw); 1403 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
1415 if (rval != QLA_SUCCESS) { 1404 if (rval != QLA_SUCCESS) {
1416 DEBUG2(qla_printk(KERN_WARNING, ha, 1405 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
1417 "Sync-MPI: Unable to read sync.\n"));
1418 goto done_release; 1406 goto done_release;
1419 } 1407 }
1420 1408
@@ -1426,15 +1414,14 @@ qla81xx_mpi_sync(scsi_qla_host_t *vha)
1426 dw |= dc; 1414 dw |= dc;
1427 rval = qla2x00_write_ram_word(vha, 0x7a15, dw); 1415 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
1428 if (rval != QLA_SUCCESS) { 1416 if (rval != QLA_SUCCESS) {
1429 DEBUG2(qla_printk(KERN_WARNING, ha, 1417 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
1430 "Sync-MPI: Unable to gain sync.\n"));
1431 } 1418 }
1432 1419
1433done_release: 1420done_release:
1434 rval = qla2x00_write_ram_word(vha, 0x7c00, 0); 1421 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
1435 if (rval != QLA_SUCCESS) { 1422 if (rval != QLA_SUCCESS) {
1436 DEBUG2(qla_printk(KERN_WARNING, ha, 1423 ql_log(ql_log_warn, vha, 0x006d,
1437 "Sync-MPI: Unable to release semaphore.\n")); 1424 "Unable to release semaphore.\n");
1438 } 1425 }
1439 1426
1440done: 1427done:
@@ -1479,14 +1466,14 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
1479 /* Load firmware sequences */ 1466 /* Load firmware sequences */
1480 rval = ha->isp_ops->load_risc(vha, &srisc_address); 1467 rval = ha->isp_ops->load_risc(vha, &srisc_address);
1481 if (rval == QLA_SUCCESS) { 1468 if (rval == QLA_SUCCESS) {
1482 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC " 1469 ql_dbg(ql_dbg_init, vha, 0x00c9,
1483 "code.\n", vha->host_no)); 1470 "Verifying Checksum of loaded RISC code.\n");
1484 1471
1485 rval = qla2x00_verify_checksum(vha, srisc_address); 1472 rval = qla2x00_verify_checksum(vha, srisc_address);
1486 if (rval == QLA_SUCCESS) { 1473 if (rval == QLA_SUCCESS) {
1487 /* Start firmware execution. */ 1474 /* Start firmware execution. */
1488 DEBUG(printk("scsi(%ld): Checksum OK, start " 1475 ql_dbg(ql_dbg_init, vha, 0x00ca,
1489 "firmware.\n", vha->host_no)); 1476 "Starting firmware.\n");
1490 1477
1491 rval = qla2x00_execute_fw(vha, srisc_address); 1478 rval = qla2x00_execute_fw(vha, srisc_address);
1492 /* Retrieve firmware information. */ 1479 /* Retrieve firmware information. */
@@ -1522,9 +1509,9 @@ enable_82xx_npiv:
1522 } 1509 }
1523 } 1510 }
1524 } else { 1511 } else {
1525 DEBUG2(printk(KERN_INFO 1512 ql_log(ql_log_fatal, vha, 0x00cd,
1526 "scsi(%ld): ISP Firmware failed checksum.\n", 1513 "ISP Firmware failed checksum.\n");
1527 vha->host_no)); 1514 goto failed;
1528 } 1515 }
1529 } 1516 }
1530 1517
@@ -1549,7 +1536,7 @@ enable_82xx_npiv:
1549 ha->flags.fac_supported = 1; 1536 ha->flags.fac_supported = 1;
1550 ha->fdt_block_size = size << 2; 1537 ha->fdt_block_size = size << 2;
1551 } else { 1538 } else {
1552 qla_printk(KERN_ERR, ha, 1539 ql_log(ql_log_warn, vha, 0x00ce,
1553 "Unsupported FAC firmware (%d.%02d.%02d).\n", 1540 "Unsupported FAC firmware (%d.%02d.%02d).\n",
1554 ha->fw_major_version, ha->fw_minor_version, 1541 ha->fw_major_version, ha->fw_minor_version,
1555 ha->fw_subminor_version); 1542 ha->fw_subminor_version);
@@ -1557,8 +1544,8 @@ enable_82xx_npiv:
1557 } 1544 }
1558failed: 1545failed:
1559 if (rval) { 1546 if (rval) {
1560 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 1547 ql_log(ql_log_fatal, vha, 0x00cf,
1561 vha->host_no)); 1548 "Setup chip ****FAILED****.\n");
1562 } 1549 }
1563 1550
1564 return (rval); 1551 return (rval);
@@ -1608,10 +1595,11 @@ qla2x00_update_fw_options(scsi_qla_host_t *vha)
1608 return; 1595 return;
1609 1596
1610 /* Serial Link options. */ 1597 /* Serial Link options. */
1611 DEBUG3(printk("scsi(%ld): Serial link options:\n", 1598 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
1612 vha->host_no)); 1599 "Serial link options.\n");
1613 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options, 1600 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
1614 sizeof(ha->fw_seriallink_options))); 1601 (uint8_t *)&ha->fw_seriallink_options,
1602 sizeof(ha->fw_seriallink_options));
1615 1603
1616 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 1604 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1617 if (ha->fw_seriallink_options[3] & BIT_2) { 1605 if (ha->fw_seriallink_options[3] & BIT_2) {
@@ -1688,7 +1676,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
1688 le16_to_cpu(ha->fw_seriallink_options24[2]), 1676 le16_to_cpu(ha->fw_seriallink_options24[2]),
1689 le16_to_cpu(ha->fw_seriallink_options24[3])); 1677 le16_to_cpu(ha->fw_seriallink_options24[3]));
1690 if (rval != QLA_SUCCESS) { 1678 if (rval != QLA_SUCCESS) {
1691 qla_printk(KERN_WARNING, ha, 1679 ql_log(ql_log_warn, vha, 0x0104,
1692 "Unable to update Serial Link options (%x).\n", rval); 1680 "Unable to update Serial Link options (%x).\n", rval);
1693 } 1681 }
1694} 1682}
@@ -1746,8 +1734,9 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1746 icb->rid = __constant_cpu_to_le16(rid); 1734 icb->rid = __constant_cpu_to_le16(rid);
1747 if (ha->flags.msix_enabled) { 1735 if (ha->flags.msix_enabled) {
1748 msix = &ha->msix_entries[1]; 1736 msix = &ha->msix_entries[1];
1749 DEBUG2_17(printk(KERN_INFO 1737 ql_dbg(ql_dbg_init, vha, 0x00fd,
1750 "Registering vector 0x%x for base que\n", msix->entry)); 1738 "Registering vector 0x%x for base que.\n",
1739 msix->entry);
1751 icb->msix = cpu_to_le16(msix->entry); 1740 icb->msix = cpu_to_le16(msix->entry);
1752 } 1741 }
1753 /* Use alternate PCI bus number */ 1742 /* Use alternate PCI bus number */
@@ -1764,8 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1764 icb->firmware_options_2 &= 1753 icb->firmware_options_2 &=
1765 __constant_cpu_to_le32(~BIT_22); 1754 __constant_cpu_to_le32(~BIT_22);
1766 ha->flags.disable_msix_handshake = 1; 1755 ha->flags.disable_msix_handshake = 1;
1767 qla_printk(KERN_INFO, ha, 1756 ql_dbg(ql_dbg_init, vha, 0x00fe,
1768 "MSIX Handshake Disable Mode turned on\n"); 1757 "MSIX Handshake Disable Mode turned on.\n");
1769 } else { 1758 } else {
1770 icb->firmware_options_2 |= 1759 icb->firmware_options_2 |=
1771 __constant_cpu_to_le32(BIT_22); 1760 __constant_cpu_to_le32(BIT_22);
@@ -1850,7 +1839,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1850 /* Update any ISP specific firmware options before initialization. */ 1839 /* Update any ISP specific firmware options before initialization. */
1851 ha->isp_ops->update_fw_options(vha); 1840 ha->isp_ops->update_fw_options(vha);
1852 1841
1853 DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no)); 1842 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
1854 1843
1855 if (ha->flags.npiv_supported) { 1844 if (ha->flags.npiv_supported) {
1856 if (ha->operating_mode == LOOP) 1845 if (ha->operating_mode == LOOP)
@@ -1866,11 +1855,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1866 1855
1867 rval = qla2x00_init_firmware(vha, ha->init_cb_size); 1856 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
1868 if (rval) { 1857 if (rval) {
1869 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n", 1858 ql_log(ql_log_fatal, vha, 0x00d2,
1870 vha->host_no)); 1859 "Init Firmware **** FAILED ****.\n");
1871 } else { 1860 } else {
1872 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n", 1861 ql_dbg(ql_dbg_init, vha, 0x00d3,
1873 vha->host_no)); 1862 "Init Firmware -- success.\n");
1874 } 1863 }
1875 1864
1876 return (rval); 1865 return (rval);
@@ -1913,10 +1902,8 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1913 1902
1914 /* Wait for ISP to finish LIP */ 1903 /* Wait for ISP to finish LIP */
1915 if (!vha->flags.init_done) 1904 if (!vha->flags.init_done)
1916 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n"); 1905 ql_log(ql_log_info, vha, 0x801e,
1917 1906 "Waiting for LIP to complete.\n");
1918 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
1919 vha->host_no));
1920 1907
1921 do { 1908 do {
1922 rval = qla2x00_get_firmware_state(vha, state); 1909 rval = qla2x00_get_firmware_state(vha, state);
@@ -1925,30 +1912,35 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1925 vha->device_flags &= ~DFLG_NO_CABLE; 1912 vha->device_flags &= ~DFLG_NO_CABLE;
1926 } 1913 }
1927 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { 1914 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
1928 DEBUG16(printk("scsi(%ld): fw_state=%x " 1915 ql_dbg(ql_dbg_taskm, vha, 0x801f,
1929 "84xx=%x.\n", vha->host_no, state[0], 1916 "fw_state=%x 84xx=%x.\n", state[0],
1930 state[2])); 1917 state[2]);
1931 if ((state[2] & FSTATE_LOGGED_IN) && 1918 if ((state[2] & FSTATE_LOGGED_IN) &&
1932 (state[2] & FSTATE_WAITING_FOR_VERIFY)) { 1919 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
1933 DEBUG16(printk("scsi(%ld): Sending " 1920 ql_dbg(ql_dbg_taskm, vha, 0x8028,
1934 "verify iocb.\n", vha->host_no)); 1921 "Sending verify iocb.\n");
1935 1922
1936 cs84xx_time = jiffies; 1923 cs84xx_time = jiffies;
1937 rval = qla84xx_init_chip(vha); 1924 rval = qla84xx_init_chip(vha);
1938 if (rval != QLA_SUCCESS) 1925 if (rval != QLA_SUCCESS) {
1926 ql_log(ql_log_warn,
1927 vha, 0x8043,
1928 "Init chip failed.\n");
1939 break; 1929 break;
1930 }
1940 1931
1941 /* Add time taken to initialize. */ 1932 /* Add time taken to initialize. */
1942 cs84xx_time = jiffies - cs84xx_time; 1933 cs84xx_time = jiffies - cs84xx_time;
1943 wtime += cs84xx_time; 1934 wtime += cs84xx_time;
1944 mtime += cs84xx_time; 1935 mtime += cs84xx_time;
1945 DEBUG16(printk("scsi(%ld): Increasing " 1936 ql_dbg(ql_dbg_taskm, vha, 0x8042,
1946 "wait time by %ld. New time %ld\n", 1937 "Increasing wait time by %ld. "
1947 vha->host_no, cs84xx_time, wtime)); 1938 "New time %ld.\n", cs84xx_time,
1939 wtime);
1948 } 1940 }
1949 } else if (state[0] == FSTATE_READY) { 1941 } else if (state[0] == FSTATE_READY) {
1950 DEBUG(printk("scsi(%ld): F/W Ready - OK \n", 1942 ql_dbg(ql_dbg_taskm, vha, 0x8037,
1951 vha->host_no)); 1943 "F/W Ready - OK.\n");
1952 1944
1953 qla2x00_get_retry_cnt(vha, &ha->retry_count, 1945 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1954 &ha->login_timeout, &ha->r_a_tov); 1946 &ha->login_timeout, &ha->r_a_tov);
@@ -1965,7 +1957,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1965 * other than Wait for Login. 1957 * other than Wait for Login.
1966 */ 1958 */
1967 if (time_after_eq(jiffies, mtime)) { 1959 if (time_after_eq(jiffies, mtime)) {
1968 qla_printk(KERN_INFO, ha, 1960 ql_log(ql_log_info, vha, 0x8038,
1969 "Cable is unplugged...\n"); 1961 "Cable is unplugged...\n");
1970 1962
1971 vha->device_flags |= DFLG_NO_CABLE; 1963 vha->device_flags |= DFLG_NO_CABLE;
@@ -1985,17 +1977,17 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1985 /* Delay for a while */ 1977 /* Delay for a while */
1986 msleep(500); 1978 msleep(500);
1987 1979
1988 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1980 ql_dbg(ql_dbg_taskm, vha, 0x8039,
1989 vha->host_no, state[0], jiffies)); 1981 "fw_state=%x curr time=%lx.\n", state[0], jiffies);
1990 } while (1); 1982 } while (1);
1991 1983
1992 DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n", 1984 ql_dbg(ql_dbg_taskm, vha, 0x803a,
1993 vha->host_no, state[0], state[1], state[2], state[3], state[4], 1985 "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
1994 jiffies)); 1986 state[1], state[2], state[3], state[4], jiffies);
1995 1987
1996 if (rval) { 1988 if (rval) {
1997 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", 1989 ql_log(ql_log_warn, vha, 0x803b,
1998 vha->host_no)); 1990 "Firmware ready **** FAILED ****.\n");
1999 } 1991 }
2000 1992
2001 return (rval); 1993 return (rval);
@@ -2034,19 +2026,19 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2034 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || 2026 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
2035 IS_QLA8XXX_TYPE(ha) || 2027 IS_QLA8XXX_TYPE(ha) ||
2036 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { 2028 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
2037 DEBUG2(printk("%s(%ld) Loop is in a transition state\n", 2029 ql_dbg(ql_dbg_disc, vha, 0x2008,
2038 __func__, vha->host_no)); 2030 "Loop is in a transition state.\n");
2039 } else { 2031 } else {
2040 qla_printk(KERN_WARNING, ha, 2032 ql_log(ql_log_warn, vha, 0x2009,
2041 "ERROR -- Unable to get host loop ID.\n"); 2033 "Unable to get host loop ID.\n");
2042 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2034 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2043 } 2035 }
2044 return (rval); 2036 return (rval);
2045 } 2037 }
2046 2038
2047 if (topo == 4) { 2039 if (topo == 4) {
2048 qla_printk(KERN_INFO, ha, 2040 ql_log(ql_log_info, vha, 0x200a,
2049 "Cannot get topology - retrying.\n"); 2041 "Cannot get topology - retrying.\n");
2050 return (QLA_FUNCTION_FAILED); 2042 return (QLA_FUNCTION_FAILED);
2051 } 2043 }
2052 2044
@@ -2059,31 +2051,27 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2059 2051
2060 switch (topo) { 2052 switch (topo) {
2061 case 0: 2053 case 0:
2062 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n", 2054 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
2063 vha->host_no));
2064 ha->current_topology = ISP_CFG_NL; 2055 ha->current_topology = ISP_CFG_NL;
2065 strcpy(connect_type, "(Loop)"); 2056 strcpy(connect_type, "(Loop)");
2066 break; 2057 break;
2067 2058
2068 case 1: 2059 case 1:
2069 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n", 2060 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
2070 vha->host_no));
2071 ha->switch_cap = sw_cap; 2061 ha->switch_cap = sw_cap;
2072 ha->current_topology = ISP_CFG_FL; 2062 ha->current_topology = ISP_CFG_FL;
2073 strcpy(connect_type, "(FL_Port)"); 2063 strcpy(connect_type, "(FL_Port)");
2074 break; 2064 break;
2075 2065
2076 case 2: 2066 case 2:
2077 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n", 2067 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
2078 vha->host_no));
2079 ha->operating_mode = P2P; 2068 ha->operating_mode = P2P;
2080 ha->current_topology = ISP_CFG_N; 2069 ha->current_topology = ISP_CFG_N;
2081 strcpy(connect_type, "(N_Port-to-N_Port)"); 2070 strcpy(connect_type, "(N_Port-to-N_Port)");
2082 break; 2071 break;
2083 2072
2084 case 3: 2073 case 3:
2085 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n", 2074 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
2086 vha->host_no));
2087 ha->switch_cap = sw_cap; 2075 ha->switch_cap = sw_cap;
2088 ha->operating_mode = P2P; 2076 ha->operating_mode = P2P;
2089 ha->current_topology = ISP_CFG_F; 2077 ha->current_topology = ISP_CFG_F;
@@ -2091,9 +2079,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2091 break; 2079 break;
2092 2080
2093 default: 2081 default:
2094 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. " 2082 ql_dbg(ql_dbg_disc, vha, 0x200f,
2095 "Using NL.\n", 2083 "HBA in unknown topology %x, using NL.\n", topo);
2096 vha->host_no, topo));
2097 ha->current_topology = ISP_CFG_NL; 2084 ha->current_topology = ISP_CFG_NL;
2098 strcpy(connect_type, "(Loop)"); 2085 strcpy(connect_type, "(Loop)");
2099 break; 2086 break;
@@ -2106,14 +2093,16 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2106 vha->d_id.b.al_pa = al_pa; 2093 vha->d_id.b.al_pa = al_pa;
2107 2094
2108 if (!vha->flags.init_done) 2095 if (!vha->flags.init_done)
2109 qla_printk(KERN_INFO, ha, 2096 ql_log(ql_log_info, vha, 0x2010,
2110 "Topology - %s, Host Loop address 0x%x\n", 2097 "Topology - %s, Host Loop address 0x%x.\n",
2111 connect_type, vha->loop_id); 2098 connect_type, vha->loop_id);
2112 2099
2113 if (rval) { 2100 if (rval) {
2114 DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no)); 2101 ql_log(ql_log_warn, vha, 0x2011,
2102 "%s FAILED\n", __func__);
2115 } else { 2103 } else {
2116 DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no)); 2104 ql_dbg(ql_dbg_disc, vha, 0x2012,
2105 "%s success\n", __func__);
2117 } 2106 }
2118 2107
2119 return(rval); 2108 return(rval);
@@ -2227,18 +2216,22 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2227 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) 2216 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
2228 chksum += *ptr++; 2217 chksum += *ptr++;
2229 2218
2230 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); 2219 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
2231 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 2220 "Contents of NVRAM.\n");
2221 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
2222 (uint8_t *)nv, ha->nvram_size);
2232 2223
2233 /* Bad NVRAM data, set defaults parameters. */ 2224 /* Bad NVRAM data, set defaults parameters. */
2234 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || 2225 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2235 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) { 2226 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
2236 /* Reset NVRAM data. */ 2227 /* Reset NVRAM data. */
2237 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 2228 ql_log(ql_log_warn, vha, 0x0064,
2238 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 2229 "Inconisistent NVRAM "
2239 nv->nvram_version); 2230 "detected: checksum=0x%x id=%c version=0x%x.\n",
2240 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 2231 chksum, nv->id[0], nv->nvram_version);
2241 "invalid -- WWPN) defaults.\n"); 2232 ql_log(ql_log_warn, vha, 0x0065,
2233 "Falling back to "
2234 "functioning (yet invalid -- WWPN) defaults.\n");
2242 2235
2243 /* 2236 /*
2244 * Set default initialization control block. 2237 * Set default initialization control block.
@@ -2382,8 +2375,13 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2382 /* 2375 /*
2383 * Set host adapter parameters. 2376 * Set host adapter parameters.
2384 */ 2377 */
2378
2379 /*
2380 * BIT_7 in the host-parameters section allows for modification to
2381 * internal driver logging.
2382 */
2385 if (nv->host_p[0] & BIT_7) 2383 if (nv->host_p[0] & BIT_7)
2386 ql2xextended_error_logging = 1; 2384 ql2xextended_error_logging = 0x7fffffff;
2387 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); 2385 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
2388 /* Always load RISC code on non ISP2[12]00 chips. */ 2386 /* Always load RISC code on non ISP2[12]00 chips. */
2389 if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) 2387 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
@@ -2488,10 +2486,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2488 if (ha->zio_mode != QLA_ZIO_DISABLED) { 2486 if (ha->zio_mode != QLA_ZIO_DISABLED) {
2489 ha->zio_mode = QLA_ZIO_MODE_6; 2487 ha->zio_mode = QLA_ZIO_MODE_6;
2490 2488
2491 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer " 2489 ql_log(ql_log_info, vha, 0x0068,
2492 "delay (%d us).\n", vha->host_no, ha->zio_mode,
2493 ha->zio_timer * 100));
2494 qla_printk(KERN_INFO, ha,
2495 "ZIO mode %d enabled; timer delay (%d us).\n", 2490 "ZIO mode %d enabled; timer delay (%d us).\n",
2496 ha->zio_mode, ha->zio_timer * 100); 2491 ha->zio_mode, ha->zio_timer * 100);
2497 2492
@@ -2502,8 +2497,8 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2502 } 2497 }
2503 2498
2504 if (rval) { 2499 if (rval) {
2505 DEBUG2_3(printk(KERN_WARNING 2500 ql_log(ql_log_warn, vha, 0x0069,
2506 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 2501 "NVRAM configuration failed.\n");
2507 } 2502 }
2508 return (rval); 2503 return (rval);
2509} 2504}
@@ -2574,15 +2569,15 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2574 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) { 2569 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
2575 rval = qla2x00_configure_hba(vha); 2570 rval = qla2x00_configure_hba(vha);
2576 if (rval != QLA_SUCCESS) { 2571 if (rval != QLA_SUCCESS) {
2577 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n", 2572 ql_dbg(ql_dbg_disc, vha, 0x2013,
2578 vha->host_no)); 2573 "Unable to configure HBA.\n");
2579 return (rval); 2574 return (rval);
2580 } 2575 }
2581 } 2576 }
2582 2577
2583 save_flags = flags = vha->dpc_flags; 2578 save_flags = flags = vha->dpc_flags;
2584 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n", 2579 ql_dbg(ql_dbg_disc, vha, 0x2014,
2585 vha->host_no, flags)); 2580 "Configure loop -- dpc flags = 0x%lx.\n", flags);
2586 2581
2587 /* 2582 /*
2588 * If we have both an RSCN and PORT UPDATE pending then handle them 2583 * If we have both an RSCN and PORT UPDATE pending then handle them
@@ -2619,15 +2614,21 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2619 } 2614 }
2620 2615
2621 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) { 2616 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
2622 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2617 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2618 ql_dbg(ql_dbg_disc, vha, 0x2015,
2619 "Loop resync needed, failing.\n");
2623 rval = QLA_FUNCTION_FAILED; 2620 rval = QLA_FUNCTION_FAILED;
2621 }
2624 else 2622 else
2625 rval = qla2x00_configure_local_loop(vha); 2623 rval = qla2x00_configure_local_loop(vha);
2626 } 2624 }
2627 2625
2628 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { 2626 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
2629 if (LOOP_TRANSITION(vha)) 2627 if (LOOP_TRANSITION(vha)) {
2628 ql_dbg(ql_dbg_disc, vha, 0x201e,
2629 "Needs RSCN update and loop transition.\n");
2630 rval = QLA_FUNCTION_FAILED; 2630 rval = QLA_FUNCTION_FAILED;
2631 }
2631 else 2632 else
2632 rval = qla2x00_configure_fabric(vha); 2633 rval = qla2x00_configure_fabric(vha);
2633 } 2634 }
@@ -2638,16 +2639,17 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2638 rval = QLA_FUNCTION_FAILED; 2639 rval = QLA_FUNCTION_FAILED;
2639 } else { 2640 } else {
2640 atomic_set(&vha->loop_state, LOOP_READY); 2641 atomic_set(&vha->loop_state, LOOP_READY);
2641 2642 ql_dbg(ql_dbg_disc, vha, 0x2069,
2642 DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no)); 2643 "LOOP READY.\n");
2643 } 2644 }
2644 } 2645 }
2645 2646
2646 if (rval) { 2647 if (rval) {
2647 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n", 2648 ql_dbg(ql_dbg_disc, vha, 0x206a,
2648 __func__, vha->host_no)); 2649 "%s *** FAILED ***.\n", __func__);
2649 } else { 2650 } else {
2650 DEBUG3(printk("%s: exiting normally\n", __func__)); 2651 ql_dbg(ql_dbg_disc, vha, 0x206b,
2652 "%s: exiting normally.\n", __func__);
2651 } 2653 }
2652 2654
2653 /* Restore state if a resync event occurred during processing */ 2655 /* Restore state if a resync event occurred during processing */
@@ -2695,8 +2697,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2695 new_fcport = NULL; 2697 new_fcport = NULL;
2696 entries = MAX_FIBRE_DEVICES; 2698 entries = MAX_FIBRE_DEVICES;
2697 2699
2698 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no)); 2700 ql_dbg(ql_dbg_disc, vha, 0x2016,
2699 DEBUG3(qla2x00_get_fcal_position_map(vha, NULL)); 2701 "Getting FCAL position map.\n");
2702 if (ql2xextended_error_logging & ql_dbg_disc)
2703 qla2x00_get_fcal_position_map(vha, NULL);
2700 2704
2701 /* Get list of logged in devices. */ 2705 /* Get list of logged in devices. */
2702 memset(ha->gid_list, 0, GID_LIST_SIZE); 2706 memset(ha->gid_list, 0, GID_LIST_SIZE);
@@ -2705,14 +2709,17 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2705 if (rval != QLA_SUCCESS) 2709 if (rval != QLA_SUCCESS)
2706 goto cleanup_allocation; 2710 goto cleanup_allocation;
2707 2711
2708 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", 2712 ql_dbg(ql_dbg_disc, vha, 0x2017,
2709 vha->host_no, entries)); 2713 "Entries in ID list (%d).\n", entries);
2710 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, 2714 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
2711 entries * sizeof(struct gid_list_info))); 2715 (uint8_t *)ha->gid_list,
2716 entries * sizeof(struct gid_list_info));
2712 2717
2713 /* Allocate temporary fcport for any new fcports discovered. */ 2718 /* Allocate temporary fcport for any new fcports discovered. */
2714 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2719 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2715 if (new_fcport == NULL) { 2720 if (new_fcport == NULL) {
2721 ql_log(ql_log_warn, vha, 0x2018,
2722 "Memory allocation failed for fcport.\n");
2716 rval = QLA_MEMORY_ALLOC_FAILED; 2723 rval = QLA_MEMORY_ALLOC_FAILED;
2717 goto cleanup_allocation; 2724 goto cleanup_allocation;
2718 } 2725 }
@@ -2726,9 +2733,9 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2726 fcport->port_type != FCT_BROADCAST && 2733 fcport->port_type != FCT_BROADCAST &&
2727 (fcport->flags & FCF_FABRIC_DEVICE) == 0) { 2734 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2728 2735
2729 DEBUG(printk("scsi(%ld): Marking port lost, " 2736 ql_dbg(ql_dbg_disc, vha, 0x2019,
2730 "loop_id=0x%04x\n", 2737 "Marking port lost loop_id=0x%04x.\n",
2731 vha->host_no, fcport->loop_id)); 2738 fcport->loop_id);
2732 2739
2733 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 2740 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2734 } 2741 }
@@ -2769,12 +2776,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2769 new_fcport->vp_idx = vha->vp_idx; 2776 new_fcport->vp_idx = vha->vp_idx;
2770 rval2 = qla2x00_get_port_database(vha, new_fcport, 0); 2777 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2771 if (rval2 != QLA_SUCCESS) { 2778 if (rval2 != QLA_SUCCESS) {
2772 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport " 2779 ql_dbg(ql_dbg_disc, vha, 0x201a,
2773 "information -- get_port_database=%x, " 2780 "Failed to retrieve fcport information "
2774 "loop_id=0x%04x\n", 2781 "-- get_port_database=%x, loop_id=0x%04x.\n",
2775 vha->host_no, rval2, new_fcport->loop_id)); 2782 rval2, new_fcport->loop_id);
2776 DEBUG2(printk("scsi(%ld): Scheduling resync...\n", 2783 ql_dbg(ql_dbg_disc, vha, 0x201b,
2777 vha->host_no)); 2784 "Scheduling resync.\n");
2778 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2785 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2779 continue; 2786 continue;
2780 } 2787 }
@@ -2810,6 +2817,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2810 fcport = new_fcport; 2817 fcport = new_fcport;
2811 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2818 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2812 if (new_fcport == NULL) { 2819 if (new_fcport == NULL) {
2820 ql_log(ql_log_warn, vha, 0x201c,
2821 "Failed to allocate memory for fcport.\n");
2813 rval = QLA_MEMORY_ALLOC_FAILED; 2822 rval = QLA_MEMORY_ALLOC_FAILED;
2814 goto cleanup_allocation; 2823 goto cleanup_allocation;
2815 } 2824 }
@@ -2828,8 +2837,8 @@ cleanup_allocation:
2828 kfree(new_fcport); 2837 kfree(new_fcport);
2829 2838
2830 if (rval != QLA_SUCCESS) { 2839 if (rval != QLA_SUCCESS) {
2831 DEBUG2(printk("scsi(%ld): Configure local loop error exit: " 2840 ql_dbg(ql_dbg_disc, vha, 0x201d,
2832 "rval=%x\n", vha->host_no, rval)); 2841 "Configure local loop error exit: rval=%x.\n", rval);
2833 } 2842 }
2834 2843
2835 return (rval); 2844 return (rval);
@@ -2858,27 +2867,27 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2858 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, 2867 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2859 mb); 2868 mb);
2860 if (rval != QLA_SUCCESS) { 2869 if (rval != QLA_SUCCESS) {
2861 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA " 2870 ql_dbg(ql_dbg_disc, vha, 0x2004,
2862 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n", 2871 "Unable to adjust iIDMA "
2863 vha->host_no, fcport->port_name[0], fcport->port_name[1], 2872 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x "
2873 "%04x.\n", fcport->port_name[0], fcport->port_name[1],
2864 fcport->port_name[2], fcport->port_name[3], 2874 fcport->port_name[2], fcport->port_name[3],
2865 fcport->port_name[4], fcport->port_name[5], 2875 fcport->port_name[4], fcport->port_name[5],
2866 fcport->port_name[6], fcport->port_name[7], rval, 2876 fcport->port_name[6], fcport->port_name[7], rval,
2867 fcport->fp_speed, mb[0], mb[1])); 2877 fcport->fp_speed, mb[0], mb[1]);
2868 } else { 2878 } else {
2869 link_speed = link_speeds[LS_UNKNOWN]; 2879 link_speed = link_speeds[LS_UNKNOWN];
2870 if (fcport->fp_speed < 5) 2880 if (fcport->fp_speed < 5)
2871 link_speed = link_speeds[fcport->fp_speed]; 2881 link_speed = link_speeds[fcport->fp_speed];
2872 else if (fcport->fp_speed == 0x13) 2882 else if (fcport->fp_speed == 0x13)
2873 link_speed = link_speeds[5]; 2883 link_speed = link_speeds[5];
2874 DEBUG2(qla_printk(KERN_INFO, ha, 2884 ql_dbg(ql_dbg_disc, vha, 0x2005,
2875 "iIDMA adjusted to %s GB/s on " 2885 "iIDMA adjusted to %s GB/s "
2876 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", 2886 "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
2877 link_speed, fcport->port_name[0], 2887 fcport->port_name[0], fcport->port_name[1],
2878 fcport->port_name[1], fcport->port_name[2], 2888 fcport->port_name[2], fcport->port_name[3],
2879 fcport->port_name[3], fcport->port_name[4], 2889 fcport->port_name[4], fcport->port_name[5],
2880 fcport->port_name[5], fcport->port_name[6], 2890 fcport->port_name[6], fcport->port_name[7]);
2881 fcport->port_name[7]));
2882 } 2891 }
2883} 2892}
2884 2893
@@ -2887,7 +2896,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2887{ 2896{
2888 struct fc_rport_identifiers rport_ids; 2897 struct fc_rport_identifiers rport_ids;
2889 struct fc_rport *rport; 2898 struct fc_rport *rport;
2890 struct qla_hw_data *ha = vha->hw;
2891 unsigned long flags; 2899 unsigned long flags;
2892 2900
2893 qla2x00_rport_del(fcport); 2901 qla2x00_rport_del(fcport);
@@ -2899,8 +2907,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2899 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2907 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2900 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids); 2908 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2901 if (!rport) { 2909 if (!rport) {
2902 qla_printk(KERN_WARNING, ha, 2910 ql_log(ql_log_warn, vha, 0x2006,
2903 "Unable to allocate fc remote port!\n"); 2911 "Unable to allocate fc remote port.\n");
2904 return; 2912 return;
2905 } 2913 }
2906 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2914 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -2975,8 +2983,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2975 loop_id = SNS_FL_PORT; 2983 loop_id = SNS_FL_PORT;
2976 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1); 2984 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
2977 if (rval != QLA_SUCCESS) { 2985 if (rval != QLA_SUCCESS) {
2978 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL " 2986 ql_dbg(ql_dbg_disc, vha, 0x201f,
2979 "Port\n", vha->host_no)); 2987 "MBX_GET_PORT_NAME failed, No FL Port.\n");
2980 2988
2981 vha->device_flags &= ~SWITCH_FOUND; 2989 vha->device_flags &= ~SWITCH_FOUND;
2982 return (QLA_SUCCESS); 2990 return (QLA_SUCCESS);
@@ -3003,32 +3011,32 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3003 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, 3011 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
3004 0xfc, mb, BIT_1 | BIT_0); 3012 0xfc, mb, BIT_1 | BIT_0);
3005 if (mb[0] != MBS_COMMAND_COMPLETE) { 3013 if (mb[0] != MBS_COMMAND_COMPLETE) {
3006 DEBUG2(qla_printk(KERN_INFO, ha, 3014 ql_dbg(ql_dbg_disc, vha, 0x2042,
3007 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 3015 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
3008 "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id, 3016 "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
3009 mb[0], mb[1], mb[2], mb[6], mb[7])); 3017 mb[2], mb[6], mb[7]);
3010 return (QLA_SUCCESS); 3018 return (QLA_SUCCESS);
3011 } 3019 }
3012 3020
3013 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) { 3021 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
3014 if (qla2x00_rft_id(vha)) { 3022 if (qla2x00_rft_id(vha)) {
3015 /* EMPTY */ 3023 /* EMPTY */
3016 DEBUG2(printk("scsi(%ld): Register FC-4 " 3024 ql_dbg(ql_dbg_disc, vha, 0x2045,
3017 "TYPE failed.\n", vha->host_no)); 3025 "Register FC-4 TYPE failed.\n");
3018 } 3026 }
3019 if (qla2x00_rff_id(vha)) { 3027 if (qla2x00_rff_id(vha)) {
3020 /* EMPTY */ 3028 /* EMPTY */
3021 DEBUG2(printk("scsi(%ld): Register FC-4 " 3029 ql_dbg(ql_dbg_disc, vha, 0x2049,
3022 "Features failed.\n", vha->host_no)); 3030 "Register FC-4 Features failed.\n");
3023 } 3031 }
3024 if (qla2x00_rnn_id(vha)) { 3032 if (qla2x00_rnn_id(vha)) {
3025 /* EMPTY */ 3033 /* EMPTY */
3026 DEBUG2(printk("scsi(%ld): Register Node Name " 3034 ql_dbg(ql_dbg_disc, vha, 0x204f,
3027 "failed.\n", vha->host_no)); 3035 "Register Node Name failed.\n");
3028 } else if (qla2x00_rsnn_nn(vha)) { 3036 } else if (qla2x00_rsnn_nn(vha)) {
3029 /* EMPTY */ 3037 /* EMPTY */
3030 DEBUG2(printk("scsi(%ld): Register Symbolic " 3038 ql_dbg(ql_dbg_disc, vha, 0x2053,
3031 "Node Name failed.\n", vha->host_no)); 3039 "Register Symobilic Node Name failed.\n");
3032 } 3040 }
3033 } 3041 }
3034 3042
@@ -3132,8 +3140,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3132 } 3140 }
3133 3141
3134 if (rval) { 3142 if (rval) {
3135 DEBUG2(printk("scsi(%ld): Configure fabric error exit: " 3143 ql_dbg(ql_dbg_disc, vha, 0x2068,
3136 "rval=%d\n", vha->host_no, rval)); 3144 "Configure fabric error exit rval=%d.\n", rval);
3137 } 3145 }
3138 3146
3139 return (rval); 3147 return (rval);
@@ -3175,8 +3183,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3175 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL); 3183 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
3176 if (!swl) { 3184 if (!swl) {
3177 /*EMPTY*/ 3185 /*EMPTY*/
3178 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 3186 ql_dbg(ql_dbg_disc, vha, 0x2054,
3179 "on GA_NXT\n", vha->host_no)); 3187 "GID_PT allocations failed, fallback on GA_NXT.\n");
3180 } else { 3188 } else {
3181 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { 3189 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
3182 kfree(swl); 3190 kfree(swl);
@@ -3201,6 +3209,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3201 /* Allocate temporary fcport for any new fcports discovered. */ 3209 /* Allocate temporary fcport for any new fcports discovered. */
3202 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 3210 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3203 if (new_fcport == NULL) { 3211 if (new_fcport == NULL) {
3212 ql_log(ql_log_warn, vha, 0x205e,
3213 "Failed to allocate memory for fcport.\n");
3204 kfree(swl); 3214 kfree(swl);
3205 return (QLA_MEMORY_ALLOC_FAILED); 3215 return (QLA_MEMORY_ALLOC_FAILED);
3206 } 3216 }
@@ -3247,9 +3257,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3247 /* Send GA_NXT to the switch */ 3257 /* Send GA_NXT to the switch */
3248 rval = qla2x00_ga_nxt(vha, new_fcport); 3258 rval = qla2x00_ga_nxt(vha, new_fcport);
3249 if (rval != QLA_SUCCESS) { 3259 if (rval != QLA_SUCCESS) {
3250 qla_printk(KERN_WARNING, ha, 3260 ql_log(ql_log_warn, vha, 0x2064,
3251 "SNS scan failed -- assuming zero-entry " 3261 "SNS scan failed -- assuming "
3252 "result...\n"); 3262 "zero-entry result.\n");
3253 list_for_each_entry_safe(fcport, fcptemp, 3263 list_for_each_entry_safe(fcport, fcptemp,
3254 new_fcports, list) { 3264 new_fcports, list) {
3255 list_del(&fcport->list); 3265 list_del(&fcport->list);
@@ -3265,9 +3275,11 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3265 wrap.b24 = new_fcport->d_id.b24; 3275 wrap.b24 = new_fcport->d_id.b24;
3266 first_dev = 0; 3276 first_dev = 0;
3267 } else if (new_fcport->d_id.b24 == wrap.b24) { 3277 } else if (new_fcport->d_id.b24 == wrap.b24) {
3268 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n", 3278 ql_dbg(ql_dbg_disc, vha, 0x2065,
3269 vha->host_no, new_fcport->d_id.b.domain, 3279 "Device wrap (%02x%02x%02x).\n",
3270 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa)); 3280 new_fcport->d_id.b.domain,
3281 new_fcport->d_id.b.area,
3282 new_fcport->d_id.b.al_pa);
3271 break; 3283 break;
3272 } 3284 }
3273 3285
@@ -3372,6 +3384,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3372 nxt_d_id.b24 = new_fcport->d_id.b24; 3384 nxt_d_id.b24 = new_fcport->d_id.b24;
3373 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 3385 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3374 if (new_fcport == NULL) { 3386 if (new_fcport == NULL) {
3387 ql_log(ql_log_warn, vha, 0x2066,
3388 "Memory allocation failed for fcport.\n");
3375 kfree(swl); 3389 kfree(swl);
3376 return (QLA_MEMORY_ALLOC_FAILED); 3390 return (QLA_MEMORY_ALLOC_FAILED);
3377 } 3391 }
@@ -3501,10 +3515,10 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
3501 d_id.b.area = MSB(LSW(rscn_entry)); 3515 d_id.b.area = MSB(LSW(rscn_entry));
3502 d_id.b.al_pa = LSB(LSW(rscn_entry)); 3516 d_id.b.al_pa = LSB(LSW(rscn_entry));
3503 3517
3504 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = " 3518 ql_dbg(ql_dbg_disc, vha, 0x2020,
3505 "[%02x/%02x%02x%02x].\n", 3519 "RSCN queue entry[%d] = [%02x/%02x%02x%02x].\n",
3506 vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain, 3520 vha->rscn_out_ptr, format, d_id.b.domain, d_id.b.area,
3507 d_id.b.area, d_id.b.al_pa)); 3521 d_id.b.al_pa);
3508 3522
3509 vha->rscn_out_ptr++; 3523 vha->rscn_out_ptr++;
3510 if (vha->rscn_out_ptr == MAX_RSCN_COUNT) 3524 if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
@@ -3520,17 +3534,17 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
3520 if (rscn_entry != vha->rscn_queue[rscn_out_iter]) 3534 if (rscn_entry != vha->rscn_queue[rscn_out_iter])
3521 break; 3535 break;
3522 3536
3523 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue " 3537 ql_dbg(ql_dbg_disc, vha, 0x2021,
3524 "entry found at [%d].\n", vha->host_no, 3538 "Skipping duplicate RSCN queue entry found at "
3525 rscn_out_iter)); 3539 "[%d].\n", rscn_out_iter);
3526 3540
3527 vha->rscn_out_ptr = rscn_out_iter; 3541 vha->rscn_out_ptr = rscn_out_iter;
3528 } 3542 }
3529 3543
3530 /* Queue overflow, set switch default case. */ 3544 /* Queue overflow, set switch default case. */
3531 if (vha->flags.rscn_queue_overflow) { 3545 if (vha->flags.rscn_queue_overflow) {
3532 DEBUG(printk("scsi(%ld): device_resync: rscn " 3546 ql_dbg(ql_dbg_disc, vha, 0x2022,
3533 "overflow.\n", vha->host_no)); 3547 "device_resync: rscn overflow.\n");
3534 3548
3535 format = 3; 3549 format = 3;
3536 vha->flags.rscn_queue_overflow = 0; 3550 vha->flags.rscn_queue_overflow = 0;
@@ -3659,10 +3673,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3659 tmp_loopid = 0; 3673 tmp_loopid = 0;
3660 3674
3661 for (;;) { 3675 for (;;) {
3662 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x " 3676 ql_dbg(ql_dbg_disc, vha, 0x2000,
3663 "for port %02x%02x%02x.\n", 3677 "Trying Fabric Login w/loop id 0x%04x for port "
3664 vha->host_no, fcport->loop_id, fcport->d_id.b.domain, 3678 "%02x%02x%02x.\n",
3665 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3679 fcport->loop_id, fcport->d_id.b.domain,
3680 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3666 3681
3667 /* Login fcport on switch. */ 3682 /* Login fcport on switch. */
3668 ha->isp_ops->fabric_login(vha, fcport->loop_id, 3683 ha->isp_ops->fabric_login(vha, fcport->loop_id,
@@ -3680,10 +3695,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3680 tmp_loopid = fcport->loop_id; 3695 tmp_loopid = fcport->loop_id;
3681 fcport->loop_id = mb[1]; 3696 fcport->loop_id = mb[1];
3682 3697
3683 DEBUG(printk("Fabric Login: port in use - next " 3698 ql_dbg(ql_dbg_disc, vha, 0x2001,
3684 "loop id=0x%04x, port Id=%02x%02x%02x.\n", 3699 "Fabric Login: port in use - next loop "
3700 "id=0x%04x, port id= %02x%02x%02x.\n",
3685 fcport->loop_id, fcport->d_id.b.domain, 3701 fcport->loop_id, fcport->d_id.b.domain,
3686 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3702 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3687 3703
3688 } else if (mb[0] == MBS_COMMAND_COMPLETE) { 3704 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
3689 /* 3705 /*
@@ -3744,11 +3760,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3744 /* 3760 /*
3745 * unrecoverable / not handled error 3761 * unrecoverable / not handled error
3746 */ 3762 */
3747 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x " 3763 ql_dbg(ql_dbg_disc, vha, 0x2002,
3748 "loop_id=%x jiffies=%lx.\n", 3764 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
3749 __func__, vha->host_no, mb[0], 3765 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
3750 fcport->d_id.b.domain, fcport->d_id.b.area, 3766 fcport->d_id.b.area, fcport->d_id.b.al_pa,
3751 fcport->d_id.b.al_pa, fcport->loop_id, jiffies)); 3767 fcport->loop_id, jiffies);
3752 3768
3753 *next_loopid = fcport->loop_id; 3769 *next_loopid = fcport->loop_id;
3754 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3770 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
@@ -3852,7 +3868,8 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3852 return (QLA_FUNCTION_FAILED); 3868 return (QLA_FUNCTION_FAILED);
3853 3869
3854 if (rval) 3870 if (rval)
3855 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__)); 3871 ql_dbg(ql_dbg_disc, vha, 0x206c,
3872 "%s *** FAILED ***.\n", __func__);
3856 3873
3857 return (rval); 3874 return (rval);
3858} 3875}
@@ -3929,8 +3946,8 @@ qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha)
3929 struct qla_hw_data *ha = vha->hw; 3946 struct qla_hw_data *ha = vha->hw;
3930 struct scsi_qla_host *vp; 3947 struct scsi_qla_host *vp;
3931 3948
3932 qla_printk(KERN_INFO, ha, 3949 ql_dbg(ql_dbg_p3p, vha, 0xb002,
3933 "Performing ISP error recovery - ha= %p.\n", ha); 3950 "Performing ISP error recovery - ha=%p.\n", ha);
3934 3951
3935 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 3952 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
3936 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 3953 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -3964,8 +3981,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3964 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3981 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3965 ha->qla_stats.total_isp_aborts++; 3982 ha->qla_stats.total_isp_aborts++;
3966 3983
3967 qla_printk(KERN_INFO, ha, 3984 ql_log(ql_log_info, vha, 0x00af,
3968 "Performing ISP error recovery - ha= %p.\n", ha); 3985 "Performing ISP error recovery - ha=%p.\n", ha);
3969 3986
3970 /* For ISP82XX, reset_chip is just disabling interrupts. 3987 /* For ISP82XX, reset_chip is just disabling interrupts.
3971 * Driver waits for the completion of the commands. 3988 * Driver waits for the completion of the commands.
@@ -4016,6 +4033,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
4016 /* Make sure for ISP 82XX IO DMA is complete */ 4033 /* Make sure for ISP 82XX IO DMA is complete */
4017 if (IS_QLA82XX(ha)) { 4034 if (IS_QLA82XX(ha)) {
4018 qla82xx_chip_reset_cleanup(vha); 4035 qla82xx_chip_reset_cleanup(vha);
4036 ql_log(ql_log_info, vha, 0x00b4,
4037 "Done chip reset cleanup.\n");
4019 4038
4020 /* Done waiting for pending commands. 4039 /* Done waiting for pending commands.
4021 * Reset the online flag. 4040 * Reset the online flag.
@@ -4097,7 +4116,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4097 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 4116 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
4098 &ha->fce_bufs); 4117 &ha->fce_bufs);
4099 if (rval) { 4118 if (rval) {
4100 qla_printk(KERN_WARNING, ha, 4119 ql_log(ql_log_warn, vha, 0x8033,
4101 "Unable to reinitialize FCE " 4120 "Unable to reinitialize FCE "
4102 "(%d).\n", rval); 4121 "(%d).\n", rval);
4103 ha->flags.fce_enabled = 0; 4122 ha->flags.fce_enabled = 0;
@@ -4109,7 +4128,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4109 rval = qla2x00_enable_eft_trace(vha, 4128 rval = qla2x00_enable_eft_trace(vha,
4110 ha->eft_dma, EFT_NUM_BUFFERS); 4129 ha->eft_dma, EFT_NUM_BUFFERS);
4111 if (rval) { 4130 if (rval) {
4112 qla_printk(KERN_WARNING, ha, 4131 ql_log(ql_log_warn, vha, 0x8034,
4113 "Unable to reinitialize EFT " 4132 "Unable to reinitialize EFT "
4114 "(%d).\n", rval); 4133 "(%d).\n", rval);
4115 } 4134 }
@@ -4118,9 +4137,9 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4118 vha->flags.online = 1; 4137 vha->flags.online = 1;
4119 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 4138 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
4120 if (ha->isp_abort_cnt == 0) { 4139 if (ha->isp_abort_cnt == 0) {
4121 qla_printk(KERN_WARNING, ha, 4140 ql_log(ql_log_fatal, vha, 0x8035,
4122 "ISP error recovery failed - " 4141 "ISP error recover failed - "
4123 "board disabled\n"); 4142 "board disabled.\n");
4124 /* 4143 /*
4125 * The next call disables the board 4144 * The next call disables the board
4126 * completely. 4145 * completely.
@@ -4132,16 +4151,16 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4132 status = 0; 4151 status = 0;
4133 } else { /* schedule another ISP abort */ 4152 } else { /* schedule another ISP abort */
4134 ha->isp_abort_cnt--; 4153 ha->isp_abort_cnt--;
4135 DEBUG(printk("qla%ld: ISP abort - " 4154 ql_dbg(ql_dbg_taskm, vha, 0x8020,
4136 "retry remaining %d\n", 4155 "ISP abort - retry remaining %d.\n",
4137 vha->host_no, ha->isp_abort_cnt)); 4156 ha->isp_abort_cnt);
4138 status = 1; 4157 status = 1;
4139 } 4158 }
4140 } else { 4159 } else {
4141 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 4160 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
4142 DEBUG(printk("qla2x00(%ld): ISP error recovery " 4161 ql_dbg(ql_dbg_taskm, vha, 0x8021,
4143 "- retrying (%d) more times\n", 4162 "ISP error recovery - retrying (%d) "
4144 vha->host_no, ha->isp_abort_cnt)); 4163 "more times.\n", ha->isp_abort_cnt);
4145 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 4164 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4146 status = 1; 4165 status = 1;
4147 } 4166 }
@@ -4150,9 +4169,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4150 } 4169 }
4151 4170
4152 if (!status) { 4171 if (!status) {
4153 DEBUG(printk(KERN_INFO 4172 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
4154 "qla2x00_abort_isp(%ld): succeeded.\n",
4155 vha->host_no));
4156 4173
4157 spin_lock_irqsave(&ha->vport_slock, flags); 4174 spin_lock_irqsave(&ha->vport_slock, flags);
4158 list_for_each_entry(vp, &ha->vp_list, list) { 4175 list_for_each_entry(vp, &ha->vp_list, list) {
@@ -4169,8 +4186,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4169 spin_unlock_irqrestore(&ha->vport_slock, flags); 4186 spin_unlock_irqrestore(&ha->vport_slock, flags);
4170 4187
4171 } else { 4188 } else {
4172 qla_printk(KERN_INFO, ha, 4189 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n");
4173 "qla2x00_abort_isp: **** FAILED ****\n");
4174 } 4190 }
4175 4191
4176 return(status); 4192 return(status);
@@ -4211,8 +4227,8 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4211 4227
4212 status = qla2x00_fw_ready(vha); 4228 status = qla2x00_fw_ready(vha);
4213 if (!status) { 4229 if (!status) {
4214 DEBUG(printk("%s(): Start configure loop, " 4230 ql_dbg(ql_dbg_taskm, vha, 0x8031,
4215 "status = %d\n", __func__, status)); 4231 "Start configure loop status = %d.\n", status);
4216 4232
4217 /* Issue a marker after FW becomes ready. */ 4233 /* Issue a marker after FW becomes ready. */
4218 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4234 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -4234,9 +4250,8 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4234 if ((vha->device_flags & DFLG_NO_CABLE)) 4250 if ((vha->device_flags & DFLG_NO_CABLE))
4235 status = 0; 4251 status = 0;
4236 4252
4237 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n", 4253 ql_dbg(ql_dbg_taskm, vha, 0x8032,
4238 __func__, 4254 "Configure loop done, status = 0x%x.\n", status);
4239 status));
4240 } 4255 }
4241 return (status); 4256 return (status);
4242} 4257}
@@ -4256,13 +4271,13 @@ qla25xx_init_queues(struct qla_hw_data *ha)
4256 rsp->options &= ~BIT_0; 4271 rsp->options &= ~BIT_0;
4257 ret = qla25xx_init_rsp_que(base_vha, rsp); 4272 ret = qla25xx_init_rsp_que(base_vha, rsp);
4258 if (ret != QLA_SUCCESS) 4273 if (ret != QLA_SUCCESS)
4259 DEBUG2_17(printk(KERN_WARNING 4274 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
4260 "%s Rsp que:%d init failed\n", __func__, 4275 "%s Rsp que: %d init failed.\n",
4261 rsp->id)); 4276 __func__, rsp->id);
4262 else 4277 else
4263 DEBUG2_17(printk(KERN_INFO 4278 ql_dbg(ql_dbg_init, base_vha, 0x0100,
4264 "%s Rsp que:%d inited\n", __func__, 4279 "%s Rsp que: %d inited.\n",
4265 rsp->id)); 4280 __func__, rsp->id);
4266 } 4281 }
4267 } 4282 }
4268 for (i = 1; i < ha->max_req_queues; i++) { 4283 for (i = 1; i < ha->max_req_queues; i++) {
@@ -4272,13 +4287,13 @@ qla25xx_init_queues(struct qla_hw_data *ha)
4272 req->options &= ~BIT_0; 4287 req->options &= ~BIT_0;
4273 ret = qla25xx_init_req_que(base_vha, req); 4288 ret = qla25xx_init_req_que(base_vha, req);
4274 if (ret != QLA_SUCCESS) 4289 if (ret != QLA_SUCCESS)
4275 DEBUG2_17(printk(KERN_WARNING 4290 ql_dbg(ql_dbg_init, base_vha, 0x0101,
4276 "%s Req que:%d init failed\n", __func__, 4291 "%s Req que: %d init failed.\n",
4277 req->id)); 4292 __func__, req->id);
4278 else 4293 else
4279 DEBUG2_17(printk(KERN_WARNING 4294 ql_dbg(ql_dbg_init, base_vha, 0x0102,
4280 "%s Req que:%d inited\n", __func__, 4295 "%s Req que: %d inited.\n",
4281 req->id)); 4296 __func__, req->id);
4282 } 4297 }
4283 } 4298 }
4284 return ret; 4299 return ret;
@@ -4397,19 +4412,22 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4397 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 4412 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4398 chksum += le32_to_cpu(*dptr++); 4413 chksum += le32_to_cpu(*dptr++);
4399 4414
4400 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); 4415 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
4401 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 4416 "Contents of NVRAM\n");
4417 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
4418 (uint8_t *)nv, ha->nvram_size);
4402 4419
4403 /* Bad NVRAM data, set defaults parameters. */ 4420 /* Bad NVRAM data, set defaults parameters. */
4404 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 4421 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4405 || nv->id[3] != ' ' || 4422 || nv->id[3] != ' ' ||
4406 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 4423 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4407 /* Reset NVRAM data. */ 4424 /* Reset NVRAM data. */
4408 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 4425 ql_log(ql_log_warn, vha, 0x006b,
4409 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 4426 "Inconisistent NVRAM detected: checksum=0x%x id=%c "
4410 le16_to_cpu(nv->nvram_version)); 4427 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
4411 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 4428 ql_log(ql_log_warn, vha, 0x006c,
4412 "invalid -- WWPN) defaults.\n"); 4429 "Falling back to functioning (yet invalid -- WWPN) "
4430 "defaults.\n");
4413 4431
4414 /* 4432 /*
4415 * Set default initialization control block. 4433 * Set default initialization control block.
@@ -4587,10 +4605,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4587 if (ha->zio_mode != QLA_ZIO_DISABLED) { 4605 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4588 ha->zio_mode = QLA_ZIO_MODE_6; 4606 ha->zio_mode = QLA_ZIO_MODE_6;
4589 4607
4590 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 4608 ql_log(ql_log_info, vha, 0x006f,
4591 "(%d us).\n", vha->host_no, ha->zio_mode,
4592 ha->zio_timer * 100));
4593 qla_printk(KERN_INFO, ha,
4594 "ZIO mode %d enabled; timer delay (%d us).\n", 4609 "ZIO mode %d enabled; timer delay (%d us).\n",
4595 ha->zio_mode, ha->zio_timer * 100); 4610 ha->zio_mode, ha->zio_timer * 100);
4596 4611
@@ -4601,8 +4616,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4601 } 4616 }
4602 4617
4603 if (rval) { 4618 if (rval) {
4604 DEBUG2_3(printk(KERN_WARNING 4619 ql_log(ql_log_warn, vha, 0x0070,
4605 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 4620 "NVRAM configuration failed.\n");
4606 } 4621 }
4607 return (rval); 4622 return (rval);
4608} 4623}
@@ -4620,8 +4635,8 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4620 struct qla_hw_data *ha = vha->hw; 4635 struct qla_hw_data *ha = vha->hw;
4621 struct req_que *req = ha->req_q_map[0]; 4636 struct req_que *req = ha->req_q_map[0];
4622 4637
4623 qla_printk(KERN_INFO, ha, 4638 ql_dbg(ql_dbg_init, vha, 0x008b,
4624 "FW: Loading from flash (%x)...\n", faddr); 4639 "Loading firmware from flash (%x).\n", faddr);
4625 4640
4626 rval = QLA_SUCCESS; 4641 rval = QLA_SUCCESS;
4627 4642
@@ -4637,11 +4652,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4637 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 4652 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4638 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 4653 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4639 dcode[3] == 0)) { 4654 dcode[3] == 0)) {
4640 qla_printk(KERN_WARNING, ha, 4655 ql_log(ql_log_fatal, vha, 0x008c,
4641 "Unable to verify integrity of flash firmware image!\n"); 4656 "Unable to verify the integrity of flash firmware "
4642 qla_printk(KERN_WARNING, ha, 4657 "image.\n");
4643 "Firmware data: %08x %08x %08x %08x!\n", dcode[0], 4658 ql_log(ql_log_fatal, vha, 0x008d,
4644 dcode[1], dcode[2], dcode[3]); 4659 "Firmware data: %08x %08x %08x %08x.\n",
4660 dcode[0], dcode[1], dcode[2], dcode[3]);
4645 4661
4646 return QLA_FUNCTION_FAILED; 4662 return QLA_FUNCTION_FAILED;
4647 } 4663 }
@@ -4660,9 +4676,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4660 if (dlen > risc_size) 4676 if (dlen > risc_size)
4661 dlen = risc_size; 4677 dlen = risc_size;
4662 4678
4663 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4679 ql_dbg(ql_dbg_init, vha, 0x008e,
4664 "addr %x, number of dwords 0x%x, offset 0x%x.\n", 4680 "Loading risc segment@ risc addr %x "
4665 vha->host_no, risc_addr, dlen, faddr)); 4681 "number of dwords 0x%x offset 0x%x.\n",
4682 risc_addr, dlen, faddr);
4666 4683
4667 qla24xx_read_flash_data(vha, dcode, faddr, dlen); 4684 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
4668 for (i = 0; i < dlen; i++) 4685 for (i = 0; i < dlen; i++)
@@ -4671,12 +4688,9 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4671 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4688 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4672 dlen); 4689 dlen);
4673 if (rval) { 4690 if (rval) {
4674 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4691 ql_log(ql_log_fatal, vha, 0x008f,
4675 "segment %d of firmware\n", vha->host_no, 4692 "Failed to load segment %d of firmware.\n",
4676 fragment)); 4693 fragment);
4677 qla_printk(KERN_WARNING, ha,
4678 "[ERROR] Failed to load segment %d of "
4679 "firmware\n", fragment);
4680 break; 4694 break;
4681 } 4695 }
4682 4696
@@ -4709,9 +4723,10 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4709 /* Load firmware blob. */ 4723 /* Load firmware blob. */
4710 blob = qla2x00_request_firmware(vha); 4724 blob = qla2x00_request_firmware(vha);
4711 if (!blob) { 4725 if (!blob) {
4712 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 4726 ql_log(ql_log_info, vha, 0x0083,
4713 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 4727 "Fimware image unavailable.\n");
4714 "from: " QLA_FW_URL ".\n"); 4728 ql_log(ql_log_info, vha, 0x0084,
4729 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
4715 return QLA_FUNCTION_FAILED; 4730 return QLA_FUNCTION_FAILED;
4716 } 4731 }
4717 4732
@@ -4724,8 +4739,8 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4724 4739
4725 /* Validate firmware image by checking version. */ 4740 /* Validate firmware image by checking version. */
4726 if (blob->fw->size < 8 * sizeof(uint16_t)) { 4741 if (blob->fw->size < 8 * sizeof(uint16_t)) {
4727 qla_printk(KERN_WARNING, ha, 4742 ql_log(ql_log_fatal, vha, 0x0085,
4728 "Unable to verify integrity of firmware image (%Zd)!\n", 4743 "Unable to verify integrity of firmware image (%Zd).\n",
4729 blob->fw->size); 4744 blob->fw->size);
4730 goto fail_fw_integrity; 4745 goto fail_fw_integrity;
4731 } 4746 }
@@ -4734,11 +4749,11 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4734 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff && 4749 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
4735 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 && 4750 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
4736 wcode[2] == 0 && wcode[3] == 0)) { 4751 wcode[2] == 0 && wcode[3] == 0)) {
4737 qla_printk(KERN_WARNING, ha, 4752 ql_log(ql_log_fatal, vha, 0x0086,
4738 "Unable to verify integrity of firmware image!\n"); 4753 "Unable to verify integrity of firmware image.\n");
4739 qla_printk(KERN_WARNING, ha, 4754 ql_log(ql_log_fatal, vha, 0x0087,
4740 "Firmware data: %04x %04x %04x %04x!\n", wcode[0], 4755 "Firmware data: %04x %04x %04x %04x.\n",
4741 wcode[1], wcode[2], wcode[3]); 4756 wcode[0], wcode[1], wcode[2], wcode[3]);
4742 goto fail_fw_integrity; 4757 goto fail_fw_integrity;
4743 } 4758 }
4744 4759
@@ -4751,9 +4766,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4751 /* Validate firmware image size. */ 4766 /* Validate firmware image size. */
4752 fwclen += risc_size * sizeof(uint16_t); 4767 fwclen += risc_size * sizeof(uint16_t);
4753 if (blob->fw->size < fwclen) { 4768 if (blob->fw->size < fwclen) {
4754 qla_printk(KERN_WARNING, ha, 4769 ql_log(ql_log_fatal, vha, 0x0088,
4755 "Unable to verify integrity of firmware image " 4770 "Unable to verify integrity of firmware image "
4756 "(%Zd)!\n", blob->fw->size); 4771 "(%Zd).\n", blob->fw->size);
4757 goto fail_fw_integrity; 4772 goto fail_fw_integrity;
4758 } 4773 }
4759 4774
@@ -4762,10 +4777,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4762 wlen = (uint16_t)(ha->fw_transfer_size >> 1); 4777 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
4763 if (wlen > risc_size) 4778 if (wlen > risc_size)
4764 wlen = risc_size; 4779 wlen = risc_size;
4765 4780 ql_dbg(ql_dbg_init, vha, 0x0089,
4766 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4781 "Loading risc segment@ risc addr %x number of "
4767 "addr %x, number of words 0x%x.\n", vha->host_no, 4782 "words 0x%x.\n", risc_addr, wlen);
4768 risc_addr, wlen));
4769 4783
4770 for (i = 0; i < wlen; i++) 4784 for (i = 0; i < wlen; i++)
4771 wcode[i] = swab16(fwcode[i]); 4785 wcode[i] = swab16(fwcode[i]);
@@ -4773,12 +4787,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4773 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4787 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4774 wlen); 4788 wlen);
4775 if (rval) { 4789 if (rval) {
4776 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4790 ql_log(ql_log_fatal, vha, 0x008a,
4777 "segment %d of firmware\n", vha->host_no, 4791 "Failed to load segment %d of firmware.\n",
4778 fragment)); 4792 fragment);
4779 qla_printk(KERN_WARNING, ha,
4780 "[ERROR] Failed to load segment %d of "
4781 "firmware\n", fragment);
4782 break; 4793 break;
4783 } 4794 }
4784 4795
@@ -4814,15 +4825,17 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4814 /* Load firmware blob. */ 4825 /* Load firmware blob. */
4815 blob = qla2x00_request_firmware(vha); 4826 blob = qla2x00_request_firmware(vha);
4816 if (!blob) { 4827 if (!blob) {
4817 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 4828 ql_log(ql_log_warn, vha, 0x0090,
4818 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 4829 "Fimware image unavailable.\n");
4819 "from: " QLA_FW_URL ".\n"); 4830 ql_log(ql_log_warn, vha, 0x0091,
4831 "Firmware images can be retrieved from: "
4832 QLA_FW_URL ".\n");
4820 4833
4821 return QLA_FUNCTION_FAILED; 4834 return QLA_FUNCTION_FAILED;
4822 } 4835 }
4823 4836
4824 qla_printk(KERN_INFO, ha, 4837 ql_log(ql_log_info, vha, 0x0092,
4825 "FW: Loading via request-firmware...\n"); 4838 "Loading via request-firmware.\n");
4826 4839
4827 rval = QLA_SUCCESS; 4840 rval = QLA_SUCCESS;
4828 4841
@@ -4834,8 +4847,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4834 4847
4835 /* Validate firmware image by checking version. */ 4848 /* Validate firmware image by checking version. */
4836 if (blob->fw->size < 8 * sizeof(uint32_t)) { 4849 if (blob->fw->size < 8 * sizeof(uint32_t)) {
4837 qla_printk(KERN_WARNING, ha, 4850 ql_log(ql_log_fatal, vha, 0x0093,
4838 "Unable to verify integrity of firmware image (%Zd)!\n", 4851 "Unable to verify integrity of firmware image (%Zd).\n",
4839 blob->fw->size); 4852 blob->fw->size);
4840 goto fail_fw_integrity; 4853 goto fail_fw_integrity;
4841 } 4854 }
@@ -4845,11 +4858,12 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4845 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 4858 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4846 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 4859 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4847 dcode[3] == 0)) { 4860 dcode[3] == 0)) {
4848 qla_printk(KERN_WARNING, ha, 4861 ql_log(ql_log_fatal, vha, 0x0094,
4849 "Unable to verify integrity of firmware image!\n"); 4862 "Unable to verify integrity of firmware image (%Zd).\n",
4850 qla_printk(KERN_WARNING, ha, 4863 blob->fw->size);
4851 "Firmware data: %08x %08x %08x %08x!\n", dcode[0], 4864 ql_log(ql_log_fatal, vha, 0x0095,
4852 dcode[1], dcode[2], dcode[3]); 4865 "Firmware data: %08x %08x %08x %08x.\n",
4866 dcode[0], dcode[1], dcode[2], dcode[3]);
4853 goto fail_fw_integrity; 4867 goto fail_fw_integrity;
4854 } 4868 }
4855 4869
@@ -4861,9 +4875,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4861 /* Validate firmware image size. */ 4875 /* Validate firmware image size. */
4862 fwclen += risc_size * sizeof(uint32_t); 4876 fwclen += risc_size * sizeof(uint32_t);
4863 if (blob->fw->size < fwclen) { 4877 if (blob->fw->size < fwclen) {
4864 qla_printk(KERN_WARNING, ha, 4878 ql_log(ql_log_fatal, vha, 0x0096,
4865 "Unable to verify integrity of firmware image " 4879 "Unable to verify integrity of firmware image "
4866 "(%Zd)!\n", blob->fw->size); 4880 "(%Zd).\n", blob->fw->size);
4867 4881
4868 goto fail_fw_integrity; 4882 goto fail_fw_integrity;
4869 } 4883 }
@@ -4874,9 +4888,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4874 if (dlen > risc_size) 4888 if (dlen > risc_size)
4875 dlen = risc_size; 4889 dlen = risc_size;
4876 4890
4877 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4891 ql_dbg(ql_dbg_init, vha, 0x0097,
4878 "addr %x, number of dwords 0x%x.\n", vha->host_no, 4892 "Loading risc segment@ risc addr %x "
4879 risc_addr, dlen)); 4893 "number of dwords 0x%x.\n", risc_addr, dlen);
4880 4894
4881 for (i = 0; i < dlen; i++) 4895 for (i = 0; i < dlen; i++)
4882 dcode[i] = swab32(fwcode[i]); 4896 dcode[i] = swab32(fwcode[i]);
@@ -4884,12 +4898,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4884 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4898 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4885 dlen); 4899 dlen);
4886 if (rval) { 4900 if (rval) {
4887 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4901 ql_log(ql_log_fatal, vha, 0x0098,
4888 "segment %d of firmware\n", vha->host_no, 4902 "Failed to load segment %d of firmware.\n",
4889 fragment)); 4903 fragment);
4890 qla_printk(KERN_WARNING, ha,
4891 "[ERROR] Failed to load segment %d of "
4892 "firmware\n", fragment);
4893 break; 4904 break;
4894 } 4905 }
4895 4906
@@ -4953,14 +4964,13 @@ try_blob_fw:
4953 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw) 4964 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4954 return rval; 4965 return rval;
4955 4966
4956 qla_printk(KERN_ERR, ha, 4967 ql_log(ql_log_info, vha, 0x0099,
4957 "FW: Attempting to fallback to golden firmware...\n"); 4968 "Attempting to fallback to golden firmware.\n");
4958 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); 4969 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4959 if (rval != QLA_SUCCESS) 4970 if (rval != QLA_SUCCESS)
4960 return rval; 4971 return rval;
4961 4972
4962 qla_printk(KERN_ERR, ha, 4973 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
4963 "FW: Please update operational firmware...\n");
4964 ha->flags.running_gold_fw = 1; 4974 ha->flags.running_gold_fw = 1;
4965 4975
4966 return rval; 4976 return rval;
@@ -4987,8 +4997,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4987 continue; 4997 continue;
4988 if (qla2x00_setup_chip(vha) != QLA_SUCCESS) 4998 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
4989 continue; 4999 continue;
4990 qla_printk(KERN_INFO, ha, 5000 ql_log(ql_log_info, vha, 0x8015,
4991 "Attempting retry of stop-firmware command...\n"); 5001 "Attempting retry of stop-firmware command.\n");
4992 ret = qla2x00_stop_firmware(vha); 5002 ret = qla2x00_stop_firmware(vha);
4993 } 5003 }
4994} 5004}
@@ -5023,10 +5033,10 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
5023 /* Login to SNS first */ 5033 /* Login to SNS first */
5024 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1); 5034 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
5025 if (mb[0] != MBS_COMMAND_COMPLETE) { 5035 if (mb[0] != MBS_COMMAND_COMPLETE) {
5026 DEBUG15(qla_printk(KERN_INFO, ha, 5036 ql_dbg(ql_dbg_init, vha, 0x0103,
5027 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 5037 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
5028 "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS, 5038 "mb[6]=%x mb[7]=%x.\n",
5029 mb[0], mb[1], mb[2], mb[6], mb[7])); 5039 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
5030 return (QLA_FUNCTION_FAILED); 5040 return (QLA_FUNCTION_FAILED);
5031 } 5041 }
5032 5042
@@ -5146,19 +5156,23 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5146 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 5156 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
5147 chksum += le32_to_cpu(*dptr++); 5157 chksum += le32_to_cpu(*dptr++);
5148 5158
5149 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); 5159 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
5150 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 5160 "Contents of NVRAM:\n");
5161 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
5162 (uint8_t *)nv, ha->nvram_size);
5151 5163
5152 /* Bad NVRAM data, set defaults parameters. */ 5164 /* Bad NVRAM data, set defaults parameters. */
5153 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 5165 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
5154 || nv->id[3] != ' ' || 5166 || nv->id[3] != ' ' ||
5155 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 5167 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
5156 /* Reset NVRAM data. */ 5168 /* Reset NVRAM data. */
5157 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 5169 ql_log(ql_log_info, vha, 0x0073,
5158 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 5170 "Inconisistent NVRAM detected: checksum=0x%x id=%c "
5171 "version=0x%x.\n", chksum, nv->id[0],
5159 le16_to_cpu(nv->nvram_version)); 5172 le16_to_cpu(nv->nvram_version));
5160 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 5173 ql_log(ql_log_info, vha, 0x0074,
5161 "invalid -- WWPN) defaults.\n"); 5174 "Falling back to functioning (yet invalid -- WWPN) "
5175 "defaults.\n");
5162 5176
5163 /* 5177 /*
5164 * Set default initialization control block. 5178 * Set default initialization control block.
@@ -5350,12 +5364,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5350 if (ha->zio_mode != QLA_ZIO_DISABLED) { 5364 if (ha->zio_mode != QLA_ZIO_DISABLED) {
5351 ha->zio_mode = QLA_ZIO_MODE_6; 5365 ha->zio_mode = QLA_ZIO_MODE_6;
5352 5366
5353 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 5367 ql_log(ql_log_info, vha, 0x0075,
5354 "(%d us).\n", vha->host_no, ha->zio_mode,
5355 ha->zio_timer * 100));
5356 qla_printk(KERN_INFO, ha,
5357 "ZIO mode %d enabled; timer delay (%d us).\n", 5368 "ZIO mode %d enabled; timer delay (%d us).\n",
5358 ha->zio_mode, ha->zio_timer * 100); 5369 ha->zio_mode,
5370 ha->zio_timer * 100);
5359 5371
5360 icb->firmware_options_2 |= cpu_to_le32( 5372 icb->firmware_options_2 |= cpu_to_le32(
5361 (uint32_t)ha->zio_mode); 5373 (uint32_t)ha->zio_mode);
@@ -5364,8 +5376,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5364 } 5376 }
5365 5377
5366 if (rval) { 5378 if (rval) {
5367 DEBUG2_3(printk(KERN_WARNING 5379 ql_log(ql_log_warn, vha, 0x0076,
5368 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 5380 "NVRAM configuration failed.\n");
5369 } 5381 }
5370 return (rval); 5382 return (rval);
5371} 5383}
@@ -5388,9 +5400,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5388 5400
5389 status = qla2x00_fw_ready(vha); 5401 status = qla2x00_fw_ready(vha);
5390 if (!status) { 5402 if (!status) {
5391 qla_printk(KERN_INFO, ha, 5403 ql_log(ql_log_info, vha, 0x803c,
5392 "%s(): Start configure loop, " 5404 "Start configure loop, status =%d.\n", status);
5393 "status = %d\n", __func__, status);
5394 5405
5395 /* Issue a marker after FW becomes ready. */ 5406 /* Issue a marker after FW becomes ready. */
5396 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 5407 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -5412,9 +5423,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5412 if ((vha->device_flags & DFLG_NO_CABLE)) 5423 if ((vha->device_flags & DFLG_NO_CABLE))
5413 status = 0; 5424 status = 0;
5414 5425
5415 qla_printk(KERN_INFO, ha, 5426 ql_log(ql_log_info, vha, 0x803d,
5416 "%s(): Configure loop done, status = 0x%x\n", 5427 "Configure loop done, status = 0x%x.\n", status);
5417 __func__, status);
5418 } 5428 }
5419 5429
5420 if (!status) { 5430 if (!status) {
@@ -5450,9 +5460,9 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5450 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 5460 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
5451 &ha->fce_bufs); 5461 &ha->fce_bufs);
5452 if (rval) { 5462 if (rval) {
5453 qla_printk(KERN_WARNING, ha, 5463 ql_log(ql_log_warn, vha, 0x803e,
5454 "Unable to reinitialize FCE " 5464 "Unable to reinitialize FCE (%d).\n",
5455 "(%d).\n", rval); 5465 rval);
5456 ha->flags.fce_enabled = 0; 5466 ha->flags.fce_enabled = 0;
5457 } 5467 }
5458 } 5468 }
@@ -5462,17 +5472,16 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5462 rval = qla2x00_enable_eft_trace(vha, 5472 rval = qla2x00_enable_eft_trace(vha,
5463 ha->eft_dma, EFT_NUM_BUFFERS); 5473 ha->eft_dma, EFT_NUM_BUFFERS);
5464 if (rval) { 5474 if (rval) {
5465 qla_printk(KERN_WARNING, ha, 5475 ql_log(ql_log_warn, vha, 0x803f,
5466 "Unable to reinitialize EFT " 5476 "Unable to reinitialize EFT (%d).\n",
5467 "(%d).\n", rval); 5477 rval);
5468 } 5478 }
5469 } 5479 }
5470 } 5480 }
5471 5481
5472 if (!status) { 5482 if (!status) {
5473 DEBUG(printk(KERN_INFO 5483 ql_dbg(ql_dbg_taskm, vha, 0x8040,
5474 "qla82xx_restart_isp(%ld): succeeded.\n", 5484 "qla82xx_restart_isp succeeded.\n");
5475 vha->host_no));
5476 5485
5477 spin_lock_irqsave(&ha->vport_slock, flags); 5486 spin_lock_irqsave(&ha->vport_slock, flags);
5478 list_for_each_entry(vp, &ha->vp_list, list) { 5487 list_for_each_entry(vp, &ha->vp_list, list) {
@@ -5489,8 +5498,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5489 spin_unlock_irqrestore(&ha->vport_slock, flags); 5498 spin_unlock_irqrestore(&ha->vport_slock, flags);
5490 5499
5491 } else { 5500 } else {
5492 qla_printk(KERN_INFO, ha, 5501 ql_log(ql_log_warn, vha, 0x8041,
5493 "qla82xx_restart_isp: **** FAILED ****\n"); 5502 "qla82xx_restart_isp **** FAILED ****.\n");
5494 } 5503 }
5495 5504
5496 return status; 5505 return status;
@@ -5640,9 +5649,8 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5640 if (ret == QLA_SUCCESS) 5649 if (ret == QLA_SUCCESS)
5641 fcport->fcp_prio = priority; 5650 fcport->fcp_prio = priority;
5642 else 5651 else
5643 DEBUG2(printk(KERN_WARNING 5652 ql_dbg(ql_dbg_user, vha, 0x704f,
5644 "scsi(%ld): Unable to activate fcp priority, " 5653 "Unable to activate fcp priority, ret=0x%x.\n", ret);
5645 " ret=0x%x\n", vha->host_no, ret));
5646 5654
5647 return ret; 5655 return ret;
5648} 5656}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 4c8167e11f69..d2e904bc21c0 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -94,11 +94,11 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
94 94
95 /* Don't print state transitions during initial allocation of fcport */ 95 /* Don't print state transitions during initial allocation of fcport */
96 if (old_state && old_state != state) { 96 if (old_state && old_state != state) {
97 DEBUG(qla_printk(KERN_WARNING, fcport->vha->hw, 97 ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
98 "scsi(%ld): FCPort state transitioned from %s to %s - " 98 "FCPort state transitioned from %s to %s - "
99 "portid=%02x%02x%02x.\n", fcport->vha->host_no, 99 "portid=%02x%02x%02x.\n",
100 port_state_str[old_state], port_state_str[state], 100 port_state_str[old_state], port_state_str[state],
101 fcport->d_id.b.domain, fcport->d_id.b.area, 101 fcport->d_id.b.domain, fcport->d_id.b.area,
102 fcport->d_id.b.al_pa)); 102 fcport->d_id.b.al_pa);
103 } 103 }
104} 104}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 7bac3cd109d6..49d6906af886 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -150,7 +150,8 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
150 150
151 /* We only support T10 DIF right now */ 151 /* We only support T10 DIF right now */
152 if (guard != SHOST_DIX_GUARD_CRC) { 152 if (guard != SHOST_DIX_GUARD_CRC) {
153 DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard)); 153 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
154 "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
154 return 0; 155 return 0;
155 } 156 }
156 157
@@ -343,9 +344,10 @@ qla2x00_start_scsi(srb_t *sp)
343 344
344 /* Send marker if required */ 345 /* Send marker if required */
345 if (vha->marker_needed != 0) { 346 if (vha->marker_needed != 0) {
346 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) 347 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
347 != QLA_SUCCESS) 348 QLA_SUCCESS) {
348 return (QLA_FUNCTION_FAILED); 349 return (QLA_FUNCTION_FAILED);
350 }
349 vha->marker_needed = 0; 351 vha->marker_needed = 0;
350 } 352 }
351 353
@@ -490,8 +492,8 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
490 mrk24 = NULL; 492 mrk24 = NULL;
491 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0); 493 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
492 if (mrk == NULL) { 494 if (mrk == NULL) {
493 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n", 495 ql_log(ql_log_warn, base_vha, 0x3026,
494 __func__, base_vha->host_no)); 496 "Failed to allocate Marker IOCB.\n");
495 497
496 return (QLA_FUNCTION_FAILED); 498 return (QLA_FUNCTION_FAILED);
497 } 499 }
@@ -547,9 +549,10 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
547 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); 549 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
548 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; 550 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
549 551
550 DEBUG5(printk("%s(): IOCB data:\n", __func__)); 552 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
551 DEBUG5(qla2x00_dump_buffer( 553 "IOCB data:\n");
552 (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE)); 554 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
555 (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
553 556
554 /* Adjust ring index. */ 557 /* Adjust ring index. */
555 req->ring_index++; 558 req->ring_index++;
@@ -604,7 +607,7 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
604 * Returns the number of IOCB entries needed to store @dsds. 607 * Returns the number of IOCB entries needed to store @dsds.
605 */ 608 */
606inline uint16_t 609inline uint16_t
607qla24xx_calc_iocbs(uint16_t dsds) 610qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
608{ 611{
609 uint16_t iocbs; 612 uint16_t iocbs;
610 613
@@ -614,8 +617,6 @@ qla24xx_calc_iocbs(uint16_t dsds)
614 if ((dsds - 1) % 5) 617 if ((dsds - 1) % 5)
615 iocbs++; 618 iocbs++;
616 } 619 }
617 DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
618 __func__, iocbs));
619 return iocbs; 620 return iocbs;
620} 621}
621 622
@@ -712,6 +713,7 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
712 unsigned int protcnt) 713 unsigned int protcnt)
713{ 714{
714 struct sd_dif_tuple *spt; 715 struct sd_dif_tuple *spt;
716 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
715 unsigned char op = scsi_get_prot_op(cmd); 717 unsigned char op = scsi_get_prot_op(cmd);
716 718
717 switch (scsi_get_prot_type(cmd)) { 719 switch (scsi_get_prot_type(cmd)) {
@@ -768,9 +770,9 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
768 op == SCSI_PROT_WRITE_PASS)) { 770 op == SCSI_PROT_WRITE_PASS)) {
769 spt = page_address(sg_page(scsi_prot_sglist(cmd))) + 771 spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
770 scsi_prot_sglist(cmd)[0].offset; 772 scsi_prot_sglist(cmd)[0].offset;
771 DEBUG18(printk(KERN_DEBUG 773 ql_dbg(ql_dbg_io, vha, 0x3008,
772 "%s(): LBA from user %p, lba = 0x%x\n", 774 "LBA from user %p, lba = 0x%x for cmd=%p.\n",
773 __func__, spt, (int)spt->ref_tag)); 775 spt, (int)spt->ref_tag, cmd);
774 pkt->ref_tag = swab32(spt->ref_tag); 776 pkt->ref_tag = swab32(spt->ref_tag);
775 pkt->app_tag_mask[0] = 0x0; 777 pkt->app_tag_mask[0] = 0x0;
776 pkt->app_tag_mask[1] = 0x0; 778 pkt->app_tag_mask[1] = 0x0;
@@ -789,11 +791,11 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
789 break; 791 break;
790 } 792 }
791 793
792 DEBUG18(printk(KERN_DEBUG 794 ql_dbg(ql_dbg_io, vha, 0x3009,
793 "%s(): Setting protection Tags: (BIG) ref tag = 0x%x," 795 "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
794 " app tag = 0x%x, prot SG count %d , cmd lba 0x%x," 796 "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
795 " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt, 797 pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
796 (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd))); 798 scsi_get_prot_type(cmd), cmd);
797} 799}
798 800
799 801
@@ -809,6 +811,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
809 uint32_t *cur_dsd = dsd; 811 uint32_t *cur_dsd = dsd;
810 int i; 812 int i;
811 uint16_t used_dsds = tot_dsds; 813 uint16_t used_dsds = tot_dsds;
814 scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
812 815
813 uint8_t *cp; 816 uint8_t *cp;
814 817
@@ -853,9 +856,10 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
853 cur_dsd = (uint32_t *)next_dsd; 856 cur_dsd = (uint32_t *)next_dsd;
854 } 857 }
855 sle_dma = sg_dma_address(sg); 858 sle_dma = sg_dma_address(sg);
856 DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x," 859 ql_dbg(ql_dbg_io, vha, 0x300a,
857 " len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma), 860 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
858 MSD(sle_dma), sg_dma_len(sg))); 861 cur_dsd, i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
862 sp->cmd);
859 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 863 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
860 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 864 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
861 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 865 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
@@ -863,8 +867,8 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
863 867
864 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 868 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
865 cp = page_address(sg_page(sg)) + sg->offset; 869 cp = page_address(sg_page(sg)) + sg->offset;
866 DEBUG18(printk("%s(): User Data buffer= %p:\n", 870 ql_dbg(ql_dbg_io, vha, 0x300b,
867 __func__ , cp)); 871 "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
868 } 872 }
869 } 873 }
870 /* Null termination */ 874 /* Null termination */
@@ -888,7 +892,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
888 struct scsi_cmnd *cmd; 892 struct scsi_cmnd *cmd;
889 uint32_t *cur_dsd = dsd; 893 uint32_t *cur_dsd = dsd;
890 uint16_t used_dsds = tot_dsds; 894 uint16_t used_dsds = tot_dsds;
891 895 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
892 uint8_t *cp; 896 uint8_t *cp;
893 897
894 898
@@ -935,10 +939,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
935 } 939 }
936 sle_dma = sg_dma_address(sg); 940 sle_dma = sg_dma_address(sg);
937 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 941 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
938 DEBUG18(printk(KERN_DEBUG 942 ql_dbg(ql_dbg_io, vha, 0x3027,
939 "%s(): %p, sg entry %d - addr =0x%x" 943 "%s(): %p, sg_entry %d - "
940 "0x%x, len =%d\n", __func__ , cur_dsd, i, 944 "addr=0x%x0x%x, len=%d.\n",
941 LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg))); 945 __func__, cur_dsd, i,
946 LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
942 } 947 }
943 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 948 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
944 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 949 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
@@ -946,8 +951,9 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
946 951
947 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 952 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
948 cp = page_address(sg_page(sg)) + sg->offset; 953 cp = page_address(sg_page(sg)) + sg->offset;
949 DEBUG18(printk("%s(): Protection Data buffer = %p:\n", 954 ql_dbg(ql_dbg_io, vha, 0x3028,
950 __func__ , cp)); 955 "%s(): Protection Data buffer = %p.\n", __func__,
956 cp);
951 } 957 }
952 avail_dsds--; 958 avail_dsds--;
953 } 959 }
@@ -996,22 +1002,16 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
996 *((uint32_t *)(&cmd_pkt->entry_type)) = 1002 *((uint32_t *)(&cmd_pkt->entry_type)) =
997 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2); 1003 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
998 1004
1005 vha = sp->fcport->vha;
1006 ha = vha->hw;
1007
999 /* No data transfer */ 1008 /* No data transfer */
1000 data_bytes = scsi_bufflen(cmd); 1009 data_bytes = scsi_bufflen(cmd);
1001 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1010 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1002 DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
1003 __func__, data_bytes));
1004 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 1011 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1005 return QLA_SUCCESS; 1012 return QLA_SUCCESS;
1006 } 1013 }
1007 1014
1008 vha = sp->fcport->vha;
1009 ha = vha->hw;
1010
1011 DEBUG18(printk(KERN_DEBUG
1012 "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
1013 vha->host_no, sp, scsi_get_prot_op(sp->cmd)));
1014
1015 cmd_pkt->vp_index = sp->fcport->vp_idx; 1015 cmd_pkt->vp_index = sp->fcport->vp_idx;
1016 1016
1017 /* Set transfer direction */ 1017 /* Set transfer direction */
@@ -1056,8 +1056,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1056 1056
1057 /* Determine SCSI command length -- align to 4 byte boundary */ 1057 /* Determine SCSI command length -- align to 4 byte boundary */
1058 if (cmd->cmd_len > 16) { 1058 if (cmd->cmd_len > 16) {
1059 DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
1060 __func__));
1061 additional_fcpcdb_len = cmd->cmd_len - 16; 1059 additional_fcpcdb_len = cmd->cmd_len - 16;
1062 if ((cmd->cmd_len % 4) != 0) { 1060 if ((cmd->cmd_len % 4) != 0) {
1063 /* SCSI cmd > 16 bytes must be multiple of 4 */ 1061 /* SCSI cmd > 16 bytes must be multiple of 4 */
@@ -1108,11 +1106,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1108 1106
1109 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ 1107 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1110 1108
1111 DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
1112 "entries %d, data bytes %d, Protection entries %d\n",
1113 __func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds),
1114 data_bytes, tot_prot_dsds));
1115
1116 /* Compute dif len and adjust data len to incude protection */ 1109 /* Compute dif len and adjust data len to incude protection */
1117 total_bytes = data_bytes; 1110 total_bytes = data_bytes;
1118 dif_bytes = 0; 1111 dif_bytes = 0;
@@ -1150,14 +1143,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1150 additional_fcpcdb_len); 1143 additional_fcpcdb_len);
1151 *fcp_dl = htonl(total_bytes); 1144 *fcp_dl = htonl(total_bytes);
1152 1145
1153 DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
1154 " = 0x%x (%d), dat block size =0x%x (%d)\n", __func__,
1155 vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
1156 crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));
1157
1158 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1146 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1159 DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
1160 __func__, data_bytes));
1161 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 1147 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1162 return QLA_SUCCESS; 1148 return QLA_SUCCESS;
1163 } 1149 }
@@ -1182,8 +1168,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1182 return QLA_SUCCESS; 1168 return QLA_SUCCESS;
1183 1169
1184crc_queuing_error: 1170crc_queuing_error:
1185 DEBUG18(qla_printk(KERN_INFO, ha,
1186 "CMD sent FAILED crc_q error:sp = %p\n", sp));
1187 /* Cleanup will be performed by the caller */ 1171 /* Cleanup will be performed by the caller */
1188 1172
1189 return QLA_FUNCTION_FAILED; 1173 return QLA_FUNCTION_FAILED;
@@ -1225,8 +1209,8 @@ qla24xx_start_scsi(srb_t *sp)
1225 1209
1226 /* Send marker if required */ 1210 /* Send marker if required */
1227 if (vha->marker_needed != 0) { 1211 if (vha->marker_needed != 0) {
1228 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) 1212 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1229 != QLA_SUCCESS) 1213 QLA_SUCCESS)
1230 return QLA_FUNCTION_FAILED; 1214 return QLA_FUNCTION_FAILED;
1231 vha->marker_needed = 0; 1215 vha->marker_needed = 0;
1232 } 1216 }
@@ -1243,8 +1227,9 @@ qla24xx_start_scsi(srb_t *sp)
1243 if (!req->outstanding_cmds[handle]) 1227 if (!req->outstanding_cmds[handle])
1244 break; 1228 break;
1245 } 1229 }
1246 if (index == MAX_OUTSTANDING_COMMANDS) 1230 if (index == MAX_OUTSTANDING_COMMANDS) {
1247 goto queuing_error; 1231 goto queuing_error;
1232 }
1248 1233
1249 /* Map the sg table so we have an accurate count of sg entries needed */ 1234 /* Map the sg table so we have an accurate count of sg entries needed */
1250 if (scsi_sg_count(cmd)) { 1235 if (scsi_sg_count(cmd)) {
@@ -1256,8 +1241,7 @@ qla24xx_start_scsi(srb_t *sp)
1256 nseg = 0; 1241 nseg = 0;
1257 1242
1258 tot_dsds = nseg; 1243 tot_dsds = nseg;
1259 1244 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1260 req_cnt = qla24xx_calc_iocbs(tot_dsds);
1261 if (req->cnt < (req_cnt + 2)) { 1245 if (req->cnt < (req_cnt + 2)) {
1262 cnt = RD_REG_DWORD_RELAXED(req->req_q_out); 1246 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1263 1247
@@ -1322,7 +1306,6 @@ qla24xx_start_scsi(srb_t *sp)
1322 /* Specify response queue number where completion should happen */ 1306 /* Specify response queue number where completion should happen */
1323 cmd_pkt->entry_status = (uint8_t) rsp->id; 1307 cmd_pkt->entry_status = (uint8_t) rsp->id;
1324 wmb(); 1308 wmb();
1325
1326 /* Adjust ring index. */ 1309 /* Adjust ring index. */
1327 req->ring_index++; 1310 req->ring_index++;
1328 if (req->ring_index == req->length) { 1311 if (req->ring_index == req->length) {
@@ -1534,9 +1517,6 @@ queuing_error:
1534 /* Cleanup will be performed by the caller (queuecommand) */ 1517 /* Cleanup will be performed by the caller (queuecommand) */
1535 1518
1536 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1519 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1537
1538 DEBUG18(qla_printk(KERN_INFO, ha,
1539 "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
1540 return QLA_FUNCTION_FAILED; 1520 return QLA_FUNCTION_FAILED;
1541} 1521}
1542 1522
@@ -1581,8 +1561,11 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1581 if (!req->outstanding_cmds[handle]) 1561 if (!req->outstanding_cmds[handle])
1582 break; 1562 break;
1583 } 1563 }
1584 if (index == MAX_OUTSTANDING_COMMANDS) 1564 if (index == MAX_OUTSTANDING_COMMANDS) {
1565 ql_log(ql_log_warn, vha, 0x700b,
1566 "No room on oustanding cmd array.\n");
1585 goto queuing_error; 1567 goto queuing_error;
1568 }
1586 1569
1587 /* Prep command array. */ 1570 /* Prep command array. */
1588 req->current_outstanding_cmd = handle; 1571 req->current_outstanding_cmd = handle;
@@ -1999,8 +1982,11 @@ qla2x00_start_sp(srb_t *sp)
1999 rval = QLA_FUNCTION_FAILED; 1982 rval = QLA_FUNCTION_FAILED;
2000 spin_lock_irqsave(&ha->hardware_lock, flags); 1983 spin_lock_irqsave(&ha->hardware_lock, flags);
2001 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp); 1984 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2002 if (!pkt) 1985 if (!pkt) {
1986 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
1987 "qla2x00_alloc_iocbs failed.\n");
2003 goto done; 1988 goto done;
1989 }
2004 1990
2005 rval = QLA_SUCCESS; 1991 rval = QLA_SUCCESS;
2006 switch (ctx->type) { 1992 switch (ctx->type) {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ae8e298746ba..b16b7725dee0 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -45,7 +45,7 @@ qla2100_intr_handler(int irq, void *dev_id)
45 rsp = (struct rsp_que *) dev_id; 45 rsp = (struct rsp_que *) dev_id;
46 if (!rsp) { 46 if (!rsp) {
47 printk(KERN_INFO 47 printk(KERN_INFO
48 "%s(): NULL response queue pointer\n", __func__); 48 "%s(): NULL response queue pointer.\n", __func__);
49 return (IRQ_NONE); 49 return (IRQ_NONE);
50 } 50 }
51 51
@@ -91,9 +91,9 @@ qla2100_intr_handler(int irq, void *dev_id)
91 qla2x00_async_event(vha, rsp, mb); 91 qla2x00_async_event(vha, rsp, mb);
92 } else { 92 } else {
93 /*EMPTY*/ 93 /*EMPTY*/
94 DEBUG2(printk("scsi(%ld): Unrecognized " 94 ql_dbg(ql_dbg_async, vha, 0x5025,
95 "interrupt type (%d).\n", 95 "Unrecognized interrupt type (%d).\n",
96 vha->host_no, mb[0])); 96 mb[0]);
97 } 97 }
98 /* Release mailbox registers. */ 98 /* Release mailbox registers. */
99 WRT_REG_WORD(&reg->semaphore, 0); 99 WRT_REG_WORD(&reg->semaphore, 0);
@@ -142,7 +142,7 @@ qla2300_intr_handler(int irq, void *dev_id)
142 rsp = (struct rsp_que *) dev_id; 142 rsp = (struct rsp_que *) dev_id;
143 if (!rsp) { 143 if (!rsp) {
144 printk(KERN_INFO 144 printk(KERN_INFO
145 "%s(): NULL response queue pointer\n", __func__); 145 "%s(): NULL response queue pointer.\n", __func__);
146 return (IRQ_NONE); 146 return (IRQ_NONE);
147 } 147 }
148 148
@@ -160,11 +160,13 @@ qla2300_intr_handler(int irq, void *dev_id)
160 160
161 hccr = RD_REG_WORD(&reg->hccr); 161 hccr = RD_REG_WORD(&reg->hccr);
162 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 162 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
163 qla_printk(KERN_INFO, ha, "Parity error -- " 163 ql_log(ql_log_warn, vha, 0x5026,
164 "HCCR=%x, Dumping firmware!\n", hccr); 164 "Parity error -- HCCR=%x, Dumping "
165 "firmware.\n", hccr);
165 else 166 else
166 qla_printk(KERN_INFO, ha, "RISC paused -- " 167 ql_log(ql_log_warn, vha, 0x5027,
167 "HCCR=%x, Dumping firmware!\n", hccr); 168 "RISC paused -- HCCR=%x, Dumping "
169 "firmware.\n", hccr);
168 170
169 /* 171 /*
170 * Issue a "HARD" reset in order for the RISC 172 * Issue a "HARD" reset in order for the RISC
@@ -213,9 +215,8 @@ qla2300_intr_handler(int irq, void *dev_id)
213 qla2x00_async_event(vha, rsp, mb); 215 qla2x00_async_event(vha, rsp, mb);
214 break; 216 break;
215 default: 217 default:
216 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 218 ql_dbg(ql_dbg_async, vha, 0x5028,
217 "(%d).\n", 219 "Unrecognized interrupt type (%d).\n", stat & 0xff);
218 vha->host_no, stat & 0xff));
219 break; 220 break;
220 } 221 }
221 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 222 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
@@ -262,11 +263,11 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
262 } 263 }
263 264
264 if (ha->mcp) { 265 if (ha->mcp) {
265 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 266 ql_dbg(ql_dbg_async, vha, 0x5000,
266 __func__, vha->host_no, ha->mcp->mb[0])); 267 "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]);
267 } else { 268 } else {
268 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 269 ql_dbg(ql_dbg_async, vha, 0x5001,
269 __func__, vha->host_no)); 270 "MBX pointer ERROR.\n");
270 } 271 }
271} 272}
272 273
@@ -285,22 +286,24 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
285 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 286 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
286 mb[cnt] = RD_REG_WORD(wptr); 287 mb[cnt] = RD_REG_WORD(wptr);
287 288
288 DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " 289 ql_dbg(ql_dbg_async, vha, 0x5021,
289 "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no, 290 "Inter-Driver Commucation %s -- "
290 event[aen & 0xff], 291 "%04x %04x %04x %04x %04x %04x %04x.\n",
291 mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6])); 292 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
293 mb[4], mb[5], mb[6]);
292 294
293 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 295 /* Acknowledgement needed? [Notify && non-zero timeout]. */
294 timeout = (descr >> 8) & 0xf; 296 timeout = (descr >> 8) & 0xf;
295 if (aen != MBA_IDC_NOTIFY || !timeout) 297 if (aen != MBA_IDC_NOTIFY || !timeout)
296 return; 298 return;
297 299
298 DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " 300 ql_dbg(ql_dbg_async, vha, 0x5022,
299 "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout)); 301 "Inter-Driver Commucation %s -- ACK timeout=%d.\n",
302 vha->host_no, event[aen & 0xff], timeout);
300 303
301 rval = qla2x00_post_idc_ack_work(vha, mb); 304 rval = qla2x00_post_idc_ack_work(vha, mb);
302 if (rval != QLA_SUCCESS) 305 if (rval != QLA_SUCCESS)
303 qla_printk(KERN_WARNING, vha->hw, 306 ql_log(ql_log_warn, vha, 0x5023,
304 "IDC failed to post ACK.\n"); 307 "IDC failed to post ACK.\n");
305} 308}
306 309
@@ -393,15 +396,15 @@ skip_rio:
393 break; 396 break;
394 397
395 case MBA_RESET: /* Reset */ 398 case MBA_RESET: /* Reset */
396 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", 399 ql_dbg(ql_dbg_async, vha, 0x5002,
397 vha->host_no)); 400 "Asynchronous RESET.\n");
398 401
399 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 402 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
400 break; 403 break;
401 404
402 case MBA_SYSTEM_ERR: /* System Error */ 405 case MBA_SYSTEM_ERR: /* System Error */
403 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0; 406 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
404 qla_printk(KERN_INFO, ha, 407 ql_log(ql_log_warn, vha, 0x5003,
405 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " 408 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
406 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); 409 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
407 410
@@ -409,7 +412,7 @@ skip_rio:
409 412
410 if (IS_FWI2_CAPABLE(ha)) { 413 if (IS_FWI2_CAPABLE(ha)) {
411 if (mb[1] == 0 && mb[2] == 0) { 414 if (mb[1] == 0 && mb[2] == 0) {
412 qla_printk(KERN_ERR, ha, 415 ql_log(ql_log_fatal, vha, 0x5004,
413 "Unrecoverable Hardware Error: adapter " 416 "Unrecoverable Hardware Error: adapter "
414 "marked OFFLINE!\n"); 417 "marked OFFLINE!\n");
415 vha->flags.online = 0; 418 vha->flags.online = 0;
@@ -422,7 +425,7 @@ skip_rio:
422 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 425 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
423 } 426 }
424 } else if (mb[1] == 0) { 427 } else if (mb[1] == 0) {
425 qla_printk(KERN_INFO, ha, 428 ql_log(ql_log_fatal, vha, 0x5005,
426 "Unrecoverable Hardware Error: adapter marked " 429 "Unrecoverable Hardware Error: adapter marked "
427 "OFFLINE!\n"); 430 "OFFLINE!\n");
428 vha->flags.online = 0; 431 vha->flags.online = 0;
@@ -431,31 +434,27 @@ skip_rio:
431 break; 434 break;
432 435
433 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 436 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
434 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n", 437 ql_log(ql_log_warn, vha, 0x5006,
435 vha->host_no, mb[1])); 438 "ISP Request Transfer Error (%x).\n", mb[1]);
436 qla_printk(KERN_WARNING, ha,
437 "ISP Request Transfer Error (%x).\n", mb[1]);
438 439
439 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 440 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
440 break; 441 break;
441 442
442 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 443 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
443 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n", 444 ql_log(ql_log_warn, vha, 0x5007,
444 vha->host_no)); 445 "ISP Response Transfer Error.\n");
445 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
446 446
447 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 447 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
448 break; 448 break;
449 449
450 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 450 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
451 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n", 451 ql_dbg(ql_dbg_async, vha, 0x5008,
452 vha->host_no)); 452 "Asynchronous WAKEUP_THRES.\n");
453 break; 453 break;
454 454
455 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 455 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
456 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no, 456 ql_log(ql_log_info, vha, 0x5009,
457 mb[1])); 457 "LIP occurred (%x).\n", mb[1]);
458 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
459 458
460 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 459 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
461 atomic_set(&vha->loop_state, LOOP_DOWN); 460 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -488,10 +487,8 @@ skip_rio:
488 ha->link_data_rate = mb[1]; 487 ha->link_data_rate = mb[1];
489 } 488 }
490 489
491 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n", 490 ql_log(ql_log_info, vha, 0x500a,
492 vha->host_no, link_speed)); 491 "LOOP UP detected (%s Gbps).\n", link_speed);
493 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
494 link_speed);
495 492
496 vha->flags.management_server_logged_in = 0; 493 vha->flags.management_server_logged_in = 0;
497 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 494 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -500,12 +497,9 @@ skip_rio:
500 case MBA_LOOP_DOWN: /* Loop Down Event */ 497 case MBA_LOOP_DOWN: /* Loop Down Event */
501 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0; 498 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
502 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx; 499 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
503 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " 500 ql_log(ql_log_info, vha, 0x500b,
504 "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3], 501 "LOOP DOWN detected (%x %x %x %x).\n",
505 mbx)); 502 mb[1], mb[2], mb[3], mbx);
506 qla_printk(KERN_INFO, ha,
507 "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3],
508 mbx);
509 503
510 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 504 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
511 atomic_set(&vha->loop_state, LOOP_DOWN); 505 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -525,9 +519,7 @@ skip_rio:
525 break; 519 break;
526 520
527 case MBA_LIP_RESET: /* LIP reset occurred */ 521 case MBA_LIP_RESET: /* LIP reset occurred */
528 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", 522 ql_log(ql_log_info, vha, 0x500c,
529 vha->host_no, mb[1]));
530 qla_printk(KERN_INFO, ha,
531 "LIP reset occurred (%x).\n", mb[1]); 523 "LIP reset occurred (%x).\n", mb[1]);
532 524
533 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 525 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -554,14 +546,15 @@ skip_rio:
554 break; 546 break;
555 547
556 if (IS_QLA8XXX_TYPE(ha)) { 548 if (IS_QLA8XXX_TYPE(ha)) {
557 DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x " 549 ql_dbg(ql_dbg_async, vha, 0x500d,
558 "%04x\n", vha->host_no, mb[1], mb[2], mb[3])); 550 "DCBX Completed -- %04x %04x %04x.\n",
551 mb[1], mb[2], mb[3]);
559 if (ha->notify_dcbx_comp) 552 if (ha->notify_dcbx_comp)
560 complete(&ha->dcbx_comp); 553 complete(&ha->dcbx_comp);
561 554
562 } else 555 } else
563 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE " 556 ql_dbg(ql_dbg_async, vha, 0x500e,
564 "received.\n", vha->host_no)); 557 "Asynchronous P2P MODE received.\n");
565 558
566 /* 559 /*
567 * Until there's a transition from loop down to loop up, treat 560 * Until there's a transition from loop down to loop up, treat
@@ -594,10 +587,7 @@ skip_rio:
594 if (IS_QLA2100(ha)) 587 if (IS_QLA2100(ha))
595 break; 588 break;
596 589
597 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection " 590 ql_log(ql_log_info, vha, 0x500f,
598 "received.\n",
599 vha->host_no));
600 qla_printk(KERN_INFO, ha,
601 "Configuration change detected: value=%x.\n", mb[1]); 591 "Configuration change detected: value=%x.\n", mb[1]);
602 592
603 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 593 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -640,11 +630,9 @@ skip_rio:
640 630
641 /* Global event -- port logout or port unavailable. */ 631 /* Global event -- port logout or port unavailable. */
642 if (mb[1] == 0xffff && mb[2] == 0x7) { 632 if (mb[1] == 0xffff && mb[2] == 0x7) {
643 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", 633 ql_dbg(ql_dbg_async, vha, 0x5010,
644 vha->host_no)); 634 "Port unavailable %04x %04x %04x.\n",
645 DEBUG(printk(KERN_INFO 635 mb[1], mb[2], mb[3]);
646 "scsi(%ld): Port unavailable %04x %04x %04x.\n",
647 vha->host_no, mb[1], mb[2], mb[3]));
648 636
649 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 637 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
650 atomic_set(&vha->loop_state, LOOP_DOWN); 638 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -674,17 +662,15 @@ skip_rio:
674 atomic_set(&vha->loop_down_timer, 0); 662 atomic_set(&vha->loop_down_timer, 0);
675 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 663 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
676 atomic_read(&vha->loop_state) != LOOP_DEAD) { 664 atomic_read(&vha->loop_state) != LOOP_DEAD) {
677 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE " 665 ql_dbg(ql_dbg_async, vha, 0x5011,
678 "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1], 666 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
679 mb[2], mb[3])); 667 mb[1], mb[2], mb[3]);
680 break; 668 break;
681 } 669 }
682 670
683 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", 671 ql_dbg(ql_dbg_async, vha, 0x5012,
684 vha->host_no)); 672 "Port database changed %04x %04x %04x.\n",
685 DEBUG(printk(KERN_INFO 673 mb[1], mb[2], mb[3]);
686 "scsi(%ld): Port database changed %04x %04x %04x.\n",
687 vha->host_no, mb[1], mb[2], mb[3]));
688 674
689 /* 675 /*
690 * Mark all devices as missing so we will login again. 676 * Mark all devices as missing so we will login again.
@@ -707,20 +693,17 @@ skip_rio:
707 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 693 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
708 break; 694 break;
709 695
710 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", 696 ql_dbg(ql_dbg_async, vha, 0x5013,
711 vha->host_no)); 697 "RSCN database changed -- %04x %04x %04x.\n",
712 DEBUG(printk(KERN_INFO 698 mb[1], mb[2], mb[3]);
713 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
714 vha->host_no, mb[1], mb[2], mb[3]));
715 699
716 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 700 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
717 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 701 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
718 | vha->d_id.b.al_pa; 702 | vha->d_id.b.al_pa;
719 if (rscn_entry == host_pid) { 703 if (rscn_entry == host_pid) {
720 DEBUG(printk(KERN_INFO 704 ql_dbg(ql_dbg_async, vha, 0x5014,
721 "scsi(%ld): Ignoring RSCN update to local host " 705 "Ignoring RSCN update to local host "
722 "port ID (%06x)\n", 706 "port ID (%06x).\n", host_pid);
723 vha->host_no, host_pid));
724 break; 707 break;
725 } 708 }
726 709
@@ -747,8 +730,8 @@ skip_rio:
747 730
748 /* case MBA_RIO_RESPONSE: */ 731 /* case MBA_RIO_RESPONSE: */
749 case MBA_ZIO_RESPONSE: 732 case MBA_ZIO_RESPONSE:
750 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n", 733 ql_dbg(ql_dbg_async, vha, 0x5015,
751 vha->host_no)); 734 "[R|Z]IO update completion.\n");
752 735
753 if (IS_FWI2_CAPABLE(ha)) 736 if (IS_FWI2_CAPABLE(ha))
754 qla24xx_process_response_queue(vha, rsp); 737 qla24xx_process_response_queue(vha, rsp);
@@ -757,61 +740,68 @@ skip_rio:
757 break; 740 break;
758 741
759 case MBA_DISCARD_RND_FRAME: 742 case MBA_DISCARD_RND_FRAME:
760 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " 743 ql_dbg(ql_dbg_async, vha, 0x5016,
761 "%04x.\n", vha->host_no, mb[1], mb[2], mb[3])); 744 "Discard RND Frame -- %04x %04x %04x.\n",
745 mb[1], mb[2], mb[3]);
762 break; 746 break;
763 747
764 case MBA_TRACE_NOTIFICATION: 748 case MBA_TRACE_NOTIFICATION:
765 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", 749 ql_dbg(ql_dbg_async, vha, 0x5017,
766 vha->host_no, mb[1], mb[2])); 750 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
767 break; 751 break;
768 752
769 case MBA_ISP84XX_ALERT: 753 case MBA_ISP84XX_ALERT:
770 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- " 754 ql_dbg(ql_dbg_async, vha, 0x5018,
771 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); 755 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
756 mb[1], mb[2], mb[3]);
772 757
773 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 758 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
774 switch (mb[1]) { 759 switch (mb[1]) {
775 case A84_PANIC_RECOVERY: 760 case A84_PANIC_RECOVERY:
776 qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery " 761 ql_log(ql_log_info, vha, 0x5019,
777 "%04x %04x\n", mb[2], mb[3]); 762 "Alert 84XX: panic recovery %04x %04x.\n",
763 mb[2], mb[3]);
778 break; 764 break;
779 case A84_OP_LOGIN_COMPLETE: 765 case A84_OP_LOGIN_COMPLETE:
780 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 766 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
781 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" 767 ql_log(ql_log_info, vha, 0x501a,
782 "firmware version %x\n", ha->cs84xx->op_fw_version)); 768 "Alert 84XX: firmware version %x.\n",
769 ha->cs84xx->op_fw_version);
783 break; 770 break;
784 case A84_DIAG_LOGIN_COMPLETE: 771 case A84_DIAG_LOGIN_COMPLETE:
785 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 772 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
786 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" 773 ql_log(ql_log_info, vha, 0x501b,
787 "diagnostic firmware version %x\n", 774 "Alert 84XX: diagnostic firmware version %x.\n",
788 ha->cs84xx->diag_fw_version)); 775 ha->cs84xx->diag_fw_version);
789 break; 776 break;
790 case A84_GOLD_LOGIN_COMPLETE: 777 case A84_GOLD_LOGIN_COMPLETE:
791 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 778 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
792 ha->cs84xx->fw_update = 1; 779 ha->cs84xx->fw_update = 1;
793 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold " 780 ql_log(ql_log_info, vha, 0x501c,
794 "firmware version %x\n", 781 "Alert 84XX: gold firmware version %x.\n",
795 ha->cs84xx->gold_fw_version)); 782 ha->cs84xx->gold_fw_version);
796 break; 783 break;
797 default: 784 default:
798 qla_printk(KERN_ERR, ha, 785 ql_log(ql_log_warn, vha, 0x501d,
799 "Alert 84xx: Invalid Alert %04x %04x %04x\n", 786 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
800 mb[1], mb[2], mb[3]); 787 mb[1], mb[2], mb[3]);
801 } 788 }
802 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 789 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
803 break; 790 break;
804 case MBA_DCBX_START: 791 case MBA_DCBX_START:
805 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n", 792 ql_dbg(ql_dbg_async, vha, 0x501e,
806 vha->host_no, mb[1], mb[2], mb[3])); 793 "DCBX Started -- %04x %04x %04x.\n",
794 mb[1], mb[2], mb[3]);
807 break; 795 break;
808 case MBA_DCBX_PARAM_UPDATE: 796 case MBA_DCBX_PARAM_UPDATE:
809 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- " 797 ql_dbg(ql_dbg_async, vha, 0x501f,
810 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); 798 "DCBX Parameters Updated -- %04x %04x %04x.\n",
799 mb[1], mb[2], mb[3]);
811 break; 800 break;
812 case MBA_FCF_CONF_ERR: 801 case MBA_FCF_CONF_ERR:
813 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- " 802 ql_dbg(ql_dbg_async, vha, 0x5020,
814 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); 803 "FCF Configuration Error -- %04x %04x %04x.\n",
804 mb[1], mb[2], mb[3]);
815 break; 805 break;
816 case MBA_IDC_COMPLETE: 806 case MBA_IDC_COMPLETE:
817 case MBA_IDC_NOTIFY: 807 case MBA_IDC_NOTIFY:
@@ -838,10 +828,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
838 828
839 /* Validate handle. */ 829 /* Validate handle. */
840 if (index >= MAX_OUTSTANDING_COMMANDS) { 830 if (index >= MAX_OUTSTANDING_COMMANDS) {
841 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n", 831 ql_log(ql_log_warn, vha, 0x3014,
842 vha->host_no, index)); 832 "Invalid SCSI command index (%x).\n", index);
843 qla_printk(KERN_WARNING, ha,
844 "Invalid SCSI completion handle %d.\n", index);
845 833
846 if (IS_QLA82XX(ha)) 834 if (IS_QLA82XX(ha))
847 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 835 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -859,10 +847,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
859 sp->cmd->result = DID_OK << 16; 847 sp->cmd->result = DID_OK << 16;
860 qla2x00_sp_compl(ha, sp); 848 qla2x00_sp_compl(ha, sp);
861 } else { 849 } else {
862 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion" 850 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
863 " handle(0x%x)\n", vha->host_no, req->id, index));
864 qla_printk(KERN_WARNING, ha,
865 "Invalid ISP SCSI completion handle\n");
866 851
867 if (IS_QLA82XX(ha)) 852 if (IS_QLA82XX(ha))
868 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 853 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -882,8 +867,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
882 867
883 index = LSW(pkt->handle); 868 index = LSW(pkt->handle);
884 if (index >= MAX_OUTSTANDING_COMMANDS) { 869 if (index >= MAX_OUTSTANDING_COMMANDS) {
885 qla_printk(KERN_WARNING, ha, 870 ql_log(ql_log_warn, vha, 0x5031,
886 "%s: Invalid completion handle (%x).\n", func, index); 871 "Invalid command index (%x).\n", index);
887 if (IS_QLA82XX(ha)) 872 if (IS_QLA82XX(ha))
888 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 873 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
889 else 874 else
@@ -892,15 +877,13 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
892 } 877 }
893 sp = req->outstanding_cmds[index]; 878 sp = req->outstanding_cmds[index];
894 if (!sp) { 879 if (!sp) {
895 qla_printk(KERN_WARNING, ha, 880 ql_log(ql_log_warn, vha, 0x5032,
896 "%s: Invalid completion handle (%x) -- timed-out.\n", func, 881 "Invalid completion handle (%x) -- timed-out.\n", index);
897 index);
898 return sp; 882 return sp;
899 } 883 }
900 if (sp->handle != index) { 884 if (sp->handle != index) {
901 qla_printk(KERN_WARNING, ha, 885 ql_log(ql_log_warn, vha, 0x5033,
902 "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle, 886 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
903 index);
904 return NULL; 887 return NULL;
905 } 888 }
906 889
@@ -937,17 +920,17 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
937 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 920 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
938 QLA_LOGIO_LOGIN_RETRIED : 0; 921 QLA_LOGIO_LOGIN_RETRIED : 0;
939 if (mbx->entry_status) { 922 if (mbx->entry_status) {
940 DEBUG2(printk(KERN_WARNING 923 ql_dbg(ql_dbg_async, vha, 0x5043,
941 "scsi(%ld:%x): Async-%s error entry - portid=%02x%02x%02x " 924 "Async-%s error entry - portid=%02x%02x%02x "
942 "entry-status=%x status=%x state-flag=%x " 925 "entry-status=%x status=%x state-flag=%x "
943 "status-flags=%x.\n", 926 "status-flags=%x.\n",
944 fcport->vha->host_no, sp->handle, type, 927 type, fcport->d_id.b.domain, fcport->d_id.b.area,
945 fcport->d_id.b.domain, fcport->d_id.b.area,
946 fcport->d_id.b.al_pa, mbx->entry_status, 928 fcport->d_id.b.al_pa, mbx->entry_status,
947 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 929 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
948 le16_to_cpu(mbx->status_flags))); 930 le16_to_cpu(mbx->status_flags));
949 931
950 DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx))); 932 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057,
933 (uint8_t *)mbx, sizeof(*mbx));
951 934
952 goto logio_done; 935 goto logio_done;
953 } 936 }
@@ -957,12 +940,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
957 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 940 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
958 status = 0; 941 status = 0;
959 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 942 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
960 DEBUG2(printk(KERN_DEBUG 943 ql_dbg(ql_dbg_async, vha, 0x5045,
961 "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x " 944 "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n",
962 "mbx1=%x.\n", 945 type, fcport->d_id.b.domain, fcport->d_id.b.area,
963 fcport->vha->host_no, sp->handle, type, 946 fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1));
964 fcport->d_id.b.domain, fcport->d_id.b.area,
965 fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)));
966 947
967 data[0] = MBS_COMMAND_COMPLETE; 948 data[0] = MBS_COMMAND_COMPLETE;
968 if (ctx->type == SRB_LOGIN_CMD) { 949 if (ctx->type == SRB_LOGIN_CMD) {
@@ -987,14 +968,14 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
987 break; 968 break;
988 } 969 }
989 970
990 DEBUG2(printk(KERN_WARNING 971 ql_log(ql_log_warn, vha, 0x5046,
991 "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x status=%x " 972 "Async-%s failed - portid=%02x%02x%02x status=%x "
992 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", 973 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
993 fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain, 974 type, fcport->d_id.b.domain,
994 fcport->d_id.b.area, fcport->d_id.b.al_pa, status, 975 fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
995 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 976 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
996 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 977 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
997 le16_to_cpu(mbx->mb7))); 978 le16_to_cpu(mbx->mb7));
998 979
999logio_done: 980logio_done:
1000 lio->done(sp); 981 lio->done(sp);
@@ -1025,9 +1006,8 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1025 type = "ct pass-through"; 1006 type = "ct pass-through";
1026 break; 1007 break;
1027 default: 1008 default:
1028 qla_printk(KERN_WARNING, ha, 1009 ql_log(ql_log_warn, vha, 0x5047,
1029 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp, 1010 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
1030 sp_bsg->type);
1031 return; 1011 return;
1032 } 1012 }
1033 1013
@@ -1045,20 +1025,20 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1045 bsg_job->reply->reply_payload_rcv_len = 1025 bsg_job->reply->reply_payload_rcv_len =
1046 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); 1026 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1047 1027
1048 DEBUG2(qla_printk(KERN_WARNING, ha, 1028 ql_log(ql_log_warn, vha, 0x5048,
1049 "scsi(%ld): CT pass-through-%s error " 1029 "CT pass-through-%s error "
1050 "comp_status-status=0x%x total_byte = 0x%x.\n", 1030 "comp_status-status=0x%x total_byte = 0x%x.\n",
1051 vha->host_no, type, comp_status, 1031 type, comp_status,
1052 bsg_job->reply->reply_payload_rcv_len)); 1032 bsg_job->reply->reply_payload_rcv_len);
1053 } else { 1033 } else {
1054 DEBUG2(qla_printk(KERN_WARNING, ha, 1034 ql_log(ql_log_warn, vha, 0x5049,
1055 "scsi(%ld): CT pass-through-%s error " 1035 "CT pass-through-%s error "
1056 "comp_status-status=0x%x.\n", 1036 "comp_status-status=0x%x.\n", type, comp_status);
1057 vha->host_no, type, comp_status));
1058 bsg_job->reply->result = DID_ERROR << 16; 1037 bsg_job->reply->result = DID_ERROR << 16;
1059 bsg_job->reply->reply_payload_rcv_len = 0; 1038 bsg_job->reply->reply_payload_rcv_len = 0;
1060 } 1039 }
1061 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt))); 1040 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058,
1041 (uint8_t *)pkt, sizeof(*pkt));
1062 } else { 1042 } else {
1063 bsg_job->reply->result = DID_OK << 16; 1043 bsg_job->reply->result = DID_OK << 16;
1064 bsg_job->reply->reply_payload_rcv_len = 1044 bsg_job->reply->reply_payload_rcv_len =
@@ -1110,9 +1090,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1110 type = "ct pass-through"; 1090 type = "ct pass-through";
1111 break; 1091 break;
1112 default: 1092 default:
1113 qla_printk(KERN_WARNING, ha, 1093 ql_log(ql_log_warn, vha, 0x503e,
1114 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp, 1094 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
1115 sp_bsg->type);
1116 return; 1095 return;
1117 } 1096 }
1118 1097
@@ -1132,27 +1111,31 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1132 bsg_job->reply->reply_payload_rcv_len = 1111 bsg_job->reply->reply_payload_rcv_len =
1133 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count); 1112 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
1134 1113
1135 DEBUG2(qla_printk(KERN_WARNING, ha, 1114 ql_log(ql_log_info, vha, 0x503f,
1136 "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x " 1115 "ELS-CT pass-through-%s error comp_status-status=0x%x "
1137 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 1116 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1138 vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2], 1117 type, comp_status, fw_status[1], fw_status[2],
1139 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count))); 1118 le16_to_cpu(((struct els_sts_entry_24xx *)
1119 pkt)->total_byte_count));
1140 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1120 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1141 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1121 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1142 } 1122 }
1143 else { 1123 else {
1144 DEBUG2(qla_printk(KERN_WARNING, ha, 1124 ql_log(ql_log_info, vha, 0x5040,
1145 "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x " 1125 "ELS-CT pass-through-%s error comp_status-status=0x%x "
1146 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1126 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1147 vha->host_no, sp->handle, type, comp_status, 1127 type, comp_status,
1148 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1), 1128 le16_to_cpu(((struct els_sts_entry_24xx *)
1149 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2))); 1129 pkt)->error_subcode_1),
1130 le16_to_cpu(((struct els_sts_entry_24xx *)
1131 pkt)->error_subcode_2));
1150 bsg_job->reply->result = DID_ERROR << 16; 1132 bsg_job->reply->result = DID_ERROR << 16;
1151 bsg_job->reply->reply_payload_rcv_len = 0; 1133 bsg_job->reply->reply_payload_rcv_len = 0;
1152 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1134 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1153 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1135 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1154 } 1136 }
1155 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt))); 1137 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
1138 (uint8_t *)pkt, sizeof(*pkt));
1156 } 1139 }
1157 else { 1140 else {
1158 bsg_job->reply->result = DID_OK << 16; 1141 bsg_job->reply->result = DID_OK << 16;
@@ -1201,25 +1184,24 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1201 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1184 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1202 QLA_LOGIO_LOGIN_RETRIED : 0; 1185 QLA_LOGIO_LOGIN_RETRIED : 0;
1203 if (logio->entry_status) { 1186 if (logio->entry_status) {
1204 DEBUG2(printk(KERN_WARNING 1187 ql_log(ql_log_warn, vha, 0x5034,
1205 "scsi(%ld:%x): Async-%s error entry - " 1188 "Async-%s error entry - "
1206 "portid=%02x%02x%02x entry-status=%x.\n", 1189 "portid=%02x%02x%02x entry-status=%x.\n",
1207 fcport->vha->host_no, sp->handle, type, 1190 type, fcport->d_id.b.domain, fcport->d_id.b.area,
1208 fcport->d_id.b.domain, fcport->d_id.b.area, 1191 fcport->d_id.b.al_pa, logio->entry_status);
1209 fcport->d_id.b.al_pa, logio->entry_status)); 1192 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059,
1210 DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio))); 1193 (uint8_t *)logio, sizeof(*logio));
1211 1194
1212 goto logio_done; 1195 goto logio_done;
1213 } 1196 }
1214 1197
1215 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1198 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1216 DEBUG2(printk(KERN_DEBUG 1199 ql_dbg(ql_dbg_async, vha, 0x5036,
1217 "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x " 1200 "Async-%s complete - portid=%02x%02x%02x "
1218 "iop0=%x.\n", 1201 "iop0=%x.\n",
1219 fcport->vha->host_no, sp->handle, type, 1202 type, fcport->d_id.b.domain, fcport->d_id.b.area,
1220 fcport->d_id.b.domain, fcport->d_id.b.area,
1221 fcport->d_id.b.al_pa, 1203 fcport->d_id.b.al_pa,
1222 le32_to_cpu(logio->io_parameter[0]))); 1204 le32_to_cpu(logio->io_parameter[0]));
1223 1205
1224 data[0] = MBS_COMMAND_COMPLETE; 1206 data[0] = MBS_COMMAND_COMPLETE;
1225 if (ctx->type != SRB_LOGIN_CMD) 1207 if (ctx->type != SRB_LOGIN_CMD)
@@ -1256,14 +1238,14 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1256 break; 1238 break;
1257 } 1239 }
1258 1240
1259 DEBUG2(printk(KERN_WARNING 1241 ql_dbg(ql_dbg_async, vha, 0x5037,
1260 "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x comp=%x " 1242 "Async-%s failed - portid=%02x%02x%02x comp=%x "
1261 "iop0=%x iop1=%x.\n", 1243 "iop0=%x iop1=%x.\n",
1262 fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain, 1244 type, fcport->d_id.b.domain,
1263 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1245 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1264 le16_to_cpu(logio->comp_status), 1246 le16_to_cpu(logio->comp_status),
1265 le32_to_cpu(logio->io_parameter[0]), 1247 le32_to_cpu(logio->io_parameter[0]),
1266 le32_to_cpu(logio->io_parameter[1]))); 1248 le32_to_cpu(logio->io_parameter[1]));
1267 1249
1268logio_done: 1250logio_done:
1269 lio->done(sp); 1251 lio->done(sp);
@@ -1292,38 +1274,34 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1292 fcport = sp->fcport; 1274 fcport = sp->fcport;
1293 1275
1294 if (sts->entry_status) { 1276 if (sts->entry_status) {
1295 DEBUG2(printk(KERN_WARNING 1277 ql_log(ql_log_warn, vha, 0x5038,
1296 "scsi(%ld:%x): Async-%s error - entry-status(%x).\n", 1278 "Async-%s error - entry-status(%x).\n",
1297 fcport->vha->host_no, sp->handle, type, 1279 type, sts->entry_status);
1298 sts->entry_status));
1299 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1280 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1300 DEBUG2(printk(KERN_WARNING 1281 ql_log(ql_log_warn, vha, 0x5039,
1301 "scsi(%ld:%x): Async-%s error - completion status(%x).\n", 1282 "Async-%s error - completion status(%x).\n",
1302 fcport->vha->host_no, sp->handle, type, 1283 type, sts->comp_status);
1303 sts->comp_status));
1304 } else if (!(le16_to_cpu(sts->scsi_status) & 1284 } else if (!(le16_to_cpu(sts->scsi_status) &
1305 SS_RESPONSE_INFO_LEN_VALID)) { 1285 SS_RESPONSE_INFO_LEN_VALID)) {
1306 DEBUG2(printk(KERN_WARNING 1286 ql_log(ql_log_warn, vha, 0x503a,
1307 "scsi(%ld:%x): Async-%s error - no response info(%x).\n", 1287 "Async-%s error - no response info(%x).\n",
1308 fcport->vha->host_no, sp->handle, type, 1288 type, sts->scsi_status);
1309 sts->scsi_status));
1310 } else if (le32_to_cpu(sts->rsp_data_len) < 4) { 1289 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1311 DEBUG2(printk(KERN_WARNING 1290 ql_log(ql_log_warn, vha, 0x503b,
1312 "scsi(%ld:%x): Async-%s error - not enough response(%d).\n", 1291 "Async-%s error - not enough response(%d).\n",
1313 fcport->vha->host_no, sp->handle, type, 1292 type, sts->rsp_data_len);
1314 sts->rsp_data_len));
1315 } else if (sts->data[3]) { 1293 } else if (sts->data[3]) {
1316 DEBUG2(printk(KERN_WARNING 1294 ql_log(ql_log_warn, vha, 0x503c,
1317 "scsi(%ld:%x): Async-%s error - response(%x).\n", 1295 "Async-%s error - response(%x).\n",
1318 fcport->vha->host_no, sp->handle, type, 1296 type, sts->data[3]);
1319 sts->data[3]));
1320 } else { 1297 } else {
1321 error = 0; 1298 error = 0;
1322 } 1299 }
1323 1300
1324 if (error) { 1301 if (error) {
1325 iocb->u.tmf.data = error; 1302 iocb->u.tmf.data = error;
1326 DEBUG2(qla2x00_dump_buffer((uint8_t *)sts, sizeof(*sts))); 1303 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1304 (uint8_t *)sts, sizeof(*sts));
1327 } 1305 }
1328 1306
1329 iocb->done(sp); 1307 iocb->done(sp);
@@ -1360,8 +1338,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
1360 } 1338 }
1361 1339
1362 if (pkt->entry_status != 0) { 1340 if (pkt->entry_status != 0) {
1363 DEBUG3(printk(KERN_INFO 1341 ql_log(ql_log_warn, vha, 0x5035,
1364 "scsi(%ld): Process error entry.\n", vha->host_no)); 1342 "Process error entry.\n");
1365 1343
1366 qla2x00_error_entry(vha, rsp, pkt); 1344 qla2x00_error_entry(vha, rsp, pkt);
1367 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1345 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1399,10 +1377,10 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
1399 break; 1377 break;
1400 default: 1378 default:
1401 /* Type Not Supported. */ 1379 /* Type Not Supported. */
1402 DEBUG4(printk(KERN_WARNING 1380 ql_log(ql_log_warn, vha, 0x504a,
1403 "scsi(%ld): Received unknown response pkt type %x " 1381 "Received unknown response pkt type %x "
1404 "entry status=%x.\n", 1382 "entry status=%x.\n",
1405 vha->host_no, pkt->entry_type, pkt->entry_status)); 1383 pkt->entry_type, pkt->entry_status);
1406 break; 1384 break;
1407 } 1385 }
1408 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1386 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1418,6 +1396,7 @@ static inline void
1418qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 1396qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1419 uint32_t sense_len, struct rsp_que *rsp) 1397 uint32_t sense_len, struct rsp_que *rsp)
1420{ 1398{
1399 struct scsi_qla_host *vha = sp->fcport->vha;
1421 struct scsi_cmnd *cp = sp->cmd; 1400 struct scsi_cmnd *cp = sp->cmd;
1422 1401
1423 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 1402 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
@@ -1435,11 +1414,13 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1435 if (sp->request_sense_length != 0) 1414 if (sp->request_sense_length != 0)
1436 rsp->status_srb = sp; 1415 rsp->status_srb = sp;
1437 1416
1438 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 1417 ql_dbg(ql_dbg_io, vha, 0x301c,
1439 "cmd=%p\n", __func__, sp->fcport->vha->host_no, 1418 "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n",
1440 cp->device->channel, cp->device->id, cp->device->lun, cp)); 1419 sp->fcport->vha->host_no, cp->device->channel, cp->device->id,
1420 cp->device->lun, cp);
1441 if (sense_len) 1421 if (sense_len)
1442 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len)); 1422 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1423 cp->sense_buffer, sense_len);
1443} 1424}
1444 1425
1445struct scsi_dif_tuple { 1426struct scsi_dif_tuple {
@@ -1457,6 +1438,7 @@ struct scsi_dif_tuple {
1457static inline void 1438static inline void
1458qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1439qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1459{ 1440{
1441 struct scsi_qla_host *vha = sp->fcport->vha;
1460 struct scsi_cmnd *cmd = sp->cmd; 1442 struct scsi_cmnd *cmd = sp->cmd;
1461 struct scsi_dif_tuple *ep = 1443 struct scsi_dif_tuple *ep =
1462 (struct scsi_dif_tuple *)&sts24->data[20]; 1444 (struct scsi_dif_tuple *)&sts24->data[20];
@@ -1473,15 +1455,15 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1473 e_guard = be16_to_cpu(ep->guard); 1455 e_guard = be16_to_cpu(ep->guard);
1474 a_guard = be16_to_cpu(ap->guard); 1456 a_guard = be16_to_cpu(ap->guard);
1475 1457
1476 DEBUG18(printk(KERN_DEBUG 1458 ql_dbg(ql_dbg_io, vha, 0x3023,
1477 "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24)); 1459 "iocb(s) %p Returned STATUS.\n", sts24);
1478 1460
1479 DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 1461 ql_dbg(ql_dbg_io, vha, 0x3024,
1462 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1480 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 1463 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1481 " tag=0x%x, act guard=0x%x, exp guard=0x%x\n", 1464 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
1482 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1465 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1483 a_app_tag, e_app_tag, a_guard, e_guard)); 1466 a_app_tag, e_app_tag, a_guard, e_guard);
1484
1485 1467
1486 /* check guard */ 1468 /* check guard */
1487 if (e_guard != a_guard) { 1469 if (e_guard != a_guard) {
@@ -1569,9 +1551,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1569 sp = NULL; 1551 sp = NULL;
1570 1552
1571 if (sp == NULL) { 1553 if (sp == NULL) {
1572 qla_printk(KERN_WARNING, ha, 1554 ql_log(ql_log_warn, vha, 0x3017,
1573 "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no, 1555 "Invalid status handle (0x%x).\n", sts->handle);
1574 sts->handle);
1575 1556
1576 if (IS_QLA82XX(ha)) 1557 if (IS_QLA82XX(ha))
1577 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1558 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -1582,9 +1563,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1582 } 1563 }
1583 cp = sp->cmd; 1564 cp = sp->cmd;
1584 if (cp == NULL) { 1565 if (cp == NULL) {
1585 qla_printk(KERN_WARNING, ha, 1566 ql_log(ql_log_warn, vha, 0x3018,
1586 "scsi(%ld): Command already returned (0x%x/%p).\n", 1567 "Command already returned (0x%x/%p).\n",
1587 vha->host_no, sts->handle, sp); 1568 sts->handle, sp);
1588 1569
1589 return; 1570 return;
1590 } 1571 }
@@ -1629,10 +1610,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1629 par_sense_len -= rsp_info_len; 1610 par_sense_len -= rsp_info_len;
1630 } 1611 }
1631 if (rsp_info_len > 3 && rsp_info[3]) { 1612 if (rsp_info_len > 3 && rsp_info[3]) {
1632 DEBUG2(qla_printk(KERN_INFO, ha, 1613 ql_log(ql_log_warn, vha, 0x3019,
1633 "scsi(%ld:%d:%d): FCP I/O protocol failure " 1614 "FCP I/O protocol failure (0x%x/0x%x).\n",
1634 "(0x%x/0x%x).\n", vha->host_no, cp->device->id, 1615 rsp_info_len, rsp_info[3]);
1635 cp->device->lun, rsp_info_len, rsp_info[3]));
1636 1616
1637 cp->result = DID_BUS_BUSY << 16; 1617 cp->result = DID_BUS_BUSY << 16;
1638 goto out; 1618 goto out;
@@ -1661,11 +1641,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1661 if (!lscsi_status && 1641 if (!lscsi_status &&
1662 ((unsigned)(scsi_bufflen(cp) - resid) < 1642 ((unsigned)(scsi_bufflen(cp) - resid) <
1663 cp->underflow)) { 1643 cp->underflow)) {
1664 qla_printk(KERN_INFO, ha, 1644 ql_log(ql_log_warn, vha, 0x301a,
1665 "scsi(%ld:%d:%d): Mid-layer underflow " 1645 "Mid-layer underflow "
1666 "detected (0x%x of 0x%x bytes).\n", 1646 "detected (0x%x of 0x%x bytes).\n",
1667 vha->host_no, cp->device->id, 1647 resid, scsi_bufflen(cp));
1668 cp->device->lun, resid, scsi_bufflen(cp));
1669 1648
1670 cp->result = DID_ERROR << 16; 1649 cp->result = DID_ERROR << 16;
1671 break; 1650 break;
@@ -1674,9 +1653,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1674 cp->result = DID_OK << 16 | lscsi_status; 1653 cp->result = DID_OK << 16 | lscsi_status;
1675 1654
1676 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1655 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1677 DEBUG2(qla_printk(KERN_INFO, ha, 1656 ql_log(ql_log_warn, vha, 0x301b,
1678 "scsi(%ld:%d:%d) QUEUE FULL detected.\n", 1657 "QUEUE FULL detected.\n");
1679 vha->host_no, cp->device->id, cp->device->lun));
1680 break; 1658 break;
1681 } 1659 }
1682 logit = 0; 1660 logit = 0;
@@ -1697,11 +1675,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1697 scsi_set_resid(cp, resid); 1675 scsi_set_resid(cp, resid);
1698 if (scsi_status & SS_RESIDUAL_UNDER) { 1676 if (scsi_status & SS_RESIDUAL_UNDER) {
1699 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 1677 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
1700 DEBUG2(qla_printk(KERN_INFO, ha, 1678 ql_log(ql_log_warn, vha, 0x301d,
1701 "scsi(%ld:%d:%d) Dropped frame(s) detected " 1679 "Dropped frame(s) detected "
1702 "(0x%x of 0x%x bytes).\n", vha->host_no, 1680 "(0x%x of 0x%x bytes).\n",
1703 cp->device->id, cp->device->lun, resid, 1681 resid, scsi_bufflen(cp));
1704 scsi_bufflen(cp)));
1705 1682
1706 cp->result = DID_ERROR << 16 | lscsi_status; 1683 cp->result = DID_ERROR << 16 | lscsi_status;
1707 break; 1684 break;
@@ -1710,20 +1687,18 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1710 if (!lscsi_status && 1687 if (!lscsi_status &&
1711 ((unsigned)(scsi_bufflen(cp) - resid) < 1688 ((unsigned)(scsi_bufflen(cp) - resid) <
1712 cp->underflow)) { 1689 cp->underflow)) {
1713 qla_printk(KERN_INFO, ha, 1690 ql_log(ql_log_warn, vha, 0x301e,
1714 "scsi(%ld:%d:%d): Mid-layer underflow " 1691 "Mid-layer underflow "
1715 "detected (0x%x of 0x%x bytes).\n", 1692 "detected (0x%x of 0x%x bytes).\n",
1716 vha->host_no, cp->device->id, 1693 resid, scsi_bufflen(cp));
1717 cp->device->lun, resid, scsi_bufflen(cp));
1718 1694
1719 cp->result = DID_ERROR << 16; 1695 cp->result = DID_ERROR << 16;
1720 break; 1696 break;
1721 } 1697 }
1722 } else { 1698 } else {
1723 DEBUG2(qla_printk(KERN_INFO, ha, 1699 ql_log(ql_log_warn, vha, 0x301f,
1724 "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x " 1700 "Dropped frame(s) detected (0x%x "
1725 "of 0x%x bytes).\n", vha->host_no, cp->device->id, 1701 "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
1726 cp->device->lun, resid, scsi_bufflen(cp)));
1727 1702
1728 cp->result = DID_ERROR << 16 | lscsi_status; 1703 cp->result = DID_ERROR << 16 | lscsi_status;
1729 goto check_scsi_status; 1704 goto check_scsi_status;
@@ -1739,10 +1714,8 @@ check_scsi_status:
1739 */ 1714 */
1740 if (lscsi_status != 0) { 1715 if (lscsi_status != 0) {
1741 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1716 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1742 DEBUG2(qla_printk(KERN_INFO, ha, 1717 ql_log(ql_log_warn, vha, 0x3020,
1743 "scsi(%ld:%d:%d) QUEUE FULL detected.\n", 1718 "QUEUE FULL detected.\n");
1744 vha->host_no, cp->device->id,
1745 cp->device->lun));
1746 logit = 1; 1719 logit = 1;
1747 break; 1720 break;
1748 } 1721 }
@@ -1781,10 +1754,9 @@ check_scsi_status:
1781 break; 1754 break;
1782 } 1755 }
1783 1756
1784 DEBUG2(qla_printk(KERN_INFO, ha, 1757 ql_dbg(ql_dbg_io, vha, 0x3021,
1785 "scsi(%ld:%d:%d) Port down status: port-state=0x%x\n", 1758 "Port down status: port-state=0x%x.\n",
1786 vha->host_no, cp->device->id, cp->device->lun, 1759 atomic_read(&fcport->state));
1787 atomic_read(&fcport->state)));
1788 1760
1789 if (atomic_read(&fcport->state) == FCS_ONLINE) 1761 if (atomic_read(&fcport->state) == FCS_ONLINE)
1790 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 1762 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
@@ -1804,15 +1776,13 @@ check_scsi_status:
1804 1776
1805out: 1777out:
1806 if (logit) 1778 if (logit)
1807 DEBUG2(qla_printk(KERN_INFO, ha, 1779 ql_dbg(ql_dbg_io, vha, 0x3022,
1808 "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) " 1780 "FCP command status: 0x%x-0x%x (0x%x) "
1809 "portid=%02x%02x%02x oxid=0x%x cdb=%02x%02x%02x len=0x%x " 1781 "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
1810 "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no, 1782 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
1811 cp->device->id, cp->device->lun, comp_status, scsi_status, 1783 comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0],
1812 cp->result, fcport->d_id.b.domain, fcport->d_id.b.area, 1784 cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
1813 fcport->d_id.b.al_pa, ox_id, cp->cmnd[0], cp->cmnd[1], 1785 resid_len, fw_resid_len);
1814 cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, resid_len,
1815 fw_resid_len));
1816 1786
1817 if (rsp->status_srb == NULL) 1787 if (rsp->status_srb == NULL)
1818 qla2x00_sp_compl(ha, sp); 1788 qla2x00_sp_compl(ha, sp);
@@ -1830,16 +1800,15 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1830{ 1800{
1831 uint8_t sense_sz = 0; 1801 uint8_t sense_sz = 0;
1832 struct qla_hw_data *ha = rsp->hw; 1802 struct qla_hw_data *ha = rsp->hw;
1803 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
1833 srb_t *sp = rsp->status_srb; 1804 srb_t *sp = rsp->status_srb;
1834 struct scsi_cmnd *cp; 1805 struct scsi_cmnd *cp;
1835 1806
1836 if (sp != NULL && sp->request_sense_length != 0) { 1807 if (sp != NULL && sp->request_sense_length != 0) {
1837 cp = sp->cmd; 1808 cp = sp->cmd;
1838 if (cp == NULL) { 1809 if (cp == NULL) {
1839 DEBUG2(printk("%s(): Cmd already returned back to OS " 1810 ql_log(ql_log_warn, vha, 0x3025,
1840 "sp=%p.\n", __func__, sp)); 1811 "cmd is NULL: already returned to OS (sp=%p).\n",
1841 qla_printk(KERN_INFO, ha,
1842 "cmd is NULL: already returned to OS (sp=%p)\n",
1843 sp); 1812 sp);
1844 1813
1845 rsp->status_srb = NULL; 1814 rsp->status_srb = NULL;
@@ -1856,7 +1825,8 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1856 if (IS_FWI2_CAPABLE(ha)) 1825 if (IS_FWI2_CAPABLE(ha))
1857 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 1826 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1858 memcpy(sp->request_sense_ptr, pkt->data, sense_sz); 1827 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1859 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz)); 1828 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
1829 sp->request_sense_ptr, sense_sz);
1860 1830
1861 sp->request_sense_ptr += sense_sz; 1831 sp->request_sense_ptr += sense_sz;
1862 sp->request_sense_length -= sense_sz; 1832 sp->request_sense_length -= sense_sz;
@@ -1882,21 +1852,25 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1882 uint32_t handle = LSW(pkt->handle); 1852 uint32_t handle = LSW(pkt->handle);
1883 uint16_t que = MSW(pkt->handle); 1853 uint16_t que = MSW(pkt->handle);
1884 struct req_que *req = ha->req_q_map[que]; 1854 struct req_que *req = ha->req_q_map[que];
1885#if defined(QL_DEBUG_LEVEL_2) 1855
1886 if (pkt->entry_status & RF_INV_E_ORDER) 1856 if (pkt->entry_status & RF_INV_E_ORDER)
1887 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); 1857 ql_dbg(ql_dbg_async, vha, 0x502a,
1858 "Invalid Entry Order.\n");
1888 else if (pkt->entry_status & RF_INV_E_COUNT) 1859 else if (pkt->entry_status & RF_INV_E_COUNT)
1889 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__); 1860 ql_dbg(ql_dbg_async, vha, 0x502b,
1861 "Invalid Entry Count.\n");
1890 else if (pkt->entry_status & RF_INV_E_PARAM) 1862 else if (pkt->entry_status & RF_INV_E_PARAM)
1891 qla_printk(KERN_ERR, ha, 1863 ql_dbg(ql_dbg_async, vha, 0x502c,
1892 "%s: Invalid Entry Parameter\n", __func__); 1864 "Invalid Entry Parameter.\n");
1893 else if (pkt->entry_status & RF_INV_E_TYPE) 1865 else if (pkt->entry_status & RF_INV_E_TYPE)
1894 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__); 1866 ql_dbg(ql_dbg_async, vha, 0x502d,
1867 "Invalid Entry Type.\n");
1895 else if (pkt->entry_status & RF_BUSY) 1868 else if (pkt->entry_status & RF_BUSY)
1896 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__); 1869 ql_dbg(ql_dbg_async, vha, 0x502e,
1870 "Busy.\n");
1897 else 1871 else
1898 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__); 1872 ql_dbg(ql_dbg_async, vha, 0x502f,
1899#endif 1873 "UNKNOWN flag error.\n");
1900 1874
1901 /* Validate handle. */ 1875 /* Validate handle. */
1902 if (handle < MAX_OUTSTANDING_COMMANDS) 1876 if (handle < MAX_OUTSTANDING_COMMANDS)
@@ -1923,10 +1897,8 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1923 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == 1897 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1924 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7 1898 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
1925 || pkt->entry_type == COMMAND_TYPE_6) { 1899 || pkt->entry_type == COMMAND_TYPE_6) {
1926 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n", 1900 ql_log(ql_log_warn, vha, 0x5030,
1927 vha->host_no)); 1901 "Error entry - invalid handle.\n");
1928 qla_printk(KERN_WARNING, ha,
1929 "Error entry - invalid handle\n");
1930 1902
1931 if (IS_QLA82XX(ha)) 1903 if (IS_QLA82XX(ha))
1932 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1904 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -1960,11 +1932,11 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1960 } 1932 }
1961 1933
1962 if (ha->mcp) { 1934 if (ha->mcp) {
1963 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 1935 ql_dbg(ql_dbg_async, vha, 0x504d,
1964 __func__, vha->host_no, ha->mcp->mb[0])); 1936 "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
1965 } else { 1937 } else {
1966 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 1938 ql_dbg(ql_dbg_async, vha, 0x504e,
1967 __func__, vha->host_no)); 1939 "MBX pointer ERROR.\n");
1968 } 1940 }
1969} 1941}
1970 1942
@@ -1993,8 +1965,8 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1993 } 1965 }
1994 1966
1995 if (pkt->entry_status != 0) { 1967 if (pkt->entry_status != 0) {
1996 DEBUG3(printk(KERN_INFO 1968 ql_dbg(ql_dbg_async, vha, 0x5029,
1997 "scsi(%ld): Process error entry.\n", vha->host_no)); 1969 "Process error entry.\n");
1998 1970
1999 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 1971 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2000 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1972 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -2030,10 +2002,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2030 break; 2002 break;
2031 default: 2003 default:
2032 /* Type Not Supported. */ 2004 /* Type Not Supported. */
2033 DEBUG4(printk(KERN_WARNING 2005 ql_dbg(ql_dbg_async, vha, 0x5042,
2034 "scsi(%ld): Received unknown response pkt type %x " 2006 "Received unknown response pkt type %x "
2035 "entry status=%x.\n", 2007 "entry status=%x.\n",
2036 vha->host_no, pkt->entry_type, pkt->entry_status)); 2008 pkt->entry_type, pkt->entry_status);
2037 break; 2009 break;
2038 } 2010 }
2039 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2011 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -2088,7 +2060,8 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2088 2060
2089next_test: 2061next_test:
2090 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3) 2062 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2091 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n"); 2063 ql_log(ql_log_info, vha, 0x504c,
2064 "Additional code -- 0x55AA.\n");
2092 2065
2093done: 2066done:
2094 WRT_REG_DWORD(&reg->iobase_window, 0x0000); 2067 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
@@ -2121,7 +2094,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
2121 rsp = (struct rsp_que *) dev_id; 2094 rsp = (struct rsp_que *) dev_id;
2122 if (!rsp) { 2095 if (!rsp) {
2123 printk(KERN_INFO 2096 printk(KERN_INFO
2124 "%s(): NULL response queue pointer\n", __func__); 2097 "%s(): NULL response queue pointer.\n", __func__);
2125 return IRQ_NONE; 2098 return IRQ_NONE;
2126 } 2099 }
2127 2100
@@ -2142,8 +2115,9 @@ qla24xx_intr_handler(int irq, void *dev_id)
2142 2115
2143 hccr = RD_REG_DWORD(&reg->hccr); 2116 hccr = RD_REG_DWORD(&reg->hccr);
2144 2117
2145 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 2118 ql_log(ql_log_warn, vha, 0x504b,
2146 "Dumping firmware!\n", hccr); 2119 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2120 hccr);
2147 2121
2148 qla2xxx_check_risc_status(vha); 2122 qla2xxx_check_risc_status(vha);
2149 2123
@@ -2174,9 +2148,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
2174 qla24xx_process_response_queue(vha, rsp); 2148 qla24xx_process_response_queue(vha, rsp);
2175 break; 2149 break;
2176 default: 2150 default:
2177 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 2151 ql_dbg(ql_dbg_async, vha, 0x504f,
2178 "(%d).\n", 2152 "Unrecognized interrupt type (%d).\n", stat * 0xff);
2179 vha->host_no, stat & 0xff));
2180 break; 2153 break;
2181 } 2154 }
2182 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2155 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -2205,7 +2178,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
2205 rsp = (struct rsp_que *) dev_id; 2178 rsp = (struct rsp_que *) dev_id;
2206 if (!rsp) { 2179 if (!rsp) {
2207 printk(KERN_INFO 2180 printk(KERN_INFO
2208 "%s(): NULL response queue pointer\n", __func__); 2181 "%s(): NULL response queue pointer.\n", __func__);
2209 return IRQ_NONE; 2182 return IRQ_NONE;
2210 } 2183 }
2211 ha = rsp->hw; 2184 ha = rsp->hw;
@@ -2235,7 +2208,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
2235 rsp = (struct rsp_que *) dev_id; 2208 rsp = (struct rsp_que *) dev_id;
2236 if (!rsp) { 2209 if (!rsp) {
2237 printk(KERN_INFO 2210 printk(KERN_INFO
2238 "%s(): NULL response queue pointer\n", __func__); 2211 "%s(): NULL response queue pointer.\n", __func__);
2239 return IRQ_NONE; 2212 return IRQ_NONE;
2240 } 2213 }
2241 ha = rsp->hw; 2214 ha = rsp->hw;
@@ -2268,8 +2241,8 @@ qla24xx_msix_default(int irq, void *dev_id)
2268 2241
2269 rsp = (struct rsp_que *) dev_id; 2242 rsp = (struct rsp_que *) dev_id;
2270 if (!rsp) { 2243 if (!rsp) {
2271 DEBUG(printk( 2244 printk(KERN_INFO
2272 "%s(): NULL response queue pointer\n", __func__)); 2245 "%s(): NULL response queue pointer.\n", __func__);
2273 return IRQ_NONE; 2246 return IRQ_NONE;
2274 } 2247 }
2275 ha = rsp->hw; 2248 ha = rsp->hw;
@@ -2286,8 +2259,9 @@ qla24xx_msix_default(int irq, void *dev_id)
2286 2259
2287 hccr = RD_REG_DWORD(&reg->hccr); 2260 hccr = RD_REG_DWORD(&reg->hccr);
2288 2261
2289 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 2262 ql_log(ql_log_info, vha, 0x5050,
2290 "Dumping firmware!\n", hccr); 2263 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2264 hccr);
2291 2265
2292 qla2xxx_check_risc_status(vha); 2266 qla2xxx_check_risc_status(vha);
2293 2267
@@ -2318,9 +2292,8 @@ qla24xx_msix_default(int irq, void *dev_id)
2318 qla24xx_process_response_queue(vha, rsp); 2292 qla24xx_process_response_queue(vha, rsp);
2319 break; 2293 break;
2320 default: 2294 default:
2321 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 2295 ql_dbg(ql_dbg_async, vha, 0x5051,
2322 "(%d).\n", 2296 "Unrecognized interrupt type (%d).\n", stat & 0xff);
2323 vha->host_no, stat & 0xff));
2324 break; 2297 break;
2325 } 2298 }
2326 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2299 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -2358,6 +2331,7 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
2358{ 2331{
2359 int i; 2332 int i;
2360 struct qla_msix_entry *qentry; 2333 struct qla_msix_entry *qentry;
2334 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2361 2335
2362 for (i = 0; i < ha->msix_count; i++) { 2336 for (i = 0; i < ha->msix_count; i++) {
2363 qentry = &ha->msix_entries[i]; 2337 qentry = &ha->msix_entries[i];
@@ -2368,6 +2342,8 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
2368 kfree(ha->msix_entries); 2342 kfree(ha->msix_entries);
2369 ha->msix_entries = NULL; 2343 ha->msix_entries = NULL;
2370 ha->flags.msix_enabled = 0; 2344 ha->flags.msix_enabled = 0;
2345 ql_dbg(ql_dbg_init, vha, 0x0042,
2346 "Disabled the MSI.\n");
2371} 2347}
2372 2348
2373static int 2349static int
@@ -2377,11 +2353,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2377 int i, ret; 2353 int i, ret;
2378 struct msix_entry *entries; 2354 struct msix_entry *entries;
2379 struct qla_msix_entry *qentry; 2355 struct qla_msix_entry *qentry;
2356 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2380 2357
2381 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 2358 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2382 GFP_KERNEL); 2359 GFP_KERNEL);
2383 if (!entries) 2360 if (!entries) {
2361 ql_log(ql_log_warn, vha, 0x00bc,
2362 "Failed to allocate memory for msix_entry.\n");
2384 return -ENOMEM; 2363 return -ENOMEM;
2364 }
2385 2365
2386 for (i = 0; i < ha->msix_count; i++) 2366 for (i = 0; i < ha->msix_count; i++)
2387 entries[i].entry = i; 2367 entries[i].entry = i;
@@ -2391,16 +2371,18 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2391 if (ret < MIN_MSIX_COUNT) 2371 if (ret < MIN_MSIX_COUNT)
2392 goto msix_failed; 2372 goto msix_failed;
2393 2373
2394 qla_printk(KERN_WARNING, ha, 2374 ql_log(ql_log_warn, vha, 0x00c6,
2395 "MSI-X: Failed to enable support -- %d/%d\n" 2375 "MSI-X: Failed to enable support "
2396 " Retry with %d vectors\n", ha->msix_count, ret, ret); 2376 "-- %d/%d\n Retry with %d vectors.\n",
2377 ha->msix_count, ret, ret);
2397 ha->msix_count = ret; 2378 ha->msix_count = ret;
2398 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2379 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2399 if (ret) { 2380 if (ret) {
2400msix_failed: 2381msix_failed:
2401 qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable" 2382 ql_log(ql_log_fatal, vha, 0x00c7,
2402 " support, giving up -- %d/%d\n", 2383 "MSI-X: Failed to enable support, "
2403 ha->msix_count, ret); 2384 "giving up -- %d/%d.\n",
2385 ha->msix_count, ret);
2404 goto msix_out; 2386 goto msix_out;
2405 } 2387 }
2406 ha->max_rsp_queues = ha->msix_count - 1; 2388 ha->max_rsp_queues = ha->msix_count - 1;
@@ -2408,6 +2390,8 @@ msix_failed:
2408 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 2390 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2409 ha->msix_count, GFP_KERNEL); 2391 ha->msix_count, GFP_KERNEL);
2410 if (!ha->msix_entries) { 2392 if (!ha->msix_entries) {
2393 ql_log(ql_log_fatal, vha, 0x00c8,
2394 "Failed to allocate memory for ha->msix_entries.\n");
2411 ret = -ENOMEM; 2395 ret = -ENOMEM;
2412 goto msix_out; 2396 goto msix_out;
2413 } 2397 }
@@ -2434,9 +2418,9 @@ msix_failed:
2434 0, msix_entries[i].name, rsp); 2418 0, msix_entries[i].name, rsp);
2435 } 2419 }
2436 if (ret) { 2420 if (ret) {
2437 qla_printk(KERN_WARNING, ha, 2421 ql_log(ql_log_fatal, vha, 0x00cb,
2438 "MSI-X: Unable to register handler -- %x/%d.\n", 2422 "MSI-X: unable to register handler -- %x/%d.\n",
2439 qentry->vector, ret); 2423 qentry->vector, ret);
2440 qla24xx_disable_msix(ha); 2424 qla24xx_disable_msix(ha);
2441 ha->mqenable = 0; 2425 ha->mqenable = 0;
2442 goto msix_out; 2426 goto msix_out;
@@ -2449,6 +2433,12 @@ msix_failed:
2449 /* Enable MSI-X vector for response queue update for queue 0 */ 2433 /* Enable MSI-X vector for response queue update for queue 0 */
2450 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 2434 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2451 ha->mqenable = 1; 2435 ha->mqenable = 1;
2436 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2437 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2438 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2439 ql_dbg(ql_dbg_init, vha, 0x0055,
2440 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2441 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2452 2442
2453msix_out: 2443msix_out:
2454 kfree(entries); 2444 kfree(entries);
@@ -2460,6 +2450,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2460{ 2450{
2461 int ret; 2451 int ret;
2462 device_reg_t __iomem *reg = ha->iobase; 2452 device_reg_t __iomem *reg = ha->iobase;
2453 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2463 2454
2464 /* If possible, enable MSI-X. */ 2455 /* If possible, enable MSI-X. */
2465 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && 2456 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
@@ -2470,30 +2461,30 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2470 (ha->pdev->subsystem_device == 0x7040 || 2461 (ha->pdev->subsystem_device == 0x7040 ||
2471 ha->pdev->subsystem_device == 0x7041 || 2462 ha->pdev->subsystem_device == 0x7041 ||
2472 ha->pdev->subsystem_device == 0x1705)) { 2463 ha->pdev->subsystem_device == 0x1705)) {
2473 DEBUG2(qla_printk(KERN_WARNING, ha, 2464 ql_log(ql_log_warn, vha, 0x0034,
2474 "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n", 2465 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
2475 ha->pdev->subsystem_vendor, 2466 ha->pdev->subsystem_vendor,
2476 ha->pdev->subsystem_device)); 2467 ha->pdev->subsystem_device);
2477 goto skip_msi; 2468 goto skip_msi;
2478 } 2469 }
2479 2470
2480 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || 2471 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
2481 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { 2472 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
2482 DEBUG2(qla_printk(KERN_WARNING, ha, 2473 ql_log(ql_log_warn, vha, 0x0035,
2483 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", 2474 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
2484 ha->pdev->revision, ha->fw_attributes)); 2475 ha->pdev->revision, ha->fw_attributes);
2485 goto skip_msix; 2476 goto skip_msix;
2486 } 2477 }
2487 2478
2488 ret = qla24xx_enable_msix(ha, rsp); 2479 ret = qla24xx_enable_msix(ha, rsp);
2489 if (!ret) { 2480 if (!ret) {
2490 DEBUG2(qla_printk(KERN_INFO, ha, 2481 ql_dbg(ql_dbg_init, vha, 0x0036,
2491 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, 2482 "MSI-X: Enabled (0x%X, 0x%X).\n",
2492 ha->fw_attributes)); 2483 ha->chip_revision, ha->fw_attributes);
2493 goto clear_risc_ints; 2484 goto clear_risc_ints;
2494 } 2485 }
2495 qla_printk(KERN_WARNING, ha, 2486 ql_log(ql_log_info, vha, 0x0037,
2496 "MSI-X: Falling back-to MSI mode -- %d.\n", ret); 2487 "MSI-X Falling back-to MSI mode -%d.\n", ret);
2497skip_msix: 2488skip_msix:
2498 2489
2499 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 2490 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
@@ -2502,18 +2493,19 @@ skip_msix:
2502 2493
2503 ret = pci_enable_msi(ha->pdev); 2494 ret = pci_enable_msi(ha->pdev);
2504 if (!ret) { 2495 if (!ret) {
2505 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n")); 2496 ql_dbg(ql_dbg_init, vha, 0x0038,
2497 "MSI: Enabled.\n");
2506 ha->flags.msi_enabled = 1; 2498 ha->flags.msi_enabled = 1;
2507 } else 2499 } else
2508 qla_printk(KERN_WARNING, ha, 2500 ql_log(ql_log_warn, vha, 0x0039,
2509 "MSI-X: Falling back-to INTa mode -- %d.\n", ret); 2501 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
2510skip_msi: 2502skip_msi:
2511 2503
2512 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 2504 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2513 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 2505 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2514 QLA2XXX_DRIVER_NAME, rsp); 2506 QLA2XXX_DRIVER_NAME, rsp);
2515 if (ret) { 2507 if (ret) {
2516 qla_printk(KERN_WARNING, ha, 2508 ql_log(ql_log_warn, vha, 0x003a,
2517 "Failed to reserve interrupt %d already in use.\n", 2509 "Failed to reserve interrupt %d already in use.\n",
2518 ha->pdev->irq); 2510 ha->pdev->irq);
2519 goto fail; 2511 goto fail;
@@ -2563,13 +2555,14 @@ int qla25xx_request_irq(struct rsp_que *rsp)
2563 struct qla_hw_data *ha = rsp->hw; 2555 struct qla_hw_data *ha = rsp->hw;
2564 struct qla_init_msix_entry *intr = &msix_entries[2]; 2556 struct qla_init_msix_entry *intr = &msix_entries[2];
2565 struct qla_msix_entry *msix = rsp->msix; 2557 struct qla_msix_entry *msix = rsp->msix;
2558 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2566 int ret; 2559 int ret;
2567 2560
2568 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); 2561 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2569 if (ret) { 2562 if (ret) {
2570 qla_printk(KERN_WARNING, ha, 2563 ql_log(ql_log_fatal, vha, 0x00e6,
2571 "MSI-X: Unable to register handler -- %x/%d.\n", 2564 "MSI-X: Unable to register handler -- %x/%d.\n",
2572 msix->vector, ret); 2565 msix->vector, ret);
2573 return ret; 2566 return ret;
2574 } 2567 }
2575 msix->have_irq = 1; 2568 msix->have_irq = 1;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index c26f0acdfecc..f7604ea1af83 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -46,14 +46,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
46 struct qla_hw_data *ha = vha->hw; 46 struct qla_hw_data *ha = vha->hw;
47 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 47 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
48 48
49 if (ha->pdev->error_state > pci_channel_io_frozen) 49 ql_dbg(ql_dbg_mbx, base_vha, 0x1000, "Entered %s.\n", __func__);
50
51 if (ha->pdev->error_state > pci_channel_io_frozen) {
52 ql_log(ql_log_warn, base_vha, 0x1001,
53 "error_state is greater than pci_channel_io_frozen, "
54 "exiting.\n");
50 return QLA_FUNCTION_TIMEOUT; 55 return QLA_FUNCTION_TIMEOUT;
56 }
51 57
52 if (vha->device_flags & DFLG_DEV_FAILED) { 58 if (vha->device_flags & DFLG_DEV_FAILED) {
53 DEBUG2_3_11(qla_printk(KERN_WARNING, ha, 59 ql_log(ql_log_warn, base_vha, 0x1002,
54 "%s(%ld): Device in failed state, " 60 "Device in failed state, exiting.\n");
55 "timeout MBX Exiting.\n",
56 __func__, base_vha->host_no));
57 return QLA_FUNCTION_TIMEOUT; 61 return QLA_FUNCTION_TIMEOUT;
58 } 62 }
59 63
@@ -63,17 +67,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
63 rval = QLA_SUCCESS; 67 rval = QLA_SUCCESS;
64 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 68 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
65 69
66 DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
67 70
68 if (ha->flags.pci_channel_io_perm_failure) { 71 if (ha->flags.pci_channel_io_perm_failure) {
69 DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX " 72 ql_log(ql_log_warn, base_vha, 0x1003,
70 "Exiting.\n", __func__, vha->host_no)); 73 "Perm failure on EEH timeout MBX, exiting.\n");
71 return QLA_FUNCTION_TIMEOUT; 74 return QLA_FUNCTION_TIMEOUT;
72 } 75 }
73 76
74 if (ha->flags.isp82xx_fw_hung) { 77 if (ha->flags.isp82xx_fw_hung) {
75 /* Setting Link-Down error */ 78 /* Setting Link-Down error */
76 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 79 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
80 ql_log(ql_log_warn, base_vha, 0x1004,
81 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
77 rval = QLA_FUNCTION_FAILED; 82 rval = QLA_FUNCTION_FAILED;
78 goto premature_exit; 83 goto premature_exit;
79 } 84 }
@@ -85,8 +90,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
85 */ 90 */
86 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { 91 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
87 /* Timeout occurred. Return error. */ 92 /* Timeout occurred. Return error. */
88 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. " 93 ql_log(ql_log_warn, base_vha, 0x1005,
89 "Exiting.\n", __func__, base_vha->host_no)); 94 "Cmd access timeout, Exiting.\n");
90 return QLA_FUNCTION_TIMEOUT; 95 return QLA_FUNCTION_TIMEOUT;
91 } 96 }
92 97
@@ -94,8 +99,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
94 /* Save mailbox command for debug */ 99 /* Save mailbox command for debug */
95 ha->mcp = mcp; 100 ha->mcp = mcp;
96 101
97 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n", 102 ql_dbg(ql_dbg_mbx, base_vha, 0x1006,
98 base_vha->host_no, mcp->mb[0])); 103 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
99 104
100 spin_lock_irqsave(&ha->hardware_lock, flags); 105 spin_lock_irqsave(&ha->hardware_lock, flags);
101 106
@@ -123,27 +128,30 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
123 iptr++; 128 iptr++;
124 } 129 }
125 130
126#if defined(QL_DEBUG_LEVEL_1) 131 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1111,
127 printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n", 132 "Loaded MBX registers (displayed in bytes) =.\n");
128 __func__, base_vha->host_no); 133 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1112,
129 qla2x00_dump_buffer((uint8_t *)mcp->mb, 16); 134 (uint8_t *)mcp->mb, 16);
130 printk("\n"); 135 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1113,
131 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16); 136 ".\n");
132 printk("\n"); 137 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1114,
133 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8); 138 ((uint8_t *)mcp->mb + 0x10), 16);
134 printk("\n"); 139 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1115,
135 printk("%s(%ld): I/O address = %p.\n", __func__, base_vha->host_no, 140 ".\n");
136 optr); 141 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1116,
137 qla2x00_dump_regs(base_vha); 142 ((uint8_t *)mcp->mb + 0x20), 8);
138#endif 143 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1117,
144 "I/O Address = %p.\n", optr);
145 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x100e);
139 146
140 /* Issue set host interrupt command to send cmd out. */ 147 /* Issue set host interrupt command to send cmd out. */
141 ha->flags.mbox_int = 0; 148 ha->flags.mbox_int = 0;
142 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 149 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
143 150
144 /* Unlock mbx registers and wait for interrupt */ 151 /* Unlock mbx registers and wait for interrupt */
145 DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. " 152 ql_dbg(ql_dbg_mbx, base_vha, 0x100f,
146 "jiffies=%lx.\n", __func__, base_vha->host_no, jiffies)); 153 "Going to unlock irq & waiting for interrupts. "
154 "jiffies=%lx.\n", jiffies);
147 155
148 /* Wait for mbx cmd completion until timeout */ 156 /* Wait for mbx cmd completion until timeout */
149 157
@@ -155,9 +163,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
155 HINT_MBX_INT_PENDING) { 163 HINT_MBX_INT_PENDING) {
156 spin_unlock_irqrestore(&ha->hardware_lock, 164 spin_unlock_irqrestore(&ha->hardware_lock,
157 flags); 165 flags);
158 DEBUG2_3_11(printk(KERN_INFO 166 ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
159 "%s(%ld): Pending Mailbox timeout. " 167 "Pending mailbox timeout, exiting.\n");
160 "Exiting.\n", __func__, base_vha->host_no));
161 rval = QLA_FUNCTION_TIMEOUT; 168 rval = QLA_FUNCTION_TIMEOUT;
162 goto premature_exit; 169 goto premature_exit;
163 } 170 }
@@ -173,17 +180,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
173 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 180 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
174 181
175 } else { 182 } else {
176 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__, 183 ql_dbg(ql_dbg_mbx, base_vha, 0x1011,
177 base_vha->host_no, command)); 184 "Cmd=%x Polling Mode.\n", command);
178 185
179 if (IS_QLA82XX(ha)) { 186 if (IS_QLA82XX(ha)) {
180 if (RD_REG_DWORD(&reg->isp82.hint) & 187 if (RD_REG_DWORD(&reg->isp82.hint) &
181 HINT_MBX_INT_PENDING) { 188 HINT_MBX_INT_PENDING) {
182 spin_unlock_irqrestore(&ha->hardware_lock, 189 spin_unlock_irqrestore(&ha->hardware_lock,
183 flags); 190 flags);
184 DEBUG2_3_11(printk(KERN_INFO 191 ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
185 "%s(%ld): Pending Mailbox timeout. " 192 "Pending mailbox timeout, exiting.\n");
186 "Exiting.\n", __func__, base_vha->host_no));
187 rval = QLA_FUNCTION_TIMEOUT; 193 rval = QLA_FUNCTION_TIMEOUT;
188 goto premature_exit; 194 goto premature_exit;
189 } 195 }
@@ -207,17 +213,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
207 command == MBC_LOAD_RISC_RAM_EXTENDED)) 213 command == MBC_LOAD_RISC_RAM_EXTENDED))
208 msleep(10); 214 msleep(10);
209 } /* while */ 215 } /* while */
210 DEBUG17(qla_printk(KERN_WARNING, ha, 216 ql_dbg(ql_dbg_mbx, base_vha, 0x1013,
211 "Waited %d sec\n", 217 "Waited %d sec.\n",
212 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ))); 218 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
213 } 219 }
214 220
215 /* Check whether we timed out */ 221 /* Check whether we timed out */
216 if (ha->flags.mbox_int) { 222 if (ha->flags.mbox_int) {
217 uint16_t *iptr2; 223 uint16_t *iptr2;
218 224
219 DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__, 225 ql_dbg(ql_dbg_mbx, base_vha, 0x1014,
220 base_vha->host_no, command)); 226 "Cmd=%x completed.\n", command);
221 227
222 /* Got interrupt. Clear the flag. */ 228 /* Got interrupt. Clear the flag. */
223 ha->flags.mbox_int = 0; 229 ha->flags.mbox_int = 0;
@@ -229,6 +235,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
229 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 235 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
230 ha->mcp = NULL; 236 ha->mcp = NULL;
231 rval = QLA_FUNCTION_FAILED; 237 rval = QLA_FUNCTION_FAILED;
238 ql_log(ql_log_warn, base_vha, 0x1015,
239 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
232 goto premature_exit; 240 goto premature_exit;
233 } 241 }
234 242
@@ -249,8 +257,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
249 } 257 }
250 } else { 258 } else {
251 259
252#if defined(QL_DEBUG_LEVEL_2) || defined(QL_DEBUG_LEVEL_3) || \
253 defined(QL_DEBUG_LEVEL_11)
254 uint16_t mb0; 260 uint16_t mb0;
255 uint32_t ictrl; 261 uint32_t ictrl;
256 262
@@ -261,14 +267,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
261 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0); 267 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
262 ictrl = RD_REG_WORD(&reg->isp.ictrl); 268 ictrl = RD_REG_WORD(&reg->isp.ictrl);
263 } 269 }
264 printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n", 270 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1119,
265 __func__, base_vha->host_no, command); 271 "MBX Command timeout for cmd %x.\n", command);
266 printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__, 272 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111a,
267 base_vha->host_no, ictrl, jiffies); 273 "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
268 printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__, 274 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111b,
269 base_vha->host_no, mb0); 275 "mb[0] = 0x%x.\n", mb0);
270 qla2x00_dump_regs(base_vha); 276 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1019);
271#endif
272 277
273 rval = QLA_FUNCTION_TIMEOUT; 278 rval = QLA_FUNCTION_TIMEOUT;
274 } 279 }
@@ -279,8 +284,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
279 ha->mcp = NULL; 284 ha->mcp = NULL;
280 285
281 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { 286 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
282 DEBUG11(printk("%s(%ld): checking for additional resp " 287 ql_dbg(ql_dbg_mbx, base_vha, 0x101a,
283 "interrupt.\n", __func__, base_vha->host_no)); 288 "Checking for additional resp interrupt.\n");
284 289
285 /* polling mode for non isp_abort commands. */ 290 /* polling mode for non isp_abort commands. */
286 qla2x00_poll(ha->rsp_q_map[0]); 291 qla2x00_poll(ha->rsp_q_map[0]);
@@ -291,38 +296,32 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
291 if (!io_lock_on || (mcp->flags & IOCTL_CMD) || 296 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
292 ha->flags.eeh_busy) { 297 ha->flags.eeh_busy) {
293 /* not in dpc. schedule it for dpc to take over. */ 298 /* not in dpc. schedule it for dpc to take over. */
294 DEBUG(printk("%s(%ld): timeout schedule " 299 ql_dbg(ql_dbg_mbx, base_vha, 0x101b,
295 "isp_abort_needed.\n", __func__, 300 "Timeout, schedule isp_abort_needed.\n");
296 base_vha->host_no));
297 DEBUG2_3_11(printk("%s(%ld): timeout schedule "
298 "isp_abort_needed.\n", __func__,
299 base_vha->host_no));
300 301
301 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 302 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
302 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 303 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
303 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 304 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
304 305
305 qla_printk(KERN_WARNING, ha, 306 ql_log(ql_log_info, base_vha, 0x101c,
306 "Mailbox command timeout occurred. " 307 "Mailbox cmd timeout occured. "
307 "Scheduling ISP " "abort. eeh_busy: 0x%x\n", 308 "Scheduling ISP abort eeh_busy=0x%x.\n",
308 ha->flags.eeh_busy); 309 ha->flags.eeh_busy);
309 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 310 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
310 qla2xxx_wake_dpc(vha); 311 qla2xxx_wake_dpc(vha);
311 } 312 }
312 } else if (!abort_active) { 313 } else if (!abort_active) {
313 /* call abort directly since we are in the DPC thread */ 314 /* call abort directly since we are in the DPC thread */
314 DEBUG(printk("%s(%ld): timeout calling abort_isp\n", 315 ql_dbg(ql_dbg_mbx, base_vha, 0x101d,
315 __func__, base_vha->host_no)); 316 "Timeout, calling abort_isp.\n");
316 DEBUG2_3_11(printk("%s(%ld): timeout calling "
317 "abort_isp\n", __func__, base_vha->host_no));
318 317
319 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 318 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
320 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 319 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
321 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 320 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
322 321
323 qla_printk(KERN_WARNING, ha, 322 ql_log(ql_log_info, base_vha, 0x101e,
324 "Mailbox command timeout occurred. " 323 "Mailbox cmd timeout occured. "
325 "Issuing ISP abort.\n"); 324 "Scheduling ISP abort.\n");
326 325
327 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 326 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
328 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 327 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -332,11 +331,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
332 &vha->dpc_flags); 331 &vha->dpc_flags);
333 } 332 }
334 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 333 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
335 DEBUG(printk("%s(%ld): finished abort_isp\n", 334 ql_dbg(ql_dbg_mbx, base_vha, 0x101f,
336 __func__, vha->host_no)); 335 "Finished abort_isp.\n");
337 DEBUG2_3_11(printk(
338 "%s(%ld): finished abort_isp\n",
339 __func__, vha->host_no));
340 } 336 }
341 } 337 }
342 } 338 }
@@ -346,12 +342,11 @@ premature_exit:
346 complete(&ha->mbx_cmd_comp); 342 complete(&ha->mbx_cmd_comp);
347 343
348 if (rval) { 344 if (rval) {
349 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, " 345 ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
350 "mbx2=%x, cmd=%x ****\n", __func__, base_vha->host_no, 346 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n",
351 mcp->mb[0], mcp->mb[1], mcp->mb[2], command)); 347 mcp->mb[0], mcp->mb[1], mcp->mb[2], command);
352 } else { 348 } else {
353 DEBUG11(printk("%s(%ld): done.\n", __func__, 349 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
354 base_vha->host_no));
355 } 350 }
356 351
357 return rval; 352 return rval;
@@ -366,7 +361,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
366 mbx_cmd_t mc; 361 mbx_cmd_t mc;
367 mbx_cmd_t *mcp = &mc; 362 mbx_cmd_t *mcp = &mc;
368 363
369 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 364 ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);
370 365
371 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { 366 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
372 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; 367 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -397,10 +392,10 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
397 rval = qla2x00_mailbox_command(vha, mcp); 392 rval = qla2x00_mailbox_command(vha, mcp);
398 393
399 if (rval != QLA_SUCCESS) { 394 if (rval != QLA_SUCCESS) {
400 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 395 ql_dbg(ql_dbg_mbx, vha, 0x1023,
401 vha->host_no, rval, mcp->mb[0])); 396 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
402 } else { 397 } else {
403 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 398 ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);
404 } 399 }
405 400
406 return rval; 401 return rval;
@@ -430,7 +425,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
430 mbx_cmd_t mc; 425 mbx_cmd_t mc;
431 mbx_cmd_t *mcp = &mc; 426 mbx_cmd_t *mcp = &mc;
432 427
433 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 428 ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__);
434 429
435 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 430 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
436 mcp->out_mb = MBX_0; 431 mcp->out_mb = MBX_0;
@@ -461,15 +456,14 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
461 rval = qla2x00_mailbox_command(vha, mcp); 456 rval = qla2x00_mailbox_command(vha, mcp);
462 457
463 if (rval != QLA_SUCCESS) { 458 if (rval != QLA_SUCCESS) {
464 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 459 ql_dbg(ql_dbg_mbx, vha, 0x1026,
465 vha->host_no, rval, mcp->mb[0])); 460 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
466 } else { 461 } else {
467 if (IS_FWI2_CAPABLE(ha)) { 462 if (IS_FWI2_CAPABLE(ha)) {
468 DEBUG11(printk("%s(%ld): done exchanges=%x.\n", 463 ql_dbg(ql_dbg_mbx, vha, 0x1027,
469 __func__, vha->host_no, mcp->mb[1])); 464 "Done exchanges=%x.\n", mcp->mb[1]);
470 } else { 465 } else {
471 DEBUG11(printk("%s(%ld): done.\n", __func__, 466 ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__);
472 vha->host_no));
473 } 467 }
474 } 468 }
475 469
@@ -501,7 +495,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
501 mbx_cmd_t mc; 495 mbx_cmd_t mc;
502 mbx_cmd_t *mcp = &mc; 496 mbx_cmd_t *mcp = &mc;
503 497
504 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 498 ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
505 499
506 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 500 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
507 mcp->out_mb = MBX_0; 501 mcp->out_mb = MBX_0;
@@ -535,11 +529,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
535failed: 529failed:
536 if (rval != QLA_SUCCESS) { 530 if (rval != QLA_SUCCESS) {
537 /*EMPTY*/ 531 /*EMPTY*/
538 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 532 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
539 vha->host_no, rval));
540 } else { 533 } else {
541 /*EMPTY*/ 534 /*EMPTY*/
542 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 535 ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__);
543 } 536 }
544 return rval; 537 return rval;
545} 538}
@@ -565,7 +558,7 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
565 mbx_cmd_t mc; 558 mbx_cmd_t mc;
566 mbx_cmd_t *mcp = &mc; 559 mbx_cmd_t *mcp = &mc;
567 560
568 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 561 ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__);
569 562
570 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; 563 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
571 mcp->out_mb = MBX_0; 564 mcp->out_mb = MBX_0;
@@ -576,15 +569,14 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
576 569
577 if (rval != QLA_SUCCESS) { 570 if (rval != QLA_SUCCESS) {
578 /*EMPTY*/ 571 /*EMPTY*/
579 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 572 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
580 vha->host_no, rval));
581 } else { 573 } else {
582 fwopts[0] = mcp->mb[0]; 574 fwopts[0] = mcp->mb[0];
583 fwopts[1] = mcp->mb[1]; 575 fwopts[1] = mcp->mb[1];
584 fwopts[2] = mcp->mb[2]; 576 fwopts[2] = mcp->mb[2];
585 fwopts[3] = mcp->mb[3]; 577 fwopts[3] = mcp->mb[3];
586 578
587 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 579 ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__);
588 } 580 }
589 581
590 return rval; 582 return rval;
@@ -612,7 +604,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
612 mbx_cmd_t mc; 604 mbx_cmd_t mc;
613 mbx_cmd_t *mcp = &mc; 605 mbx_cmd_t *mcp = &mc;
614 606
615 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 607 ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__);
616 608
617 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; 609 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
618 mcp->mb[1] = fwopts[1]; 610 mcp->mb[1] = fwopts[1];
@@ -636,11 +628,11 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
636 628
637 if (rval != QLA_SUCCESS) { 629 if (rval != QLA_SUCCESS) {
638 /*EMPTY*/ 630 /*EMPTY*/
639 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__, 631 ql_dbg(ql_dbg_mbx, vha, 0x1030,
640 vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 632 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
641 } else { 633 } else {
642 /*EMPTY*/ 634 /*EMPTY*/
643 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 635 ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__);
644 } 636 }
645 637
646 return rval; 638 return rval;
@@ -668,7 +660,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
668 mbx_cmd_t mc; 660 mbx_cmd_t mc;
669 mbx_cmd_t *mcp = &mc; 661 mbx_cmd_t *mcp = &mc;
670 662
671 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", vha->host_no)); 663 ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__);
672 664
673 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 665 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
674 mcp->mb[1] = 0xAAAA; 666 mcp->mb[1] = 0xAAAA;
@@ -695,12 +687,10 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
695 687
696 if (rval != QLA_SUCCESS) { 688 if (rval != QLA_SUCCESS) {
697 /*EMPTY*/ 689 /*EMPTY*/
698 DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n", 690 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
699 vha->host_no, rval));
700 } else { 691 } else {
701 /*EMPTY*/ 692 /*EMPTY*/
702 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n", 693 ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__);
703 vha->host_no));
704 } 694 }
705 695
706 return rval; 696 return rval;
@@ -728,7 +718,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
728 mbx_cmd_t mc; 718 mbx_cmd_t mc;
729 mbx_cmd_t *mcp = &mc; 719 mbx_cmd_t *mcp = &mc;
730 720
731 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 721 ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__);
732 722
733 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 723 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
734 mcp->out_mb = MBX_0; 724 mcp->out_mb = MBX_0;
@@ -749,11 +739,11 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
749 rval = qla2x00_mailbox_command(vha, mcp); 739 rval = qla2x00_mailbox_command(vha, mcp);
750 740
751 if (rval != QLA_SUCCESS) { 741 if (rval != QLA_SUCCESS) {
752 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__, 742 ql_dbg(ql_dbg_mbx, vha, 0x1036,
753 vha->host_no, rval, IS_FWI2_CAPABLE(vha->hw) ? 743 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
754 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1])); 744 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
755 } else { 745 } else {
756 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 746 ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__);
757 } 747 }
758 748
759 return rval; 749 return rval;
@@ -785,6 +775,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
785 mbx_cmd_t mc; 775 mbx_cmd_t mc;
786 mbx_cmd_t *mcp = &mc; 776 mbx_cmd_t *mcp = &mc;
787 777
778 ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__);
779
788 mcp->mb[0] = MBC_IOCB_COMMAND_A64; 780 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
789 mcp->mb[1] = 0; 781 mcp->mb[1] = 0;
790 mcp->mb[2] = MSW(phys_addr); 782 mcp->mb[2] = MSW(phys_addr);
@@ -799,14 +791,14 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
799 791
800 if (rval != QLA_SUCCESS) { 792 if (rval != QLA_SUCCESS) {
801 /*EMPTY*/ 793 /*EMPTY*/
802 DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n", 794 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
803 vha->host_no, rval));
804 } else { 795 } else {
805 sts_entry_t *sts_entry = (sts_entry_t *) buffer; 796 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
806 797
807 /* Mask reserved bits. */ 798 /* Mask reserved bits. */
808 sts_entry->entry_status &= 799 sts_entry->entry_status &=
809 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK; 800 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
801 ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__);
810 } 802 }
811 803
812 return rval; 804 return rval;
@@ -847,7 +839,7 @@ qla2x00_abort_command(srb_t *sp)
847 struct qla_hw_data *ha = vha->hw; 839 struct qla_hw_data *ha = vha->hw;
848 struct req_que *req = vha->req; 840 struct req_que *req = vha->req;
849 841
850 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); 842 ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
851 843
852 spin_lock_irqsave(&ha->hardware_lock, flags); 844 spin_lock_irqsave(&ha->hardware_lock, flags);
853 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 845 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -876,11 +868,9 @@ qla2x00_abort_command(srb_t *sp)
876 rval = qla2x00_mailbox_command(vha, mcp); 868 rval = qla2x00_mailbox_command(vha, mcp);
877 869
878 if (rval != QLA_SUCCESS) { 870 if (rval != QLA_SUCCESS) {
879 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n", 871 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
880 vha->host_no, rval));
881 } else { 872 } else {
882 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n", 873 ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__);
883 vha->host_no));
884 } 874 }
885 875
886 return rval; 876 return rval;
@@ -896,10 +886,11 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
896 struct req_que *req; 886 struct req_que *req;
897 struct rsp_que *rsp; 887 struct rsp_que *rsp;
898 888
899 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
900
901 l = l; 889 l = l;
902 vha = fcport->vha; 890 vha = fcport->vha;
891
892 ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__);
893
903 req = vha->hw->req_q_map[0]; 894 req = vha->hw->req_q_map[0];
904 rsp = req->rsp; 895 rsp = req->rsp;
905 mcp->mb[0] = MBC_ABORT_TARGET; 896 mcp->mb[0] = MBC_ABORT_TARGET;
@@ -919,18 +910,17 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
919 mcp->flags = 0; 910 mcp->flags = 0;
920 rval = qla2x00_mailbox_command(vha, mcp); 911 rval = qla2x00_mailbox_command(vha, mcp);
921 if (rval != QLA_SUCCESS) { 912 if (rval != QLA_SUCCESS) {
922 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 913 ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval);
923 vha->host_no, rval));
924 } 914 }
925 915
926 /* Issue marker IOCB. */ 916 /* Issue marker IOCB. */
927 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0, 917 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
928 MK_SYNC_ID); 918 MK_SYNC_ID);
929 if (rval2 != QLA_SUCCESS) { 919 if (rval2 != QLA_SUCCESS) {
930 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 920 ql_dbg(ql_dbg_mbx, vha, 0x1040,
931 "(%x).\n", __func__, vha->host_no, rval2)); 921 "Failed to issue marker IOCB (%x).\n", rval2);
932 } else { 922 } else {
933 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 923 ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__);
934 } 924 }
935 925
936 return rval; 926 return rval;
@@ -946,9 +936,10 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
946 struct req_que *req; 936 struct req_que *req;
947 struct rsp_que *rsp; 937 struct rsp_que *rsp;
948 938
949 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
950
951 vha = fcport->vha; 939 vha = fcport->vha;
940
941 ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__);
942
952 req = vha->hw->req_q_map[0]; 943 req = vha->hw->req_q_map[0];
953 rsp = req->rsp; 944 rsp = req->rsp;
954 mcp->mb[0] = MBC_LUN_RESET; 945 mcp->mb[0] = MBC_LUN_RESET;
@@ -966,18 +957,17 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
966 mcp->flags = 0; 957 mcp->flags = 0;
967 rval = qla2x00_mailbox_command(vha, mcp); 958 rval = qla2x00_mailbox_command(vha, mcp);
968 if (rval != QLA_SUCCESS) { 959 if (rval != QLA_SUCCESS) {
969 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 960 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
970 vha->host_no, rval));
971 } 961 }
972 962
973 /* Issue marker IOCB. */ 963 /* Issue marker IOCB. */
974 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, 964 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
975 MK_SYNC_ID_LUN); 965 MK_SYNC_ID_LUN);
976 if (rval2 != QLA_SUCCESS) { 966 if (rval2 != QLA_SUCCESS) {
977 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 967 ql_dbg(ql_dbg_mbx, vha, 0x1044,
978 "(%x).\n", __func__, vha->host_no, rval2)); 968 "Failed to issue marker IOCB (%x).\n", rval2);
979 } else { 969 } else {
980 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 970 ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__);
981 } 971 }
982 972
983 return rval; 973 return rval;
@@ -1011,8 +1001,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1011 mbx_cmd_t mc; 1001 mbx_cmd_t mc;
1012 mbx_cmd_t *mcp = &mc; 1002 mbx_cmd_t *mcp = &mc;
1013 1003
1014 DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n", 1004 ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__);
1015 vha->host_no));
1016 1005
1017 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 1006 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1018 mcp->mb[9] = vha->vp_idx; 1007 mcp->mb[9] = vha->vp_idx;
@@ -1038,11 +1027,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1038 1027
1039 if (rval != QLA_SUCCESS) { 1028 if (rval != QLA_SUCCESS) {
1040 /*EMPTY*/ 1029 /*EMPTY*/
1041 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", 1030 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1042 vha->host_no, rval));
1043 } else { 1031 } else {
1044 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", 1032 ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
1045 vha->host_no));
1046 1033
1047 if (IS_QLA8XXX_TYPE(vha->hw)) { 1034 if (IS_QLA8XXX_TYPE(vha->hw)) {
1048 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; 1035 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
@@ -1083,8 +1070,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1083 mbx_cmd_t mc; 1070 mbx_cmd_t mc;
1084 mbx_cmd_t *mcp = &mc; 1071 mbx_cmd_t *mcp = &mc;
1085 1072
1086 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n", 1073 ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__);
1087 vha->host_no));
1088 1074
1089 mcp->mb[0] = MBC_GET_RETRY_COUNT; 1075 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1090 mcp->out_mb = MBX_0; 1076 mcp->out_mb = MBX_0;
@@ -1095,8 +1081,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1095 1081
1096 if (rval != QLA_SUCCESS) { 1082 if (rval != QLA_SUCCESS) {
1097 /*EMPTY*/ 1083 /*EMPTY*/
1098 DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n", 1084 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1099 vha->host_no, mcp->mb[0])); 1085 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1100 } else { 1086 } else {
1101 /* Convert returned data and check our values. */ 1087 /* Convert returned data and check our values. */
1102 *r_a_tov = mcp->mb[3] / 2; 1088 *r_a_tov = mcp->mb[3] / 2;
@@ -1107,8 +1093,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1107 *tov = ratov; 1093 *tov = ratov;
1108 } 1094 }
1109 1095
1110 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d " 1096 ql_dbg(ql_dbg_mbx, vha, 0x104b,
1111 "ratov=%d.\n", vha->host_no, mcp->mb[3], ratov)); 1097 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1112 } 1098 }
1113 1099
1114 return rval; 1100 return rval;
@@ -1139,8 +1125,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1139 mbx_cmd_t *mcp = &mc; 1125 mbx_cmd_t *mcp = &mc;
1140 struct qla_hw_data *ha = vha->hw; 1126 struct qla_hw_data *ha = vha->hw;
1141 1127
1142 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", 1128 ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__);
1143 vha->host_no));
1144 1129
1145 if (IS_QLA82XX(ha) && ql2xdbwr) 1130 if (IS_QLA82XX(ha) && ql2xdbwr)
1146 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, 1131 qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
@@ -1174,13 +1159,11 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1174 1159
1175 if (rval != QLA_SUCCESS) { 1160 if (rval != QLA_SUCCESS) {
1176 /*EMPTY*/ 1161 /*EMPTY*/
1177 DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x " 1162 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1178 "mb0=%x.\n", 1163 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1179 vha->host_no, rval, mcp->mb[0]));
1180 } else { 1164 } else {
1181 /*EMPTY*/ 1165 /*EMPTY*/
1182 DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n", 1166 ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
1183 vha->host_no));
1184 } 1167 }
1185 1168
1186 return rval; 1169 return rval;
@@ -1213,13 +1196,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1213 dma_addr_t pd_dma; 1196 dma_addr_t pd_dma;
1214 struct qla_hw_data *ha = vha->hw; 1197 struct qla_hw_data *ha = vha->hw;
1215 1198
1216 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1199 ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__);
1217 1200
1218 pd24 = NULL; 1201 pd24 = NULL;
1219 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1202 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1220 if (pd == NULL) { 1203 if (pd == NULL) {
1221 DEBUG2_3(printk("%s(%ld): failed to allocate Port Database " 1204 ql_log(ql_log_warn, vha, 0x1050,
1222 "structure.\n", __func__, vha->host_no)); 1205 "Failed to allocate port database structure.\n");
1223 return QLA_MEMORY_ALLOC_FAILED; 1206 return QLA_MEMORY_ALLOC_FAILED;
1224 } 1207 }
1225 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); 1208 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
@@ -1261,12 +1244,10 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1261 /* Check for logged in state. */ 1244 /* Check for logged in state. */
1262 if (pd24->current_login_state != PDS_PRLI_COMPLETE && 1245 if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
1263 pd24->last_login_state != PDS_PRLI_COMPLETE) { 1246 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1264 DEBUG2(qla_printk(KERN_WARNING, ha, 1247 ql_dbg(ql_dbg_mbx, vha, 0x1051,
1265 "scsi(%ld): Unable to verify login-state (%x/%x) " 1248 "Unable to verify login-state (%x/%x) for "
1266 " - portid=%02x%02x%02x.\n", vha->host_no, 1249 "loop_id %x.\n", pd24->current_login_state,
1267 pd24->current_login_state, pd24->last_login_state, 1250 pd24->last_login_state, fcport->loop_id);
1268 fcport->d_id.b.domain, fcport->d_id.b.area,
1269 fcport->d_id.b.al_pa));
1270 rval = QLA_FUNCTION_FAILED; 1251 rval = QLA_FUNCTION_FAILED;
1271 goto gpd_error_out; 1252 goto gpd_error_out;
1272 } 1253 }
@@ -1290,12 +1271,11 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1290 /* Check for logged in state. */ 1271 /* Check for logged in state. */
1291 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 1272 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1292 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 1273 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1293 DEBUG2(qla_printk(KERN_WARNING, ha, 1274 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1294 "scsi(%ld): Unable to verify login-state (%x/%x) " 1275 "Unable to verify login-state (%x/%x) - "
1295 " - portid=%02x%02x%02x.\n", vha->host_no, 1276 "portid=%02x%02x%02x.\n", pd->master_state,
1296 pd->master_state, pd->slave_state, 1277 pd->slave_state, fcport->d_id.b.domain,
1297 fcport->d_id.b.domain, fcport->d_id.b.area, 1278 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1298 fcport->d_id.b.al_pa));
1299 rval = QLA_FUNCTION_FAILED; 1279 rval = QLA_FUNCTION_FAILED;
1300 goto gpd_error_out; 1280 goto gpd_error_out;
1301 } 1281 }
@@ -1325,10 +1305,11 @@ gpd_error_out:
1325 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 1305 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1326 1306
1327 if (rval != QLA_SUCCESS) { 1307 if (rval != QLA_SUCCESS) {
1328 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 1308 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1329 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1309 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1310 mcp->mb[0], mcp->mb[1]);
1330 } else { 1311 } else {
1331 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1312 ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__);
1332 } 1313 }
1333 1314
1334 return rval; 1315 return rval;
@@ -1357,8 +1338,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1357 mbx_cmd_t mc; 1338 mbx_cmd_t mc;
1358 mbx_cmd_t *mcp = &mc; 1339 mbx_cmd_t *mcp = &mc;
1359 1340
1360 DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n", 1341 ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__);
1361 vha->host_no));
1362 1342
1363 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1343 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1364 mcp->out_mb = MBX_0; 1344 mcp->out_mb = MBX_0;
@@ -1381,12 +1361,10 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1381 1361
1382 if (rval != QLA_SUCCESS) { 1362 if (rval != QLA_SUCCESS) {
1383 /*EMPTY*/ 1363 /*EMPTY*/
1384 DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): " 1364 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1385 "failed=%x.\n", vha->host_no, rval));
1386 } else { 1365 } else {
1387 /*EMPTY*/ 1366 /*EMPTY*/
1388 DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n", 1367 ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__);
1389 vha->host_no));
1390 } 1368 }
1391 1369
1392 return rval; 1370 return rval;
@@ -1418,8 +1396,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1418 mbx_cmd_t mc; 1396 mbx_cmd_t mc;
1419 mbx_cmd_t *mcp = &mc; 1397 mbx_cmd_t *mcp = &mc;
1420 1398
1421 DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n", 1399 ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__);
1422 vha->host_no));
1423 1400
1424 mcp->mb[0] = MBC_GET_PORT_NAME; 1401 mcp->mb[0] = MBC_GET_PORT_NAME;
1425 mcp->mb[9] = vha->vp_idx; 1402 mcp->mb[9] = vha->vp_idx;
@@ -1439,8 +1416,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1439 1416
1440 if (rval != QLA_SUCCESS) { 1417 if (rval != QLA_SUCCESS) {
1441 /*EMPTY*/ 1418 /*EMPTY*/
1442 DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n", 1419 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
1443 vha->host_no, rval));
1444 } else { 1420 } else {
1445 if (name != NULL) { 1421 if (name != NULL) {
1446 /* This function returns name in big endian. */ 1422 /* This function returns name in big endian. */
@@ -1454,8 +1430,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1454 name[7] = LSB(mcp->mb[7]); 1430 name[7] = LSB(mcp->mb[7]);
1455 } 1431 }
1456 1432
1457 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n", 1433 ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__);
1458 vha->host_no));
1459 } 1434 }
1460 1435
1461 return rval; 1436 return rval;
@@ -1483,7 +1458,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1483 mbx_cmd_t mc; 1458 mbx_cmd_t mc;
1484 mbx_cmd_t *mcp = &mc; 1459 mbx_cmd_t *mcp = &mc;
1485 1460
1486 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1461 ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
1487 1462
1488 if (IS_QLA8XXX_TYPE(vha->hw)) { 1463 if (IS_QLA8XXX_TYPE(vha->hw)) {
1489 /* Logout across all FCFs. */ 1464 /* Logout across all FCFs. */
@@ -1517,11 +1492,10 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1517 1492
1518 if (rval != QLA_SUCCESS) { 1493 if (rval != QLA_SUCCESS) {
1519 /*EMPTY*/ 1494 /*EMPTY*/
1520 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", 1495 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
1521 __func__, vha->host_no, rval));
1522 } else { 1496 } else {
1523 /*EMPTY*/ 1497 /*EMPTY*/
1524 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1498 ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__);
1525 } 1499 }
1526 1500
1527 return rval; 1501 return rval;
@@ -1553,12 +1527,11 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1553 mbx_cmd_t mc; 1527 mbx_cmd_t mc;
1554 mbx_cmd_t *mcp = &mc; 1528 mbx_cmd_t *mcp = &mc;
1555 1529
1556 DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n", 1530 ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__);
1557 vha->host_no));
1558 1531
1559 DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total " 1532 ql_dbg(ql_dbg_mbx, vha, 0x105e,
1560 "tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout, 1533 "Retry cnt=%d ratov=%d total tov=%d.\n",
1561 mcp->tov)); 1534 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
1562 1535
1563 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 1536 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
1564 mcp->mb[1] = cmd_size; 1537 mcp->mb[1] = cmd_size;
@@ -1575,13 +1548,12 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1575 1548
1576 if (rval != QLA_SUCCESS) { 1549 if (rval != QLA_SUCCESS) {
1577 /*EMPTY*/ 1550 /*EMPTY*/
1578 DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1551 ql_dbg(ql_dbg_mbx, vha, 0x105f,
1579 "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1552 "Failed=%x mb[0]=%x mb[1]=%x.\n",
1580 DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1553 rval, mcp->mb[0], mcp->mb[1]);
1581 "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1582 } else { 1554 } else {
1583 /*EMPTY*/ 1555 /*EMPTY*/
1584 DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", vha->host_no)); 1556 ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__);
1585 } 1557 }
1586 1558
1587 return rval; 1559 return rval;
@@ -1600,7 +1572,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1600 struct req_que *req; 1572 struct req_que *req;
1601 struct rsp_que *rsp; 1573 struct rsp_que *rsp;
1602 1574
1603 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1575 ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__);
1604 1576
1605 if (ha->flags.cpu_affinity_enabled) 1577 if (ha->flags.cpu_affinity_enabled)
1606 req = ha->req_q_map[0]; 1578 req = ha->req_q_map[0];
@@ -1610,8 +1582,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1610 1582
1611 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1583 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1612 if (lg == NULL) { 1584 if (lg == NULL) {
1613 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", 1585 ql_log(ql_log_warn, vha, 0x1062,
1614 __func__, vha->host_no)); 1586 "Failed to allocate login IOCB.\n");
1615 return QLA_MEMORY_ALLOC_FAILED; 1587 return QLA_MEMORY_ALLOC_FAILED;
1616 } 1588 }
1617 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1589 memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1631,21 +1603,21 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1631 lg->vp_index = vha->vp_idx; 1603 lg->vp_index = vha->vp_idx;
1632 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); 1604 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1633 if (rval != QLA_SUCCESS) { 1605 if (rval != QLA_SUCCESS) {
1634 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB " 1606 ql_dbg(ql_dbg_mbx, vha, 0x1063,
1635 "(%x).\n", __func__, vha->host_no, rval)); 1607 "Failed to issue login IOCB (%x).\n", rval);
1636 } else if (lg->entry_status != 0) { 1608 } else if (lg->entry_status != 0) {
1637 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1609 ql_dbg(ql_dbg_mbx, vha, 0x1064,
1638 "-- error status (%x).\n", __func__, vha->host_no, 1610 "Failed to complete IOCB -- error status (%x).\n",
1639 lg->entry_status)); 1611 lg->entry_status);
1640 rval = QLA_FUNCTION_FAILED; 1612 rval = QLA_FUNCTION_FAILED;
1641 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1613 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1642 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1614 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1643 iop[1] = le32_to_cpu(lg->io_parameter[1]); 1615 iop[1] = le32_to_cpu(lg->io_parameter[1]);
1644 1616
1645 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1617 ql_dbg(ql_dbg_mbx, vha, 0x1065,
1646 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1618 "Failed to complete IOCB -- completion status (%x) "
1647 vha->host_no, le16_to_cpu(lg->comp_status), iop[0], 1619 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
1648 iop[1])); 1620 iop[0], iop[1]);
1649 1621
1650 switch (iop[0]) { 1622 switch (iop[0]) {
1651 case LSC_SCODE_PORTID_USED: 1623 case LSC_SCODE_PORTID_USED:
@@ -1673,7 +1645,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1673 break; 1645 break;
1674 } 1646 }
1675 } else { 1647 } else {
1676 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1648 ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__);
1677 1649
1678 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1650 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1679 1651
@@ -1728,7 +1700,7 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1728 mbx_cmd_t *mcp = &mc; 1700 mbx_cmd_t *mcp = &mc;
1729 struct qla_hw_data *ha = vha->hw; 1701 struct qla_hw_data *ha = vha->hw;
1730 1702
1731 DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", vha->host_no)); 1703 ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__);
1732 1704
1733 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 1705 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1734 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1706 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1771,13 +1743,12 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1771 rval = QLA_SUCCESS; 1743 rval = QLA_SUCCESS;
1772 1744
1773 /*EMPTY*/ 1745 /*EMPTY*/
1774 DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x " 1746 ql_dbg(ql_dbg_mbx, vha, 0x1068,
1775 "mb[0]=%x mb[1]=%x mb[2]=%x.\n", vha->host_no, rval, 1747 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
1776 mcp->mb[0], mcp->mb[1], mcp->mb[2])); 1748 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
1777 } else { 1749 } else {
1778 /*EMPTY*/ 1750 /*EMPTY*/
1779 DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n", 1751 ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__);
1780 vha->host_no));
1781 } 1752 }
1782 1753
1783 return rval; 1754 return rval;
@@ -1808,13 +1779,13 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1808 mbx_cmd_t *mcp = &mc; 1779 mbx_cmd_t *mcp = &mc;
1809 struct qla_hw_data *ha = vha->hw; 1780 struct qla_hw_data *ha = vha->hw;
1810 1781
1782 ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__);
1783
1811 if (IS_FWI2_CAPABLE(ha)) 1784 if (IS_FWI2_CAPABLE(ha))
1812 return qla24xx_login_fabric(vha, fcport->loop_id, 1785 return qla24xx_login_fabric(vha, fcport->loop_id,
1813 fcport->d_id.b.domain, fcport->d_id.b.area, 1786 fcport->d_id.b.domain, fcport->d_id.b.area,
1814 fcport->d_id.b.al_pa, mb_ret, opt); 1787 fcport->d_id.b.al_pa, mb_ret, opt);
1815 1788
1816 DEBUG3(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1817
1818 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 1789 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
1819 if (HAS_EXTENDED_IDS(ha)) 1790 if (HAS_EXTENDED_IDS(ha))
1820 mcp->mb[1] = fcport->loop_id; 1791 mcp->mb[1] = fcport->loop_id;
@@ -1845,15 +1816,12 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1845 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) 1816 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
1846 rval = QLA_SUCCESS; 1817 rval = QLA_SUCCESS;
1847 1818
1848 DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 1819 ql_dbg(ql_dbg_mbx, vha, 0x106b,
1849 "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval, 1820 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
1850 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7])); 1821 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
1851 DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
1852 "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
1853 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
1854 } else { 1822 } else {
1855 /*EMPTY*/ 1823 /*EMPTY*/
1856 DEBUG3(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1824 ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__);
1857 } 1825 }
1858 1826
1859 return (rval); 1827 return (rval);
@@ -1870,12 +1838,12 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1870 struct req_que *req; 1838 struct req_que *req;
1871 struct rsp_que *rsp; 1839 struct rsp_que *rsp;
1872 1840
1873 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1841 ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__);
1874 1842
1875 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1843 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1876 if (lg == NULL) { 1844 if (lg == NULL) {
1877 DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n", 1845 ql_log(ql_log_warn, vha, 0x106e,
1878 __func__, vha->host_no)); 1846 "Failed to allocate logout IOCB.\n");
1879 return QLA_MEMORY_ALLOC_FAILED; 1847 return QLA_MEMORY_ALLOC_FAILED;
1880 } 1848 }
1881 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1849 memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1899,22 +1867,22 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1899 1867
1900 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); 1868 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1901 if (rval != QLA_SUCCESS) { 1869 if (rval != QLA_SUCCESS) {
1902 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB " 1870 ql_dbg(ql_dbg_mbx, vha, 0x106f,
1903 "(%x).\n", __func__, vha->host_no, rval)); 1871 "Failed to issue logout IOCB (%x).\n", rval);
1904 } else if (lg->entry_status != 0) { 1872 } else if (lg->entry_status != 0) {
1905 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1873 ql_dbg(ql_dbg_mbx, vha, 0x1070,
1906 "-- error status (%x).\n", __func__, vha->host_no, 1874 "Failed to complete IOCB -- error status (%x).\n",
1907 lg->entry_status)); 1875 lg->entry_status);
1908 rval = QLA_FUNCTION_FAILED; 1876 rval = QLA_FUNCTION_FAILED;
1909 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1877 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1910 DEBUG2_3_11(printk("%s(%ld %d): failed to complete IOCB " 1878 ql_dbg(ql_dbg_mbx, vha, 0x1071,
1911 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1879 "Failed to complete IOCB -- completion status (%x) "
1912 vha->host_no, vha->vp_idx, le16_to_cpu(lg->comp_status), 1880 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
1913 le32_to_cpu(lg->io_parameter[0]), 1881 le32_to_cpu(lg->io_parameter[0]),
1914 le32_to_cpu(lg->io_parameter[1]))); 1882 le32_to_cpu(lg->io_parameter[1]));
1915 } else { 1883 } else {
1916 /*EMPTY*/ 1884 /*EMPTY*/
1917 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1885 ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__);
1918 } 1886 }
1919 1887
1920 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 1888 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1946,8 +1914,7 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1946 mbx_cmd_t mc; 1914 mbx_cmd_t mc;
1947 mbx_cmd_t *mcp = &mc; 1915 mbx_cmd_t *mcp = &mc;
1948 1916
1949 DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n", 1917 ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__);
1950 vha->host_no));
1951 1918
1952 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 1919 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
1953 mcp->out_mb = MBX_1|MBX_0; 1920 mcp->out_mb = MBX_1|MBX_0;
@@ -1966,12 +1933,11 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1966 1933
1967 if (rval != QLA_SUCCESS) { 1934 if (rval != QLA_SUCCESS) {
1968 /*EMPTY*/ 1935 /*EMPTY*/
1969 DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x " 1936 ql_dbg(ql_dbg_mbx, vha, 0x1074,
1970 "mbx1=%x.\n", vha->host_no, rval, mcp->mb[1])); 1937 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
1971 } else { 1938 } else {
1972 /*EMPTY*/ 1939 /*EMPTY*/
1973 DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n", 1940 ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__);
1974 vha->host_no));
1975 } 1941 }
1976 1942
1977 return rval; 1943 return rval;
@@ -1999,8 +1965,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
1999 mbx_cmd_t mc; 1965 mbx_cmd_t mc;
2000 mbx_cmd_t *mcp = &mc; 1966 mbx_cmd_t *mcp = &mc;
2001 1967
2002 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1968 ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__);
2003 vha->host_no));
2004 1969
2005 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1970 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2006 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0; 1971 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
@@ -2014,12 +1979,10 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
2014 1979
2015 if (rval != QLA_SUCCESS) { 1980 if (rval != QLA_SUCCESS) {
2016 /*EMPTY*/ 1981 /*EMPTY*/
2017 DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n", 1982 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2018 vha->host_no, rval));
2019 } else { 1983 } else {
2020 /*EMPTY*/ 1984 /*EMPTY*/
2021 DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n", 1985 ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__);
2022 vha->host_no));
2023 } 1986 }
2024 1987
2025 return rval; 1988 return rval;
@@ -2045,8 +2008,7 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2045 mbx_cmd_t mc; 2008 mbx_cmd_t mc;
2046 mbx_cmd_t *mcp = &mc; 2009 mbx_cmd_t *mcp = &mc;
2047 2010
2048 DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n", 2011 ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__);
2049 vha->host_no));
2050 2012
2051 if (id_list == NULL) 2013 if (id_list == NULL)
2052 return QLA_FUNCTION_FAILED; 2014 return QLA_FUNCTION_FAILED;
@@ -2075,12 +2037,10 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2075 2037
2076 if (rval != QLA_SUCCESS) { 2038 if (rval != QLA_SUCCESS) {
2077 /*EMPTY*/ 2039 /*EMPTY*/
2078 DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n", 2040 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2079 vha->host_no, rval));
2080 } else { 2041 } else {
2081 *entries = mcp->mb[1]; 2042 *entries = mcp->mb[1];
2082 DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n", 2043 ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__);
2083 vha->host_no));
2084 } 2044 }
2085 2045
2086 return rval; 2046 return rval;
@@ -2108,7 +2068,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2108 mbx_cmd_t mc; 2068 mbx_cmd_t mc;
2109 mbx_cmd_t *mcp = &mc; 2069 mbx_cmd_t *mcp = &mc;
2110 2070
2111 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2071 ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__);
2112 2072
2113 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2073 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2114 mcp->out_mb = MBX_0; 2074 mcp->out_mb = MBX_0;
@@ -2121,14 +2081,14 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2121 2081
2122 if (rval != QLA_SUCCESS) { 2082 if (rval != QLA_SUCCESS) {
2123 /*EMPTY*/ 2083 /*EMPTY*/
2124 DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__, 2084 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2125 vha->host_no, mcp->mb[0])); 2085 "Failed mb[0]=%x.\n", mcp->mb[0]);
2126 } else { 2086 } else {
2127 DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x " 2087 ql_dbg(ql_dbg_mbx, vha, 0x107e,
2128 "mb7=%x mb10=%x mb11=%x mb12=%x.\n", __func__, 2088 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2129 vha->host_no, mcp->mb[1], mcp->mb[2], mcp->mb[3], 2089 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2130 mcp->mb[6], mcp->mb[7], mcp->mb[10], mcp->mb[11], 2090 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2131 mcp->mb[12])); 2091 mcp->mb[11], mcp->mb[12]);
2132 2092
2133 if (cur_xchg_cnt) 2093 if (cur_xchg_cnt)
2134 *cur_xchg_cnt = mcp->mb[3]; 2094 *cur_xchg_cnt = mcp->mb[3];
@@ -2147,7 +2107,6 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2147 return (rval); 2107 return (rval);
2148} 2108}
2149 2109
2150#if defined(QL_DEBUG_LEVEL_3)
2151/* 2110/*
2152 * qla2x00_get_fcal_position_map 2111 * qla2x00_get_fcal_position_map
2153 * Get FCAL (LILP) position map using mailbox command 2112 * Get FCAL (LILP) position map using mailbox command
@@ -2172,10 +2131,12 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2172 dma_addr_t pmap_dma; 2131 dma_addr_t pmap_dma;
2173 struct qla_hw_data *ha = vha->hw; 2132 struct qla_hw_data *ha = vha->hw;
2174 2133
2134 ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__);
2135
2175 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2136 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2176 if (pmap == NULL) { 2137 if (pmap == NULL) {
2177 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****", 2138 ql_log(ql_log_warn, vha, 0x1080,
2178 __func__, vha->host_no)); 2139 "Memory alloc failed.\n");
2179 return QLA_MEMORY_ALLOC_FAILED; 2140 return QLA_MEMORY_ALLOC_FAILED;
2180 } 2141 }
2181 memset(pmap, 0, FCAL_MAP_SIZE); 2142 memset(pmap, 0, FCAL_MAP_SIZE);
@@ -2193,10 +2154,11 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2193 rval = qla2x00_mailbox_command(vha, mcp); 2154 rval = qla2x00_mailbox_command(vha, mcp);
2194 2155
2195 if (rval == QLA_SUCCESS) { 2156 if (rval == QLA_SUCCESS) {
2196 DEBUG11(printk("%s(%ld): (mb0=%x/mb1=%x) FC/AL Position Map " 2157 ql_dbg(ql_dbg_mbx, vha, 0x1081,
2197 "size (%x)\n", __func__, vha->host_no, mcp->mb[0], 2158 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2198 mcp->mb[1], (unsigned)pmap[0])); 2159 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2199 DEBUG11(qla2x00_dump_buffer(pmap, pmap[0] + 1)); 2160 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2161 pmap, pmap[0] + 1);
2200 2162
2201 if (pos_map) 2163 if (pos_map)
2202 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 2164 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
@@ -2204,15 +2166,13 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2204 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 2166 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2205 2167
2206 if (rval != QLA_SUCCESS) { 2168 if (rval != QLA_SUCCESS) {
2207 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2169 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2208 vha->host_no, rval));
2209 } else { 2170 } else {
2210 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2171 ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__);
2211 } 2172 }
2212 2173
2213 return rval; 2174 return rval;
2214} 2175}
2215#endif
2216 2176
2217/* 2177/*
2218 * qla2x00_get_link_status 2178 * qla2x00_get_link_status
@@ -2237,7 +2197,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2237 uint32_t *siter, *diter, dwords; 2197 uint32_t *siter, *diter, dwords;
2238 struct qla_hw_data *ha = vha->hw; 2198 struct qla_hw_data *ha = vha->hw;
2239 2199
2240 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2200 ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__);
2241 2201
2242 mcp->mb[0] = MBC_GET_LINK_STATUS; 2202 mcp->mb[0] = MBC_GET_LINK_STATUS;
2243 mcp->mb[2] = MSW(stats_dma); 2203 mcp->mb[2] = MSW(stats_dma);
@@ -2266,11 +2226,12 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2266 2226
2267 if (rval == QLA_SUCCESS) { 2227 if (rval == QLA_SUCCESS) {
2268 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2228 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2269 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2229 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2270 __func__, vha->host_no, mcp->mb[0])); 2230 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2271 rval = QLA_FUNCTION_FAILED; 2231 rval = QLA_FUNCTION_FAILED;
2272 } else { 2232 } else {
2273 /* Copy over data -- firmware data is LE. */ 2233 /* Copy over data -- firmware data is LE. */
2234 ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__);
2274 dwords = offsetof(struct link_statistics, unused1) / 4; 2235 dwords = offsetof(struct link_statistics, unused1) / 4;
2275 siter = diter = &stats->link_fail_cnt; 2236 siter = diter = &stats->link_fail_cnt;
2276 while (dwords--) 2237 while (dwords--)
@@ -2278,8 +2239,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2278 } 2239 }
2279 } else { 2240 } else {
2280 /* Failed. */ 2241 /* Failed. */
2281 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2242 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
2282 vha->host_no, rval));
2283 } 2243 }
2284 2244
2285 return rval; 2245 return rval;
@@ -2294,7 +2254,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2294 mbx_cmd_t *mcp = &mc; 2254 mbx_cmd_t *mcp = &mc;
2295 uint32_t *siter, *diter, dwords; 2255 uint32_t *siter, *diter, dwords;
2296 2256
2297 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2257 ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__);
2298 2258
2299 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; 2259 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2300 mcp->mb[2] = MSW(stats_dma); 2260 mcp->mb[2] = MSW(stats_dma);
@@ -2312,10 +2272,11 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2312 2272
2313 if (rval == QLA_SUCCESS) { 2273 if (rval == QLA_SUCCESS) {
2314 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2274 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2315 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2275 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2316 __func__, vha->host_no, mcp->mb[0])); 2276 "Failed mb[0]=%x.\n", mcp->mb[0]);
2317 rval = QLA_FUNCTION_FAILED; 2277 rval = QLA_FUNCTION_FAILED;
2318 } else { 2278 } else {
2279 ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__);
2319 /* Copy over data -- firmware data is LE. */ 2280 /* Copy over data -- firmware data is LE. */
2320 dwords = sizeof(struct link_statistics) / 4; 2281 dwords = sizeof(struct link_statistics) / 4;
2321 siter = diter = &stats->link_fail_cnt; 2282 siter = diter = &stats->link_fail_cnt;
@@ -2324,8 +2285,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2324 } 2285 }
2325 } else { 2286 } else {
2326 /* Failed. */ 2287 /* Failed. */
2327 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2288 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
2328 vha->host_no, rval));
2329 } 2289 }
2330 2290
2331 return rval; 2291 return rval;
@@ -2345,7 +2305,7 @@ qla24xx_abort_command(srb_t *sp)
2345 struct qla_hw_data *ha = vha->hw; 2305 struct qla_hw_data *ha = vha->hw;
2346 struct req_que *req = vha->req; 2306 struct req_que *req = vha->req;
2347 2307
2348 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2308 ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__);
2349 2309
2350 spin_lock_irqsave(&ha->hardware_lock, flags); 2310 spin_lock_irqsave(&ha->hardware_lock, flags);
2351 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2311 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -2360,8 +2320,8 @@ qla24xx_abort_command(srb_t *sp)
2360 2320
2361 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 2321 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
2362 if (abt == NULL) { 2322 if (abt == NULL) {
2363 DEBUG2_3(printk("%s(%ld): failed to allocate Abort IOCB.\n", 2323 ql_log(ql_log_warn, vha, 0x108d,
2364 __func__, vha->host_no)); 2324 "Failed to allocate abort IOCB.\n");
2365 return QLA_MEMORY_ALLOC_FAILED; 2325 return QLA_MEMORY_ALLOC_FAILED;
2366 } 2326 }
2367 memset(abt, 0, sizeof(struct abort_entry_24xx)); 2327 memset(abt, 0, sizeof(struct abort_entry_24xx));
@@ -2380,20 +2340,20 @@ qla24xx_abort_command(srb_t *sp)
2380 2340
2381 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); 2341 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
2382 if (rval != QLA_SUCCESS) { 2342 if (rval != QLA_SUCCESS) {
2383 DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n", 2343 ql_dbg(ql_dbg_mbx, vha, 0x108e,
2384 __func__, vha->host_no, rval)); 2344 "Failed to issue IOCB (%x).\n", rval);
2385 } else if (abt->entry_status != 0) { 2345 } else if (abt->entry_status != 0) {
2386 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2346 ql_dbg(ql_dbg_mbx, vha, 0x108f,
2387 "-- error status (%x).\n", __func__, vha->host_no, 2347 "Failed to complete IOCB -- error status (%x).\n",
2388 abt->entry_status)); 2348 abt->entry_status);
2389 rval = QLA_FUNCTION_FAILED; 2349 rval = QLA_FUNCTION_FAILED;
2390 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) { 2350 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
2391 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2351 ql_dbg(ql_dbg_mbx, vha, 0x1090,
2392 "-- completion status (%x).\n", __func__, vha->host_no, 2352 "Failed to complete IOCB -- completion status (%x).\n",
2393 le16_to_cpu(abt->nport_handle))); 2353 le16_to_cpu(abt->nport_handle));
2394 rval = QLA_FUNCTION_FAILED; 2354 rval = QLA_FUNCTION_FAILED;
2395 } else { 2355 } else {
2396 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2356 ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__);
2397 } 2357 }
2398 2358
2399 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 2359 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2421,19 +2381,20 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2421 struct req_que *req; 2381 struct req_que *req;
2422 struct rsp_que *rsp; 2382 struct rsp_que *rsp;
2423 2383
2424 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
2425
2426 vha = fcport->vha; 2384 vha = fcport->vha;
2427 ha = vha->hw; 2385 ha = vha->hw;
2428 req = vha->req; 2386 req = vha->req;
2387
2388 ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__);
2389
2429 if (ha->flags.cpu_affinity_enabled) 2390 if (ha->flags.cpu_affinity_enabled)
2430 rsp = ha->rsp_q_map[tag + 1]; 2391 rsp = ha->rsp_q_map[tag + 1];
2431 else 2392 else
2432 rsp = req->rsp; 2393 rsp = req->rsp;
2433 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 2394 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2434 if (tsk == NULL) { 2395 if (tsk == NULL) {
2435 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " 2396 ql_log(ql_log_warn, vha, 0x1093,
2436 "IOCB.\n", __func__, vha->host_no)); 2397 "Failed to allocate task management IOCB.\n");
2437 return QLA_MEMORY_ALLOC_FAILED; 2398 return QLA_MEMORY_ALLOC_FAILED;
2438 } 2399 }
2439 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd)); 2400 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
@@ -2457,30 +2418,30 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2457 sts = &tsk->p.sts; 2418 sts = &tsk->p.sts;
2458 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 2419 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
2459 if (rval != QLA_SUCCESS) { 2420 if (rval != QLA_SUCCESS) {
2460 DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB " 2421 ql_dbg(ql_dbg_mbx, vha, 0x1094,
2461 "(%x).\n", __func__, vha->host_no, name, rval)); 2422 "Failed to issue %s reset IOCB (%x).\n", name, rval);
2462 } else if (sts->entry_status != 0) { 2423 } else if (sts->entry_status != 0) {
2463 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2424 ql_dbg(ql_dbg_mbx, vha, 0x1095,
2464 "-- error status (%x).\n", __func__, vha->host_no, 2425 "Failed to complete IOCB -- error status (%x).\n",
2465 sts->entry_status)); 2426 sts->entry_status);
2466 rval = QLA_FUNCTION_FAILED; 2427 rval = QLA_FUNCTION_FAILED;
2467 } else if (sts->comp_status != 2428 } else if (sts->comp_status !=
2468 __constant_cpu_to_le16(CS_COMPLETE)) { 2429 __constant_cpu_to_le16(CS_COMPLETE)) {
2469 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2430 ql_dbg(ql_dbg_mbx, vha, 0x1096,
2470 "-- completion status (%x).\n", __func__, 2431 "Failed to complete IOCB -- completion status (%x).\n",
2471 vha->host_no, le16_to_cpu(sts->comp_status))); 2432 le16_to_cpu(sts->comp_status));
2472 rval = QLA_FUNCTION_FAILED; 2433 rval = QLA_FUNCTION_FAILED;
2473 } else if (le16_to_cpu(sts->scsi_status) & 2434 } else if (le16_to_cpu(sts->scsi_status) &
2474 SS_RESPONSE_INFO_LEN_VALID) { 2435 SS_RESPONSE_INFO_LEN_VALID) {
2475 if (le32_to_cpu(sts->rsp_data_len) < 4) { 2436 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2476 DEBUG2_3_11(printk("%s(%ld): ignoring inconsistent " 2437 ql_dbg(ql_dbg_mbx, vha, 0x1097,
2477 "data length -- not enough response info (%d).\n", 2438 "Ignoring inconsistent data length -- not enough "
2478 __func__, vha->host_no, 2439 "response info (%d).\n",
2479 le32_to_cpu(sts->rsp_data_len))); 2440 le32_to_cpu(sts->rsp_data_len));
2480 } else if (sts->data[3]) { 2441 } else if (sts->data[3]) {
2481 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2442 ql_dbg(ql_dbg_mbx, vha, 0x1098,
2482 "-- response (%x).\n", __func__, 2443 "Failed to complete IOCB -- response (%x).\n",
2483 vha->host_no, sts->data[3])); 2444 sts->data[3]);
2484 rval = QLA_FUNCTION_FAILED; 2445 rval = QLA_FUNCTION_FAILED;
2485 } 2446 }
2486 } 2447 }
@@ -2489,10 +2450,10 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2489 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, 2450 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
2490 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID); 2451 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
2491 if (rval2 != QLA_SUCCESS) { 2452 if (rval2 != QLA_SUCCESS) {
2492 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 2453 ql_dbg(ql_dbg_mbx, vha, 0x1099,
2493 "(%x).\n", __func__, vha->host_no, rval2)); 2454 "Failed to issue marker IOCB (%x).\n", rval2);
2494 } else { 2455 } else {
2495 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2456 ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__);
2496 } 2457 }
2497 2458
2498 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 2459 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
@@ -2533,7 +2494,7 @@ qla2x00_system_error(scsi_qla_host_t *vha)
2533 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 2494 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
2534 return QLA_FUNCTION_FAILED; 2495 return QLA_FUNCTION_FAILED;
2535 2496
2536 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2497 ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__);
2537 2498
2538 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 2499 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
2539 mcp->out_mb = MBX_0; 2500 mcp->out_mb = MBX_0;
@@ -2543,10 +2504,9 @@ qla2x00_system_error(scsi_qla_host_t *vha)
2543 rval = qla2x00_mailbox_command(vha, mcp); 2504 rval = qla2x00_mailbox_command(vha, mcp);
2544 2505
2545 if (rval != QLA_SUCCESS) { 2506 if (rval != QLA_SUCCESS) {
2546 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2507 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
2547 vha->host_no, rval));
2548 } else { 2508 } else {
2549 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2509 ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__);
2550 } 2510 }
2551 2511
2552 return rval; 2512 return rval;
@@ -2566,7 +2526,7 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2566 mbx_cmd_t mc; 2526 mbx_cmd_t mc;
2567 mbx_cmd_t *mcp = &mc; 2527 mbx_cmd_t *mcp = &mc;
2568 2528
2569 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2529 ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__);
2570 2530
2571 mcp->mb[0] = MBC_SERDES_PARAMS; 2531 mcp->mb[0] = MBC_SERDES_PARAMS;
2572 mcp->mb[1] = BIT_0; 2532 mcp->mb[1] = BIT_0;
@@ -2581,11 +2541,11 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2581 2541
2582 if (rval != QLA_SUCCESS) { 2542 if (rval != QLA_SUCCESS) {
2583 /*EMPTY*/ 2543 /*EMPTY*/
2584 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 2544 ql_dbg(ql_dbg_mbx, vha, 0x109f,
2585 vha->host_no, rval, mcp->mb[0])); 2545 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2586 } else { 2546 } else {
2587 /*EMPTY*/ 2547 /*EMPTY*/
2588 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2548 ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__);
2589 } 2549 }
2590 2550
2591 return rval; 2551 return rval;
@@ -2601,7 +2561,7 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2601 if (!IS_FWI2_CAPABLE(vha->hw)) 2561 if (!IS_FWI2_CAPABLE(vha->hw))
2602 return QLA_FUNCTION_FAILED; 2562 return QLA_FUNCTION_FAILED;
2603 2563
2604 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2564 ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
2605 2565
2606 mcp->mb[0] = MBC_STOP_FIRMWARE; 2566 mcp->mb[0] = MBC_STOP_FIRMWARE;
2607 mcp->out_mb = MBX_0; 2567 mcp->out_mb = MBX_0;
@@ -2611,12 +2571,11 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2611 rval = qla2x00_mailbox_command(vha, mcp); 2571 rval = qla2x00_mailbox_command(vha, mcp);
2612 2572
2613 if (rval != QLA_SUCCESS) { 2573 if (rval != QLA_SUCCESS) {
2614 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2574 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
2615 vha->host_no, rval));
2616 if (mcp->mb[0] == MBS_INVALID_COMMAND) 2575 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2617 rval = QLA_INVALID_COMMAND; 2576 rval = QLA_INVALID_COMMAND;
2618 } else { 2577 } else {
2619 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2578 ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__);
2620 } 2579 }
2621 2580
2622 return rval; 2581 return rval;
@@ -2630,14 +2589,14 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2630 mbx_cmd_t mc; 2589 mbx_cmd_t mc;
2631 mbx_cmd_t *mcp = &mc; 2590 mbx_cmd_t *mcp = &mc;
2632 2591
2592 ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__);
2593
2633 if (!IS_FWI2_CAPABLE(vha->hw)) 2594 if (!IS_FWI2_CAPABLE(vha->hw))
2634 return QLA_FUNCTION_FAILED; 2595 return QLA_FUNCTION_FAILED;
2635 2596
2636 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2597 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2637 return QLA_FUNCTION_FAILED; 2598 return QLA_FUNCTION_FAILED;
2638 2599
2639 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2640
2641 mcp->mb[0] = MBC_TRACE_CONTROL; 2600 mcp->mb[0] = MBC_TRACE_CONTROL;
2642 mcp->mb[1] = TC_EFT_ENABLE; 2601 mcp->mb[1] = TC_EFT_ENABLE;
2643 mcp->mb[2] = LSW(eft_dma); 2602 mcp->mb[2] = LSW(eft_dma);
@@ -2652,10 +2611,11 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2652 mcp->flags = 0; 2611 mcp->flags = 0;
2653 rval = qla2x00_mailbox_command(vha, mcp); 2612 rval = qla2x00_mailbox_command(vha, mcp);
2654 if (rval != QLA_SUCCESS) { 2613 if (rval != QLA_SUCCESS) {
2655 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2614 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
2656 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2615 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2616 rval, mcp->mb[0], mcp->mb[1]);
2657 } else { 2617 } else {
2658 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2618 ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__);
2659 } 2619 }
2660 2620
2661 return rval; 2621 return rval;
@@ -2668,14 +2628,14 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2668 mbx_cmd_t mc; 2628 mbx_cmd_t mc;
2669 mbx_cmd_t *mcp = &mc; 2629 mbx_cmd_t *mcp = &mc;
2670 2630
2631 ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__);
2632
2671 if (!IS_FWI2_CAPABLE(vha->hw)) 2633 if (!IS_FWI2_CAPABLE(vha->hw))
2672 return QLA_FUNCTION_FAILED; 2634 return QLA_FUNCTION_FAILED;
2673 2635
2674 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2636 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2675 return QLA_FUNCTION_FAILED; 2637 return QLA_FUNCTION_FAILED;
2676 2638
2677 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2678
2679 mcp->mb[0] = MBC_TRACE_CONTROL; 2639 mcp->mb[0] = MBC_TRACE_CONTROL;
2680 mcp->mb[1] = TC_EFT_DISABLE; 2640 mcp->mb[1] = TC_EFT_DISABLE;
2681 mcp->out_mb = MBX_1|MBX_0; 2641 mcp->out_mb = MBX_1|MBX_0;
@@ -2684,10 +2644,11 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2684 mcp->flags = 0; 2644 mcp->flags = 0;
2685 rval = qla2x00_mailbox_command(vha, mcp); 2645 rval = qla2x00_mailbox_command(vha, mcp);
2686 if (rval != QLA_SUCCESS) { 2646 if (rval != QLA_SUCCESS) {
2687 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2647 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
2688 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2648 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2649 rval, mcp->mb[0], mcp->mb[1]);
2689 } else { 2650 } else {
2690 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2651 ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__);
2691 } 2652 }
2692 2653
2693 return rval; 2654 return rval;
@@ -2701,14 +2662,14 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2701 mbx_cmd_t mc; 2662 mbx_cmd_t mc;
2702 mbx_cmd_t *mcp = &mc; 2663 mbx_cmd_t *mcp = &mc;
2703 2664
2665 ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
2666
2704 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw)) 2667 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
2705 return QLA_FUNCTION_FAILED; 2668 return QLA_FUNCTION_FAILED;
2706 2669
2707 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2670 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2708 return QLA_FUNCTION_FAILED; 2671 return QLA_FUNCTION_FAILED;
2709 2672
2710 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2711
2712 mcp->mb[0] = MBC_TRACE_CONTROL; 2673 mcp->mb[0] = MBC_TRACE_CONTROL;
2713 mcp->mb[1] = TC_FCE_ENABLE; 2674 mcp->mb[1] = TC_FCE_ENABLE;
2714 mcp->mb[2] = LSW(fce_dma); 2675 mcp->mb[2] = LSW(fce_dma);
@@ -2727,10 +2688,11 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2727 mcp->flags = 0; 2688 mcp->flags = 0;
2728 rval = qla2x00_mailbox_command(vha, mcp); 2689 rval = qla2x00_mailbox_command(vha, mcp);
2729 if (rval != QLA_SUCCESS) { 2690 if (rval != QLA_SUCCESS) {
2730 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2691 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
2731 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2692 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2693 rval, mcp->mb[0], mcp->mb[1]);
2732 } else { 2694 } else {
2733 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2695 ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__);
2734 2696
2735 if (mb) 2697 if (mb)
2736 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 2698 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2748,14 +2710,14 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2748 mbx_cmd_t mc; 2710 mbx_cmd_t mc;
2749 mbx_cmd_t *mcp = &mc; 2711 mbx_cmd_t *mcp = &mc;
2750 2712
2713 ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__);
2714
2751 if (!IS_FWI2_CAPABLE(vha->hw)) 2715 if (!IS_FWI2_CAPABLE(vha->hw))
2752 return QLA_FUNCTION_FAILED; 2716 return QLA_FUNCTION_FAILED;
2753 2717
2754 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2718 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2755 return QLA_FUNCTION_FAILED; 2719 return QLA_FUNCTION_FAILED;
2756 2720
2757 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2758
2759 mcp->mb[0] = MBC_TRACE_CONTROL; 2721 mcp->mb[0] = MBC_TRACE_CONTROL;
2760 mcp->mb[1] = TC_FCE_DISABLE; 2722 mcp->mb[1] = TC_FCE_DISABLE;
2761 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 2723 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
@@ -2766,10 +2728,11 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2766 mcp->flags = 0; 2728 mcp->flags = 0;
2767 rval = qla2x00_mailbox_command(vha, mcp); 2729 rval = qla2x00_mailbox_command(vha, mcp);
2768 if (rval != QLA_SUCCESS) { 2730 if (rval != QLA_SUCCESS) {
2769 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2731 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
2770 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2732 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2733 rval, mcp->mb[0], mcp->mb[1]);
2771 } else { 2734 } else {
2772 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2735 ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__);
2773 2736
2774 if (wr) 2737 if (wr)
2775 *wr = (uint64_t) mcp->mb[5] << 48 | 2738 *wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2794,11 +2757,11 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2794 mbx_cmd_t mc; 2757 mbx_cmd_t mc;
2795 mbx_cmd_t *mcp = &mc; 2758 mbx_cmd_t *mcp = &mc;
2796 2759
2760 ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__);
2761
2797 if (!IS_IIDMA_CAPABLE(vha->hw)) 2762 if (!IS_IIDMA_CAPABLE(vha->hw))
2798 return QLA_FUNCTION_FAILED; 2763 return QLA_FUNCTION_FAILED;
2799 2764
2800 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2801
2802 mcp->mb[0] = MBC_PORT_PARAMS; 2765 mcp->mb[0] = MBC_PORT_PARAMS;
2803 mcp->mb[1] = loop_id; 2766 mcp->mb[1] = loop_id;
2804 mcp->mb[2] = mcp->mb[3] = 0; 2767 mcp->mb[2] = mcp->mb[3] = 0;
@@ -2817,10 +2780,9 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2817 } 2780 }
2818 2781
2819 if (rval != QLA_SUCCESS) { 2782 if (rval != QLA_SUCCESS) {
2820 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2783 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
2821 vha->host_no, rval));
2822 } else { 2784 } else {
2823 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2785 ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__);
2824 if (port_speed) 2786 if (port_speed)
2825 *port_speed = mcp->mb[3]; 2787 *port_speed = mcp->mb[3];
2826 } 2788 }
@@ -2836,11 +2798,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2836 mbx_cmd_t mc; 2798 mbx_cmd_t mc;
2837 mbx_cmd_t *mcp = &mc; 2799 mbx_cmd_t *mcp = &mc;
2838 2800
2801 ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__);
2802
2839 if (!IS_IIDMA_CAPABLE(vha->hw)) 2803 if (!IS_IIDMA_CAPABLE(vha->hw))
2840 return QLA_FUNCTION_FAILED; 2804 return QLA_FUNCTION_FAILED;
2841 2805
2842 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2843
2844 mcp->mb[0] = MBC_PORT_PARAMS; 2806 mcp->mb[0] = MBC_PORT_PARAMS;
2845 mcp->mb[1] = loop_id; 2807 mcp->mb[1] = loop_id;
2846 mcp->mb[2] = BIT_0; 2808 mcp->mb[2] = BIT_0;
@@ -2863,10 +2825,9 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2863 } 2825 }
2864 2826
2865 if (rval != QLA_SUCCESS) { 2827 if (rval != QLA_SUCCESS) {
2866 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2828 ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval);
2867 vha->host_no, rval));
2868 } else { 2829 } else {
2869 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2830 ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__);
2870 } 2831 }
2871 2832
2872 return rval; 2833 return rval;
@@ -2882,33 +2843,36 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2882 scsi_qla_host_t *vp; 2843 scsi_qla_host_t *vp;
2883 unsigned long flags; 2844 unsigned long flags;
2884 2845
2846 ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__);
2847
2885 if (rptid_entry->entry_status != 0) 2848 if (rptid_entry->entry_status != 0)
2886 return; 2849 return;
2887 2850
2888 if (rptid_entry->format == 0) { 2851 if (rptid_entry->format == 0) {
2889 DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d," 2852 ql_dbg(ql_dbg_mbx, vha, 0x10b7,
2890 " number of VPs acquired %d\n", __func__, vha->host_no, 2853 "Format 0 : Number of VPs setup %d, number of "
2891 MSB(le16_to_cpu(rptid_entry->vp_count)), 2854 "VPs acquired %d.\n",
2892 LSB(le16_to_cpu(rptid_entry->vp_count)))); 2855 MSB(le16_to_cpu(rptid_entry->vp_count)),
2893 DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__, 2856 LSB(le16_to_cpu(rptid_entry->vp_count)));
2894 rptid_entry->port_id[2], rptid_entry->port_id[1], 2857 ql_dbg(ql_dbg_mbx, vha, 0x10b8,
2895 rptid_entry->port_id[0])); 2858 "Primary port id %02x%02x%02x.\n",
2859 rptid_entry->port_id[2], rptid_entry->port_id[1],
2860 rptid_entry->port_id[0]);
2896 } else if (rptid_entry->format == 1) { 2861 } else if (rptid_entry->format == 1) {
2897 vp_idx = LSB(stat); 2862 vp_idx = LSB(stat);
2898 DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled " 2863 ql_dbg(ql_dbg_mbx, vha, 0x10b9,
2899 "- status %d - " 2864 "Format 1: VP[%d] enabled - status %d - with "
2900 "with port id %02x%02x%02x\n", __func__, vha->host_no, 2865 "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
2901 vp_idx, MSB(stat),
2902 rptid_entry->port_id[2], rptid_entry->port_id[1], 2866 rptid_entry->port_id[2], rptid_entry->port_id[1],
2903 rptid_entry->port_id[0])); 2867 rptid_entry->port_id[0]);
2904 2868
2905 vp = vha; 2869 vp = vha;
2906 if (vp_idx == 0 && (MSB(stat) != 1)) 2870 if (vp_idx == 0 && (MSB(stat) != 1))
2907 goto reg_needed; 2871 goto reg_needed;
2908 2872
2909 if (MSB(stat) == 1) { 2873 if (MSB(stat) == 1) {
2910 DEBUG2(printk("scsi(%ld): Could not acquire ID for " 2874 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
2911 "VP[%d].\n", vha->host_no, vp_idx)); 2875 "Could not acquire ID for VP[%d].\n", vp_idx);
2912 return; 2876 return;
2913 } 2877 }
2914 2878
@@ -2963,10 +2927,12 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2963 2927
2964 /* This can be called by the parent */ 2928 /* This can be called by the parent */
2965 2929
2930 ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__);
2931
2966 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 2932 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
2967 if (!vpmod) { 2933 if (!vpmod) {
2968 DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP " 2934 ql_log(ql_log_warn, vha, 0x10bc,
2969 "IOCB.\n", __func__, vha->host_no)); 2935 "Failed to allocate modify VP IOCB.\n");
2970 return QLA_MEMORY_ALLOC_FAILED; 2936 return QLA_MEMORY_ALLOC_FAILED;
2971 } 2937 }
2972 2938
@@ -2983,22 +2949,21 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2983 2949
2984 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 2950 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
2985 if (rval != QLA_SUCCESS) { 2951 if (rval != QLA_SUCCESS) {
2986 DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB" 2952 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
2987 "(%x).\n", __func__, base_vha->host_no, rval)); 2953 "Failed to issue VP config IOCB (%x).\n", rval);
2988 } else if (vpmod->comp_status != 0) { 2954 } else if (vpmod->comp_status != 0) {
2989 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2955 ql_dbg(ql_dbg_mbx, vha, 0x10be,
2990 "-- error status (%x).\n", __func__, base_vha->host_no, 2956 "Failed to complete IOCB -- error status (%x).\n",
2991 vpmod->comp_status)); 2957 vpmod->comp_status);
2992 rval = QLA_FUNCTION_FAILED; 2958 rval = QLA_FUNCTION_FAILED;
2993 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 2959 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
2994 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2960 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
2995 "-- completion status (%x).\n", __func__, base_vha->host_no, 2961 "Failed to complete IOCB -- completion status (%x).\n",
2996 le16_to_cpu(vpmod->comp_status))); 2962 le16_to_cpu(vpmod->comp_status));
2997 rval = QLA_FUNCTION_FAILED; 2963 rval = QLA_FUNCTION_FAILED;
2998 } else { 2964 } else {
2999 /* EMPTY */ 2965 /* EMPTY */
3000 DEBUG11(printk("%s(%ld): done.\n", __func__, 2966 ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__);
3001 base_vha->host_no));
3002 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 2967 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
3003 } 2968 }
3004 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 2969 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
@@ -3032,17 +2997,16 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3032 int vp_index = vha->vp_idx; 2997 int vp_index = vha->vp_idx;
3033 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2998 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3034 2999
3035 DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__, 3000 ql_dbg(ql_dbg_mbx, vha, 0x10c1,
3036 vha->host_no, vp_index)); 3001 "Entered %s enabling index %d.\n", __func__, vp_index);
3037 3002
3038 if (vp_index == 0 || vp_index >= ha->max_npiv_vports) 3003 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
3039 return QLA_PARAMETER_ERROR; 3004 return QLA_PARAMETER_ERROR;
3040 3005
3041 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma); 3006 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
3042 if (!vce) { 3007 if (!vce) {
3043 DEBUG2_3(printk("%s(%ld): " 3008 ql_log(ql_log_warn, vha, 0x10c2,
3044 "failed to allocate VP Control IOCB.\n", __func__, 3009 "Failed to allocate VP control IOCB.\n");
3045 base_vha->host_no));
3046 return QLA_MEMORY_ALLOC_FAILED; 3010 return QLA_MEMORY_ALLOC_FAILED;
3047 } 3011 }
3048 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx)); 3012 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
@@ -3063,28 +3027,20 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3063 3027
3064 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0); 3028 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
3065 if (rval != QLA_SUCCESS) { 3029 if (rval != QLA_SUCCESS) {
3066 DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB" 3030 ql_dbg(ql_dbg_mbx, vha, 0x10c3,
3067 "(%x).\n", __func__, base_vha->host_no, rval)); 3031 "Failed to issue VP control IOCB (%x).\n", rval);
3068 printk("%s(%ld): failed to issue VP control IOCB"
3069 "(%x).\n", __func__, base_vha->host_no, rval);
3070 } else if (vce->entry_status != 0) { 3032 } else if (vce->entry_status != 0) {
3071 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 3033 ql_dbg(ql_dbg_mbx, vha, 0x10c4,
3072 "-- error status (%x).\n", __func__, base_vha->host_no, 3034 "Failed to complete IOCB -- error status (%x).\n",
3073 vce->entry_status));
3074 printk("%s(%ld): failed to complete IOCB "
3075 "-- error status (%x).\n", __func__, base_vha->host_no,
3076 vce->entry_status); 3035 vce->entry_status);
3077 rval = QLA_FUNCTION_FAILED; 3036 rval = QLA_FUNCTION_FAILED;
3078 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 3037 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
3079 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 3038 ql_dbg(ql_dbg_mbx, vha, 0x10c5,
3080 "-- completion status (%x).\n", __func__, base_vha->host_no, 3039 "Failed to complet IOCB -- completion status (%x).\n",
3081 le16_to_cpu(vce->comp_status)));
3082 printk("%s(%ld): failed to complete IOCB "
3083 "-- completion status (%x).\n", __func__, base_vha->host_no,
3084 le16_to_cpu(vce->comp_status)); 3040 le16_to_cpu(vce->comp_status));
3085 rval = QLA_FUNCTION_FAILED; 3041 rval = QLA_FUNCTION_FAILED;
3086 } else { 3042 } else {
3087 DEBUG2(printk("%s(%ld): done.\n", __func__, base_vha->host_no)); 3043 ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__);
3088 } 3044 }
3089 3045
3090 dma_pool_free(ha->s_dma_pool, vce, vce_dma); 3046 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -3121,6 +3077,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
3121 mbx_cmd_t mc; 3077 mbx_cmd_t mc;
3122 mbx_cmd_t *mcp = &mc; 3078 mbx_cmd_t *mcp = &mc;
3123 3079
3080 ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__);
3081
3124 /* 3082 /*
3125 * This command is implicitly executed by firmware during login for the 3083 * This command is implicitly executed by firmware during login for the
3126 * physical hosts 3084 * physical hosts
@@ -3155,7 +3113,7 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3155 mbx_cmd_t mc; 3113 mbx_cmd_t mc;
3156 mbx_cmd_t *mcp = &mc; 3114 mbx_cmd_t *mcp = &mc;
3157 3115
3158 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3116 ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__);
3159 3117
3160 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 3118 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
3161 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 3119 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
@@ -3186,10 +3144,10 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3186 rval = qla2x00_mailbox_command(vha, mcp); 3144 rval = qla2x00_mailbox_command(vha, mcp);
3187 3145
3188 if (rval != QLA_SUCCESS) { 3146 if (rval != QLA_SUCCESS) {
3189 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 3147 ql_dbg(ql_dbg_mbx, vha, 0x1008,
3190 vha->host_no, rval, mcp->mb[0])); 3148 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3191 } else { 3149 } else {
3192 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3150 ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__);
3193 } 3151 }
3194 3152
3195 return rval; 3153 return rval;
@@ -3214,12 +3172,10 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3214 unsigned long flags; 3172 unsigned long flags;
3215 struct qla_hw_data *ha = vha->hw; 3173 struct qla_hw_data *ha = vha->hw;
3216 3174
3217 DEBUG16(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3175 ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__);
3218 3176
3219 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 3177 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
3220 if (mn == NULL) { 3178 if (mn == NULL) {
3221 DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX "
3222 "IOCB.\n", __func__, vha->host_no));
3223 return QLA_MEMORY_ALLOC_FAILED; 3179 return QLA_MEMORY_ALLOC_FAILED;
3224 } 3180 }
3225 3181
@@ -3237,43 +3193,43 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3237 mn->p.req.entry_count = 1; 3193 mn->p.req.entry_count = 1;
3238 mn->p.req.options = cpu_to_le16(options); 3194 mn->p.req.options = cpu_to_le16(options);
3239 3195
3240 DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__, 3196 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
3241 vha->host_no)); 3197 "Dump of Verify Request.\n");
3242 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, 3198 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
3243 sizeof(*mn))); 3199 (uint8_t *)mn, sizeof(*mn));
3244 3200
3245 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 3201 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
3246 if (rval != QLA_SUCCESS) { 3202 if (rval != QLA_SUCCESS) {
3247 DEBUG2_16(printk("%s(%ld): failed to issue Verify " 3203 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
3248 "IOCB (%x).\n", __func__, vha->host_no, rval)); 3204 "Failed to issue verify IOCB (%x).\n", rval);
3249 goto verify_done; 3205 goto verify_done;
3250 } 3206 }
3251 3207
3252 DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__, 3208 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
3253 vha->host_no)); 3209 "Dump of Verify Response.\n");
3254 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, 3210 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
3255 sizeof(*mn))); 3211 (uint8_t *)mn, sizeof(*mn));
3256 3212
3257 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 3213 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
3258 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 3214 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
3259 le16_to_cpu(mn->p.rsp.failure_code) : 0; 3215 le16_to_cpu(mn->p.rsp.failure_code) : 0;
3260 DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__, 3216 ql_dbg(ql_dbg_mbx, vha, 0x10ce,
3261 vha->host_no, status[0], status[1])); 3217 "cs=%x fc=%x.\n", status[0], status[1]);
3262 3218
3263 if (status[0] != CS_COMPLETE) { 3219 if (status[0] != CS_COMPLETE) {
3264 rval = QLA_FUNCTION_FAILED; 3220 rval = QLA_FUNCTION_FAILED;
3265 if (!(options & VCO_DONT_UPDATE_FW)) { 3221 if (!(options & VCO_DONT_UPDATE_FW)) {
3266 DEBUG2_16(printk("%s(%ld): Firmware update " 3222 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
3267 "failed. Retrying without update " 3223 "Firmware update failed. Retrying "
3268 "firmware.\n", __func__, vha->host_no)); 3224 "without update firmware.\n");
3269 options |= VCO_DONT_UPDATE_FW; 3225 options |= VCO_DONT_UPDATE_FW;
3270 options &= ~VCO_FORCE_UPDATE; 3226 options &= ~VCO_FORCE_UPDATE;
3271 retry = 1; 3227 retry = 1;
3272 } 3228 }
3273 } else { 3229 } else {
3274 DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n", 3230 ql_dbg(ql_dbg_mbx, vha, 0x10d0,
3275 __func__, vha->host_no, 3231 "Firmware updated to %x.\n",
3276 le32_to_cpu(mn->p.rsp.fw_ver))); 3232 le32_to_cpu(mn->p.rsp.fw_ver));
3277 3233
3278 /* NOTE: we only update OP firmware. */ 3234 /* NOTE: we only update OP firmware. */
3279 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 3235 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
@@ -3288,10 +3244,9 @@ verify_done:
3288 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 3244 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
3289 3245
3290 if (rval != QLA_SUCCESS) { 3246 if (rval != QLA_SUCCESS) {
3291 DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__, 3247 ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
3292 vha->host_no, rval));
3293 } else { 3248 } else {
3294 DEBUG16(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3249 ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__);
3295 } 3250 }
3296 3251
3297 return rval; 3252 return rval;
@@ -3307,6 +3262,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3307 struct device_reg_25xxmq __iomem *reg; 3262 struct device_reg_25xxmq __iomem *reg;
3308 struct qla_hw_data *ha = vha->hw; 3263 struct qla_hw_data *ha = vha->hw;
3309 3264
3265 ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__);
3266
3310 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3267 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3311 mcp->mb[1] = req->options; 3268 mcp->mb[1] = req->options;
3312 mcp->mb[2] = MSW(LSD(req->dma)); 3269 mcp->mb[2] = MSW(LSD(req->dma));
@@ -3344,9 +3301,13 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3344 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3301 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3345 3302
3346 rval = qla2x00_mailbox_command(vha, mcp); 3303 rval = qla2x00_mailbox_command(vha, mcp);
3347 if (rval != QLA_SUCCESS) 3304 if (rval != QLA_SUCCESS) {
3348 DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x mb0=%x.\n", 3305 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
3349 __func__, vha->host_no, rval, mcp->mb[0])); 3306 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3307 } else {
3308 ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__);
3309 }
3310
3350 return rval; 3311 return rval;
3351} 3312}
3352 3313
@@ -3360,6 +3321,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3360 struct device_reg_25xxmq __iomem *reg; 3321 struct device_reg_25xxmq __iomem *reg;
3361 struct qla_hw_data *ha = vha->hw; 3322 struct qla_hw_data *ha = vha->hw;
3362 3323
3324 ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__);
3325
3363 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3326 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3364 mcp->mb[1] = rsp->options; 3327 mcp->mb[1] = rsp->options;
3365 mcp->mb[2] = MSW(LSD(rsp->dma)); 3328 mcp->mb[2] = MSW(LSD(rsp->dma));
@@ -3393,10 +3356,13 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3393 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3356 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3394 3357
3395 rval = qla2x00_mailbox_command(vha, mcp); 3358 rval = qla2x00_mailbox_command(vha, mcp);
3396 if (rval != QLA_SUCCESS) 3359 if (rval != QLA_SUCCESS) {
3397 DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x " 3360 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
3398 "mb0=%x.\n", __func__, 3361 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3399 vha->host_no, rval, mcp->mb[0])); 3362 } else {
3363 ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__);
3364 }
3365
3400 return rval; 3366 return rval;
3401} 3367}
3402 3368
@@ -3407,7 +3373,7 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3407 mbx_cmd_t mc; 3373 mbx_cmd_t mc;
3408 mbx_cmd_t *mcp = &mc; 3374 mbx_cmd_t *mcp = &mc;
3409 3375
3410 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3376 ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__);
3411 3377
3412 mcp->mb[0] = MBC_IDC_ACK; 3378 mcp->mb[0] = MBC_IDC_ACK;
3413 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 3379 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
@@ -3418,10 +3384,10 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3418 rval = qla2x00_mailbox_command(vha, mcp); 3384 rval = qla2x00_mailbox_command(vha, mcp);
3419 3385
3420 if (rval != QLA_SUCCESS) { 3386 if (rval != QLA_SUCCESS) {
3421 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3387 ql_dbg(ql_dbg_mbx, vha, 0x10da,
3422 vha->host_no, rval, mcp->mb[0])); 3388 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3423 } else { 3389 } else {
3424 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3390 ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__);
3425 } 3391 }
3426 3392
3427 return rval; 3393 return rval;
@@ -3434,11 +3400,11 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3434 mbx_cmd_t mc; 3400 mbx_cmd_t mc;
3435 mbx_cmd_t *mcp = &mc; 3401 mbx_cmd_t *mcp = &mc;
3436 3402
3403 ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
3404
3437 if (!IS_QLA81XX(vha->hw)) 3405 if (!IS_QLA81XX(vha->hw))
3438 return QLA_FUNCTION_FAILED; 3406 return QLA_FUNCTION_FAILED;
3439 3407
3440 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3441
3442 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3408 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3443 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 3409 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
3444 mcp->out_mb = MBX_1|MBX_0; 3410 mcp->out_mb = MBX_1|MBX_0;
@@ -3448,10 +3414,11 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3448 rval = qla2x00_mailbox_command(vha, mcp); 3414 rval = qla2x00_mailbox_command(vha, mcp);
3449 3415
3450 if (rval != QLA_SUCCESS) { 3416 if (rval != QLA_SUCCESS) {
3451 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 3417 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
3452 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3418 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3419 rval, mcp->mb[0], mcp->mb[1]);
3453 } else { 3420 } else {
3454 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3421 ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__);
3455 *sector_size = mcp->mb[1]; 3422 *sector_size = mcp->mb[1];
3456 } 3423 }
3457 3424
@@ -3468,7 +3435,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3468 if (!IS_QLA81XX(vha->hw)) 3435 if (!IS_QLA81XX(vha->hw))
3469 return QLA_FUNCTION_FAILED; 3436 return QLA_FUNCTION_FAILED;
3470 3437
3471 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3438 ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
3472 3439
3473 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3440 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3474 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 3441 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
@@ -3480,10 +3447,11 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3480 rval = qla2x00_mailbox_command(vha, mcp); 3447 rval = qla2x00_mailbox_command(vha, mcp);
3481 3448
3482 if (rval != QLA_SUCCESS) { 3449 if (rval != QLA_SUCCESS) {
3483 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 3450 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
3484 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3451 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3452 rval, mcp->mb[0], mcp->mb[1]);
3485 } else { 3453 } else {
3486 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3454 ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__);
3487 } 3455 }
3488 3456
3489 return rval; 3457 return rval;
@@ -3499,7 +3467,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3499 if (!IS_QLA81XX(vha->hw)) 3467 if (!IS_QLA81XX(vha->hw))
3500 return QLA_FUNCTION_FAILED; 3468 return QLA_FUNCTION_FAILED;
3501 3469
3502 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3470 ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
3503 3471
3504 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3472 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3505 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 3473 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
@@ -3514,11 +3482,11 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3514 rval = qla2x00_mailbox_command(vha, mcp); 3482 rval = qla2x00_mailbox_command(vha, mcp);
3515 3483
3516 if (rval != QLA_SUCCESS) { 3484 if (rval != QLA_SUCCESS) {
3517 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 3485 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
3518 "mb[2]=%x.\n", __func__, vha->host_no, rval, mcp->mb[0], 3486 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3519 mcp->mb[1], mcp->mb[2])); 3487 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3520 } else { 3488 } else {
3521 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3489 ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__);
3522 } 3490 }
3523 3491
3524 return rval; 3492 return rval;
@@ -3531,7 +3499,7 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3531 mbx_cmd_t mc; 3499 mbx_cmd_t mc;
3532 mbx_cmd_t *mcp = &mc; 3500 mbx_cmd_t *mcp = &mc;
3533 3501
3534 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3502 ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__);
3535 3503
3536 mcp->mb[0] = MBC_RESTART_MPI_FW; 3504 mcp->mb[0] = MBC_RESTART_MPI_FW;
3537 mcp->out_mb = MBX_0; 3505 mcp->out_mb = MBX_0;
@@ -3541,10 +3509,11 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3541 rval = qla2x00_mailbox_command(vha, mcp); 3509 rval = qla2x00_mailbox_command(vha, mcp);
3542 3510
3543 if (rval != QLA_SUCCESS) { 3511 if (rval != QLA_SUCCESS) {
3544 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n", 3512 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
3545 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3513 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3514 rval, mcp->mb[0], mcp->mb[1]);
3546 } else { 3515 } else {
3547 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3516 ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__);
3548 } 3517 }
3549 3518
3550 return rval; 3519 return rval;
@@ -3559,11 +3528,11 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3559 mbx_cmd_t *mcp = &mc; 3528 mbx_cmd_t *mcp = &mc;
3560 struct qla_hw_data *ha = vha->hw; 3529 struct qla_hw_data *ha = vha->hw;
3561 3530
3531 ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__);
3532
3562 if (!IS_FWI2_CAPABLE(ha)) 3533 if (!IS_FWI2_CAPABLE(ha))
3563 return QLA_FUNCTION_FAILED; 3534 return QLA_FUNCTION_FAILED;
3564 3535
3565 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3566
3567 if (len == 1) 3536 if (len == 1)
3568 opt |= BIT_0; 3537 opt |= BIT_0;
3569 3538
@@ -3586,10 +3555,10 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3586 *sfp = mcp->mb[1]; 3555 *sfp = mcp->mb[1];
3587 3556
3588 if (rval != QLA_SUCCESS) { 3557 if (rval != QLA_SUCCESS) {
3589 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3558 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
3590 vha->host_no, rval, mcp->mb[0])); 3559 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3591 } else { 3560 } else {
3592 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3561 ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__);
3593 } 3562 }
3594 3563
3595 return rval; 3564 return rval;
@@ -3604,11 +3573,11 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3604 mbx_cmd_t *mcp = &mc; 3573 mbx_cmd_t *mcp = &mc;
3605 struct qla_hw_data *ha = vha->hw; 3574 struct qla_hw_data *ha = vha->hw;
3606 3575
3576 ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__);
3577
3607 if (!IS_FWI2_CAPABLE(ha)) 3578 if (!IS_FWI2_CAPABLE(ha))
3608 return QLA_FUNCTION_FAILED; 3579 return QLA_FUNCTION_FAILED;
3609 3580
3610 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3611
3612 if (len == 1) 3581 if (len == 1)
3613 opt |= BIT_0; 3582 opt |= BIT_0;
3614 3583
@@ -3631,10 +3600,10 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3631 rval = qla2x00_mailbox_command(vha, mcp); 3600 rval = qla2x00_mailbox_command(vha, mcp);
3632 3601
3633 if (rval != QLA_SUCCESS) { 3602 if (rval != QLA_SUCCESS) {
3634 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3603 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
3635 vha->host_no, rval, mcp->mb[0])); 3604 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3636 } else { 3605 } else {
3637 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3606 ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__);
3638 } 3607 }
3639 3608
3640 return rval; 3609 return rval;
@@ -3648,11 +3617,11 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3648 mbx_cmd_t mc; 3617 mbx_cmd_t mc;
3649 mbx_cmd_t *mcp = &mc; 3618 mbx_cmd_t *mcp = &mc;
3650 3619
3620 ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
3621
3651 if (!IS_QLA8XXX_TYPE(vha->hw)) 3622 if (!IS_QLA8XXX_TYPE(vha->hw))
3652 return QLA_FUNCTION_FAILED; 3623 return QLA_FUNCTION_FAILED;
3653 3624
3654 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3655
3656 mcp->mb[0] = MBC_GET_XGMAC_STATS; 3625 mcp->mb[0] = MBC_GET_XGMAC_STATS;
3657 mcp->mb[2] = MSW(stats_dma); 3626 mcp->mb[2] = MSW(stats_dma);
3658 mcp->mb[3] = LSW(stats_dma); 3627 mcp->mb[3] = LSW(stats_dma);
@@ -3666,11 +3635,12 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3666 rval = qla2x00_mailbox_command(vha, mcp); 3635 rval = qla2x00_mailbox_command(vha, mcp);
3667 3636
3668 if (rval != QLA_SUCCESS) { 3637 if (rval != QLA_SUCCESS) {
3669 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " 3638 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
3670 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, 3639 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3671 mcp->mb[0], mcp->mb[1], mcp->mb[2])); 3640 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3672 } else { 3641 } else {
3673 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3642 ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__);
3643
3674 3644
3675 *actual_size = mcp->mb[2] << 2; 3645 *actual_size = mcp->mb[2] << 2;
3676 } 3646 }
@@ -3686,11 +3656,11 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3686 mbx_cmd_t mc; 3656 mbx_cmd_t mc;
3687 mbx_cmd_t *mcp = &mc; 3657 mbx_cmd_t *mcp = &mc;
3688 3658
3659 ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
3660
3689 if (!IS_QLA8XXX_TYPE(vha->hw)) 3661 if (!IS_QLA8XXX_TYPE(vha->hw))
3690 return QLA_FUNCTION_FAILED; 3662 return QLA_FUNCTION_FAILED;
3691 3663
3692 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3693
3694 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 3664 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
3695 mcp->mb[1] = 0; 3665 mcp->mb[1] = 0;
3696 mcp->mb[2] = MSW(tlv_dma); 3666 mcp->mb[2] = MSW(tlv_dma);
@@ -3705,11 +3675,11 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3705 rval = qla2x00_mailbox_command(vha, mcp); 3675 rval = qla2x00_mailbox_command(vha, mcp);
3706 3676
3707 if (rval != QLA_SUCCESS) { 3677 if (rval != QLA_SUCCESS) {
3708 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " 3678 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
3709 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, 3679 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3710 mcp->mb[0], mcp->mb[1], mcp->mb[2])); 3680 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3711 } else { 3681 } else {
3712 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3682 ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__);
3713 } 3683 }
3714 3684
3715 return rval; 3685 return rval;
@@ -3722,11 +3692,11 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3722 mbx_cmd_t mc; 3692 mbx_cmd_t mc;
3723 mbx_cmd_t *mcp = &mc; 3693 mbx_cmd_t *mcp = &mc;
3724 3694
3695 ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__);
3696
3725 if (!IS_FWI2_CAPABLE(vha->hw)) 3697 if (!IS_FWI2_CAPABLE(vha->hw))
3726 return QLA_FUNCTION_FAILED; 3698 return QLA_FUNCTION_FAILED;
3727 3699
3728 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3729
3730 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 3700 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
3731 mcp->mb[1] = LSW(risc_addr); 3701 mcp->mb[1] = LSW(risc_addr);
3732 mcp->mb[8] = MSW(risc_addr); 3702 mcp->mb[8] = MSW(risc_addr);
@@ -3736,10 +3706,10 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3736 mcp->flags = 0; 3706 mcp->flags = 0;
3737 rval = qla2x00_mailbox_command(vha, mcp); 3707 rval = qla2x00_mailbox_command(vha, mcp);
3738 if (rval != QLA_SUCCESS) { 3708 if (rval != QLA_SUCCESS) {
3739 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 3709 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
3740 vha->host_no, rval, mcp->mb[0])); 3710 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3741 } else { 3711 } else {
3742 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3712 ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__);
3743 *data = mcp->mb[3] << 16 | mcp->mb[2]; 3713 *data = mcp->mb[3] << 16 | mcp->mb[2];
3744 } 3714 }
3745 3715
@@ -3755,7 +3725,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3755 mbx_cmd_t *mcp = &mc; 3725 mbx_cmd_t *mcp = &mc;
3756 uint32_t iter_cnt = 0x1; 3726 uint32_t iter_cnt = 0x1;
3757 3727
3758 DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no)); 3728 ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__);
3759 3729
3760 memset(mcp->mb, 0 , sizeof(mcp->mb)); 3730 memset(mcp->mb, 0 , sizeof(mcp->mb));
3761 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 3731 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
@@ -3794,15 +3764,12 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3794 rval = qla2x00_mailbox_command(vha, mcp); 3764 rval = qla2x00_mailbox_command(vha, mcp);
3795 3765
3796 if (rval != QLA_SUCCESS) { 3766 if (rval != QLA_SUCCESS) {
3797 DEBUG2(printk(KERN_WARNING 3767 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
3798 "(%ld): failed=%x mb[0]=0x%x " 3768 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
3799 "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x " 3769 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
3800 "mb[19]=0x%x.\n", 3770 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
3801 vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
3802 mcp->mb[3], mcp->mb[18], mcp->mb[19]));
3803 } else { 3771 } else {
3804 DEBUG2(printk(KERN_WARNING 3772 ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__);
3805 "scsi(%ld): done.\n", vha->host_no));
3806 } 3773 }
3807 3774
3808 /* Copy mailbox information */ 3775 /* Copy mailbox information */
@@ -3819,7 +3786,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3819 mbx_cmd_t *mcp = &mc; 3786 mbx_cmd_t *mcp = &mc;
3820 struct qla_hw_data *ha = vha->hw; 3787 struct qla_hw_data *ha = vha->hw;
3821 3788
3822 DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no)); 3789 ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__);
3823 3790
3824 memset(mcp->mb, 0 , sizeof(mcp->mb)); 3791 memset(mcp->mb, 0 , sizeof(mcp->mb));
3825 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 3792 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
@@ -3858,12 +3825,11 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3858 rval = qla2x00_mailbox_command(vha, mcp); 3825 rval = qla2x00_mailbox_command(vha, mcp);
3859 3826
3860 if (rval != QLA_SUCCESS) { 3827 if (rval != QLA_SUCCESS) {
3861 DEBUG2(printk(KERN_WARNING 3828 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
3862 "(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n", 3829 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3863 vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3830 rval, mcp->mb[0], mcp->mb[1]);
3864 } else { 3831 } else {
3865 DEBUG2(printk(KERN_WARNING 3832 ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__);
3866 "scsi(%ld): done.\n", vha->host_no));
3867 } 3833 }
3868 3834
3869 /* Copy mailbox information */ 3835 /* Copy mailbox information */
@@ -3872,14 +3838,14 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3872} 3838}
3873 3839
3874int 3840int
3875qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic) 3841qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
3876{ 3842{
3877 int rval; 3843 int rval;
3878 mbx_cmd_t mc; 3844 mbx_cmd_t mc;
3879 mbx_cmd_t *mcp = &mc; 3845 mbx_cmd_t *mcp = &mc;
3880 3846
3881 DEBUG16(printk("%s(%ld): enable_diag=%d entered.\n", __func__, 3847 ql_dbg(ql_dbg_mbx, vha, 0x10fd,
3882 ha->host_no, enable_diagnostic)); 3848 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
3883 3849
3884 mcp->mb[0] = MBC_ISP84XX_RESET; 3850 mcp->mb[0] = MBC_ISP84XX_RESET;
3885 mcp->mb[1] = enable_diagnostic; 3851 mcp->mb[1] = enable_diagnostic;
@@ -3887,13 +3853,12 @@ qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
3887 mcp->in_mb = MBX_1|MBX_0; 3853 mcp->in_mb = MBX_1|MBX_0;
3888 mcp->tov = MBX_TOV_SECONDS; 3854 mcp->tov = MBX_TOV_SECONDS;
3889 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 3855 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3890 rval = qla2x00_mailbox_command(ha, mcp); 3856 rval = qla2x00_mailbox_command(vha, mcp);
3891 3857
3892 if (rval != QLA_SUCCESS) 3858 if (rval != QLA_SUCCESS)
3893 DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no, 3859 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
3894 rval));
3895 else 3860 else
3896 DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no)); 3861 ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__);
3897 3862
3898 return rval; 3863 return rval;
3899} 3864}
@@ -3905,11 +3870,11 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3905 mbx_cmd_t mc; 3870 mbx_cmd_t mc;
3906 mbx_cmd_t *mcp = &mc; 3871 mbx_cmd_t *mcp = &mc;
3907 3872
3873 ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__);
3874
3908 if (!IS_FWI2_CAPABLE(vha->hw)) 3875 if (!IS_FWI2_CAPABLE(vha->hw))
3909 return QLA_FUNCTION_FAILED; 3876 return QLA_FUNCTION_FAILED;
3910 3877
3911 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3912
3913 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 3878 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
3914 mcp->mb[1] = LSW(risc_addr); 3879 mcp->mb[1] = LSW(risc_addr);
3915 mcp->mb[2] = LSW(data); 3880 mcp->mb[2] = LSW(data);
@@ -3921,10 +3886,10 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3921 mcp->flags = 0; 3886 mcp->flags = 0;
3922 rval = qla2x00_mailbox_command(vha, mcp); 3887 rval = qla2x00_mailbox_command(vha, mcp);
3923 if (rval != QLA_SUCCESS) { 3888 if (rval != QLA_SUCCESS) {
3924 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 3889 ql_dbg(ql_dbg_mbx, vha, 0x1101,
3925 vha->host_no, rval, mcp->mb[0])); 3890 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3926 } else { 3891 } else {
3927 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3892 ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__);
3928 } 3893 }
3929 3894
3930 return rval; 3895 return rval;
@@ -3941,8 +3906,7 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
3941 3906
3942 rval = QLA_SUCCESS; 3907 rval = QLA_SUCCESS;
3943 3908
3944 DEBUG11(qla_printk(KERN_INFO, ha, 3909 ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__);
3945 "%s(%ld): entered.\n", __func__, vha->host_no));
3946 3910
3947 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 3911 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
3948 3912
@@ -3982,11 +3946,10 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
3982 rval = QLA_FUNCTION_FAILED; 3946 rval = QLA_FUNCTION_FAILED;
3983 3947
3984 if (rval != QLA_SUCCESS) { 3948 if (rval != QLA_SUCCESS) {
3985 DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n", 3949 ql_dbg(ql_dbg_mbx, vha, 0x1104,
3986 __func__, vha->host_no, rval, mb[0])); 3950 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
3987 } else { 3951 } else {
3988 DEBUG11(printk(KERN_INFO 3952 ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__);
3989 "%s(%ld): done.\n", __func__, vha->host_no));
3990 } 3953 }
3991 3954
3992 return rval; 3955 return rval;
@@ -3999,12 +3962,11 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
3999 mbx_cmd_t *mcp = &mc; 3962 mbx_cmd_t *mcp = &mc;
4000 struct qla_hw_data *ha = vha->hw; 3963 struct qla_hw_data *ha = vha->hw;
4001 3964
3965 ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__);
3966
4002 if (!IS_FWI2_CAPABLE(ha)) 3967 if (!IS_FWI2_CAPABLE(ha))
4003 return QLA_FUNCTION_FAILED; 3968 return QLA_FUNCTION_FAILED;
4004 3969
4005 DEBUG11(qla_printk(KERN_INFO, ha,
4006 "%s(%ld): entered.\n", __func__, vha->host_no));
4007
4008 mcp->mb[0] = MBC_DATA_RATE; 3970 mcp->mb[0] = MBC_DATA_RATE;
4009 mcp->mb[1] = 0; 3971 mcp->mb[1] = 0;
4010 mcp->out_mb = MBX_1|MBX_0; 3972 mcp->out_mb = MBX_1|MBX_0;
@@ -4013,11 +3975,10 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
4013 mcp->flags = 0; 3975 mcp->flags = 0;
4014 rval = qla2x00_mailbox_command(vha, mcp); 3976 rval = qla2x00_mailbox_command(vha, mcp);
4015 if (rval != QLA_SUCCESS) { 3977 if (rval != QLA_SUCCESS) {
4016 DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n", 3978 ql_dbg(ql_dbg_mbx, vha, 0x1107,
4017 __func__, vha->host_no, rval, mcp->mb[0])); 3979 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4018 } else { 3980 } else {
4019 DEBUG11(printk(KERN_INFO 3981 ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__);
4020 "%s(%ld): done.\n", __func__, vha->host_no));
4021 if (mcp->mb[1] != 0x7) 3982 if (mcp->mb[1] != 0x7)
4022 ha->link_data_rate = mcp->mb[1]; 3983 ha->link_data_rate = mcp->mb[1];
4023 } 3984 }
@@ -4033,8 +3994,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4033 mbx_cmd_t *mcp = &mc; 3994 mbx_cmd_t *mcp = &mc;
4034 struct qla_hw_data *ha = vha->hw; 3995 struct qla_hw_data *ha = vha->hw;
4035 3996
4036 DEBUG11(printk(KERN_INFO 3997 ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
4037 "%s(%ld): entered.\n", __func__, vha->host_no));
4038 3998
4039 if (!IS_QLA81XX(ha)) 3999 if (!IS_QLA81XX(ha))
4040 return QLA_FUNCTION_FAILED; 4000 return QLA_FUNCTION_FAILED;
@@ -4047,15 +4007,13 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4047 rval = qla2x00_mailbox_command(vha, mcp); 4007 rval = qla2x00_mailbox_command(vha, mcp);
4048 4008
4049 if (rval != QLA_SUCCESS) { 4009 if (rval != QLA_SUCCESS) {
4050 DEBUG2_3_11(printk(KERN_WARNING 4010 ql_dbg(ql_dbg_mbx, vha, 0x110a,
4051 "%s(%ld): failed=%x (%x).\n", __func__, 4011 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4052 vha->host_no, rval, mcp->mb[0]));
4053 } else { 4012 } else {
4054 /* Copy all bits to preserve original value */ 4013 /* Copy all bits to preserve original value */
4055 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 4014 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
4056 4015
4057 DEBUG11(printk(KERN_INFO 4016 ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__);
4058 "%s(%ld): done.\n", __func__, vha->host_no));
4059 } 4017 }
4060 return rval; 4018 return rval;
4061} 4019}
@@ -4067,8 +4025,7 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4067 mbx_cmd_t mc; 4025 mbx_cmd_t mc;
4068 mbx_cmd_t *mcp = &mc; 4026 mbx_cmd_t *mcp = &mc;
4069 4027
4070 DEBUG11(printk(KERN_INFO 4028 ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__);
4071 "%s(%ld): entered.\n", __func__, vha->host_no));
4072 4029
4073 mcp->mb[0] = MBC_SET_PORT_CONFIG; 4030 mcp->mb[0] = MBC_SET_PORT_CONFIG;
4074 /* Copy all bits to preserve original setting */ 4031 /* Copy all bits to preserve original setting */
@@ -4080,12 +4037,10 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4080 rval = qla2x00_mailbox_command(vha, mcp); 4037 rval = qla2x00_mailbox_command(vha, mcp);
4081 4038
4082 if (rval != QLA_SUCCESS) { 4039 if (rval != QLA_SUCCESS) {
4083 DEBUG2_3_11(printk(KERN_WARNING 4040 ql_dbg(ql_dbg_mbx, vha, 0x110d,
4084 "%s(%ld): failed=%x (%x).\n", __func__, 4041 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4085 vha->host_no, rval, mcp->mb[0]));
4086 } else 4042 } else
4087 DEBUG11(printk(KERN_INFO 4043 ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__);
4088 "%s(%ld): done.\n", __func__, vha->host_no));
4089 4044
4090 return rval; 4045 return rval;
4091} 4046}
@@ -4100,12 +4055,11 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4100 mbx_cmd_t *mcp = &mc; 4055 mbx_cmd_t *mcp = &mc;
4101 struct qla_hw_data *ha = vha->hw; 4056 struct qla_hw_data *ha = vha->hw;
4102 4057
4058 ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__);
4059
4103 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 4060 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
4104 return QLA_FUNCTION_FAILED; 4061 return QLA_FUNCTION_FAILED;
4105 4062
4106 DEBUG11(printk(KERN_INFO
4107 "%s(%ld): entered.\n", __func__, vha->host_no));
4108
4109 mcp->mb[0] = MBC_PORT_PARAMS; 4063 mcp->mb[0] = MBC_PORT_PARAMS;
4110 mcp->mb[1] = loop_id; 4064 mcp->mb[1] = loop_id;
4111 if (ha->flags.fcp_prio_enabled) 4065 if (ha->flags.fcp_prio_enabled)
@@ -4127,12 +4081,9 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4127 } 4081 }
4128 4082
4129 if (rval != QLA_SUCCESS) { 4083 if (rval != QLA_SUCCESS) {
4130 DEBUG2_3_11(printk(KERN_WARNING 4084 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
4131 "%s(%ld): failed=%x.\n", __func__,
4132 vha->host_no, rval));
4133 } else { 4085 } else {
4134 DEBUG11(printk(KERN_INFO 4086 ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__);
4135 "%s(%ld): done.\n", __func__, vha->host_no));
4136 } 4087 }
4137 4088
4138 return rval; 4089 return rval;
@@ -4145,13 +4096,12 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4145 uint8_t byte; 4096 uint8_t byte;
4146 struct qla_hw_data *ha = vha->hw; 4097 struct qla_hw_data *ha = vha->hw;
4147 4098
4148 DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no)); 4099 ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__);
4149 4100
4150 /* Integer part */ 4101 /* Integer part */
4151 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0); 4102 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
4152 if (rval != QLA_SUCCESS) { 4103 if (rval != QLA_SUCCESS) {
4153 DEBUG2_3_11(printk(KERN_WARNING 4104 ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
4154 "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
4155 ha->flags.thermal_supported = 0; 4105 ha->flags.thermal_supported = 0;
4156 goto fail; 4106 goto fail;
4157 } 4107 }
@@ -4160,14 +4110,13 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4160 /* Fraction part */ 4110 /* Fraction part */
4161 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0); 4111 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0);
4162 if (rval != QLA_SUCCESS) { 4112 if (rval != QLA_SUCCESS) {
4163 DEBUG2_3_11(printk(KERN_WARNING 4113 ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
4164 "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
4165 ha->flags.thermal_supported = 0; 4114 ha->flags.thermal_supported = 0;
4166 goto fail; 4115 goto fail;
4167 } 4116 }
4168 *frac = (byte >> 6) * 25; 4117 *frac = (byte >> 6) * 25;
4169 4118
4170 DEBUG11(printk(KERN_INFO "%s(%ld): done.\n", __func__, vha->host_no)); 4119 ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__);
4171fail: 4120fail:
4172 return rval; 4121 return rval;
4173} 4122}
@@ -4180,12 +4129,11 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4180 mbx_cmd_t mc; 4129 mbx_cmd_t mc;
4181 mbx_cmd_t *mcp = &mc; 4130 mbx_cmd_t *mcp = &mc;
4182 4131
4132 ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__);
4133
4183 if (!IS_FWI2_CAPABLE(ha)) 4134 if (!IS_FWI2_CAPABLE(ha))
4184 return QLA_FUNCTION_FAILED; 4135 return QLA_FUNCTION_FAILED;
4185 4136
4186 DEBUG11(qla_printk(KERN_INFO, ha,
4187 "%s(%ld): entered.\n", __func__, vha->host_no));
4188
4189 memset(mcp, 0, sizeof(mbx_cmd_t)); 4137 memset(mcp, 0, sizeof(mbx_cmd_t));
4190 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 4138 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4191 mcp->mb[1] = 1; 4139 mcp->mb[1] = 1;
@@ -4197,12 +4145,10 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4197 4145
4198 rval = qla2x00_mailbox_command(vha, mcp); 4146 rval = qla2x00_mailbox_command(vha, mcp);
4199 if (rval != QLA_SUCCESS) { 4147 if (rval != QLA_SUCCESS) {
4200 DEBUG2_3_11(qla_printk(KERN_WARNING, ha, 4148 ql_dbg(ql_dbg_mbx, vha, 0x1016,
4201 "%s(%ld): failed=%x mb[0]=%x.\n", __func__, 4149 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4202 vha->host_no, rval, mcp->mb[0]));
4203 } else { 4150 } else {
4204 DEBUG11(qla_printk(KERN_INFO, ha, 4151 ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__);
4205 "%s(%ld): done.\n", __func__, vha->host_no));
4206 } 4152 }
4207 4153
4208 return rval; 4154 return rval;
@@ -4216,12 +4162,11 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4216 mbx_cmd_t mc; 4162 mbx_cmd_t mc;
4217 mbx_cmd_t *mcp = &mc; 4163 mbx_cmd_t *mcp = &mc;
4218 4164
4165 ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__);
4166
4219 if (!IS_QLA82XX(ha)) 4167 if (!IS_QLA82XX(ha))
4220 return QLA_FUNCTION_FAILED; 4168 return QLA_FUNCTION_FAILED;
4221 4169
4222 DEBUG11(qla_printk(KERN_INFO, ha,
4223 "%s(%ld): entered.\n", __func__, vha->host_no));
4224
4225 memset(mcp, 0, sizeof(mbx_cmd_t)); 4170 memset(mcp, 0, sizeof(mbx_cmd_t));
4226 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 4171 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4227 mcp->mb[1] = 0; 4172 mcp->mb[1] = 0;
@@ -4233,12 +4178,10 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4233 4178
4234 rval = qla2x00_mailbox_command(vha, mcp); 4179 rval = qla2x00_mailbox_command(vha, mcp);
4235 if (rval != QLA_SUCCESS) { 4180 if (rval != QLA_SUCCESS) {
4236 DEBUG2_3_11(qla_printk(KERN_WARNING, ha, 4181 ql_dbg(ql_dbg_mbx, vha, 0x100c,
4237 "%s(%ld): failed=%x mb[0]=%x.\n", __func__, 4182 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4238 vha->host_no, rval, mcp->mb[0]));
4239 } else { 4183 } else {
4240 DEBUG11(qla_printk(KERN_INFO, ha, 4184 ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__);
4241 "%s(%ld): done.\n", __func__, vha->host_no));
4242 } 4185 }
4243 4186
4244 return rval; 4187 return rval;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 5e343919acad..c706ed370000 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -36,8 +36,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
36 mutex_lock(&ha->vport_lock); 36 mutex_lock(&ha->vport_lock);
37 vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1); 37 vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
38 if (vp_id > ha->max_npiv_vports) { 38 if (vp_id > ha->max_npiv_vports) {
39 DEBUG15(printk ("vp_id %d is bigger than max-supported %d.\n", 39 ql_dbg(ql_dbg_vport, vha, 0xa000,
40 vp_id, ha->max_npiv_vports)); 40 "vp_id %d is bigger than max-supported %d.\n",
41 vp_id, ha->max_npiv_vports);
41 mutex_unlock(&ha->vport_lock); 42 mutex_unlock(&ha->vport_lock);
42 return vp_id; 43 return vp_id;
43 } 44 }
@@ -131,9 +132,9 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
131 fc_port_t *fcport; 132 fc_port_t *fcport;
132 133
133 list_for_each_entry(fcport, &vha->vp_fcports, list) { 134 list_for_each_entry(fcport, &vha->vp_fcports, list) {
134 DEBUG15(printk("scsi(%ld): Marking port dead, " 135 ql_dbg(ql_dbg_vport, vha, 0xa001,
135 "loop_id=0x%04x :%x\n", 136 "Marking port dead, loop_id=0x%04x : %x.\n",
136 vha->host_no, fcport->loop_id, fcport->vp_idx)); 137 fcport->loop_id, fcport->vp_idx);
137 138
138 qla2x00_mark_device_lost(vha, fcport, 0, 0); 139 qla2x00_mark_device_lost(vha, fcport, 0, 0);
139 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 140 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@@ -187,13 +188,13 @@ qla24xx_enable_vp(scsi_qla_host_t *vha)
187 goto enable_failed; 188 goto enable_failed;
188 } 189 }
189 190
190 DEBUG15(qla_printk(KERN_INFO, ha, 191 ql_dbg(ql_dbg_taskm, vha, 0x801a,
191 "Virtual port with id: %d - Enabled\n", vha->vp_idx)); 192 "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
192 return 0; 193 return 0;
193 194
194enable_failed: 195enable_failed:
195 DEBUG15(qla_printk(KERN_INFO, ha, 196 ql_dbg(ql_dbg_taskm, vha, 0x801b,
196 "Virtual port with id: %d - Disabled\n", vha->vp_idx)); 197 "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
197 return 1; 198 return 1;
198} 199}
199 200
@@ -205,12 +206,12 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
205 206
206 fc_vport = vha->fc_vport; 207 fc_vport = vha->fc_vport;
207 208
208 DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n", 209 ql_dbg(ql_dbg_vport, vha, 0xa002,
209 vha->host_no, __func__)); 210 "%s: change request #3.\n", __func__);
210 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx); 211 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
211 if (ret != QLA_SUCCESS) { 212 if (ret != QLA_SUCCESS) {
212 DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable " 213 ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
213 "receiving of RSCN requests: 0x%x\n", ret)); 214 "receiving of RSCN requests: 0x%x.\n", ret);
214 return; 215 return;
215 } else { 216 } else {
216 /* Corresponds to SCR enabled */ 217 /* Corresponds to SCR enabled */
@@ -248,9 +249,9 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
248 case MBA_CHG_IN_CONNECTION: 249 case MBA_CHG_IN_CONNECTION:
249 case MBA_PORT_UPDATE: 250 case MBA_PORT_UPDATE:
250 case MBA_RSCN_UPDATE: 251 case MBA_RSCN_UPDATE:
251 DEBUG15(printk("scsi(%ld)%s: Async_event for" 252 ql_dbg(ql_dbg_async, vha, 0x5024,
252 " VP[%d], mb = 0x%x, vha=%p\n", 253 "Async_event for VP[%d], mb=0x%x vha=%p.\n",
253 vha->host_no, __func__, i, *mb, vha)); 254 i, *mb, vha);
254 qla2x00_async_event(vha, rsp, mb); 255 qla2x00_async_event(vha, rsp, mb);
255 break; 256 break;
256 } 257 }
@@ -286,37 +287,49 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
286 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 287 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
287 qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); 288 qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
288 289
289 DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n", 290 ql_dbg(ql_dbg_taskm, vha, 0x801d,
290 vha->host_no, vha->vp_idx)); 291 "Scheduling enable of Vport %d.\n", vha->vp_idx);
291 return qla24xx_enable_vp(vha); 292 return qla24xx_enable_vp(vha);
292} 293}
293 294
294static int 295static int
295qla2x00_do_dpc_vp(scsi_qla_host_t *vha) 296qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
296{ 297{
298 ql_dbg(ql_dbg_dpc, vha, 0x4012,
299 "Entering %s.\n", __func__);
300 ql_dbg(ql_dbg_dpc, vha, 0x4013,
301 "vp_flags: 0x%lx.\n", vha->vp_flags);
302
297 qla2x00_do_work(vha); 303 qla2x00_do_work(vha);
298 304
299 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { 305 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
300 /* VP acquired. complete port configuration */ 306 /* VP acquired. complete port configuration */
307 ql_dbg(ql_dbg_dpc, vha, 0x4014,
308 "Configure VP scheduled.\n");
301 qla24xx_configure_vp(vha); 309 qla24xx_configure_vp(vha);
310 ql_dbg(ql_dbg_dpc, vha, 0x4015,
311 "Configure VP end.\n");
302 return 0; 312 return 0;
303 } 313 }
304 314
305 if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) { 315 if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
316 ql_dbg(ql_dbg_dpc, vha, 0x4016,
317 "FCPort update scheduled.\n");
306 qla2x00_update_fcports(vha); 318 qla2x00_update_fcports(vha);
307 clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags); 319 clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
320 ql_dbg(ql_dbg_dpc, vha, 0x4017,
321 "FCPort update end.\n");
308 } 322 }
309 323
310 if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) && 324 if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
311 !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && 325 !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
312 atomic_read(&vha->loop_state) != LOOP_DOWN) { 326 atomic_read(&vha->loop_state) != LOOP_DOWN) {
313 327
314 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n", 328 ql_dbg(ql_dbg_dpc, vha, 0x4018,
315 vha->host_no)); 329 "Relogin needed scheduled.\n");
316 qla2x00_relogin(vha); 330 qla2x00_relogin(vha);
317 331 ql_dbg(ql_dbg_dpc, vha, 0x4019,
318 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", 332 "Relogin needed end.\n");
319 vha->host_no));
320 } 333 }
321 334
322 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && 335 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
@@ -326,11 +339,17 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
326 339
327 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 340 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
328 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { 341 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
342 ql_dbg(ql_dbg_dpc, vha, 0x401a,
343 "Loop resync scheduled.\n");
329 qla2x00_loop_resync(vha); 344 qla2x00_loop_resync(vha);
330 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); 345 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
346 ql_dbg(ql_dbg_dpc, vha, 0x401b,
347 "Loop resync end.\n");
331 } 348 }
332 } 349 }
333 350
351 ql_dbg(ql_dbg_dpc, vha, 0x401c,
352 "Exiting %s.\n", __func__);
334 return 0; 353 return 0;
335} 354}
336 355
@@ -396,9 +415,10 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
396 415
397 /* Check up max-npiv-supports */ 416 /* Check up max-npiv-supports */
398 if (ha->num_vhosts > ha->max_npiv_vports) { 417 if (ha->num_vhosts > ha->max_npiv_vports) {
399 DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than " 418 ql_dbg(ql_dbg_vport, vha, 0xa004,
400 "max_npv_vports %ud.\n", base_vha->host_no, 419 "num_vhosts %ud is bigger "
401 ha->num_vhosts, ha->max_npiv_vports)); 420 "than max_npiv_vports %ud.\n",
421 ha->num_vhosts, ha->max_npiv_vports);
402 return VPCERR_UNSUPPORTED; 422 return VPCERR_UNSUPPORTED;
403 } 423 }
404 return 0; 424 return 0;
@@ -415,7 +435,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
415 435
416 vha = qla2x00_create_host(sht, ha); 436 vha = qla2x00_create_host(sht, ha);
417 if (!vha) { 437 if (!vha) {
418 DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n")); 438 ql_log(ql_log_warn, vha, 0xa005,
439 "scsi_host_alloc() failed for vport.\n");
419 return(NULL); 440 return(NULL);
420 } 441 }
421 442
@@ -429,8 +450,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
429 vha->device_flags = 0; 450 vha->device_flags = 0;
430 vha->vp_idx = qla24xx_allocate_vp_id(vha); 451 vha->vp_idx = qla24xx_allocate_vp_id(vha);
431 if (vha->vp_idx > ha->max_npiv_vports) { 452 if (vha->vp_idx > ha->max_npiv_vports) {
432 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n", 453 ql_dbg(ql_dbg_vport, vha, 0xa006,
433 vha->host_no)); 454 "Couldn't allocate vp_id.\n");
434 goto create_vhost_failed; 455 goto create_vhost_failed;
435 } 456 }
436 vha->mgmt_svr_loop_id = 10 + vha->vp_idx; 457 vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
@@ -461,8 +482,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
461 host->max_id = MAX_TARGETS_2200; 482 host->max_id = MAX_TARGETS_2200;
462 host->transportt = qla2xxx_transport_vport_template; 483 host->transportt = qla2xxx_transport_vport_template;
463 484
464 DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n", 485 ql_dbg(ql_dbg_vport, vha, 0xa007,
465 vha->host_no, vha)); 486 "Detect vport hba %ld at address = %p.\n",
487 vha->host_no, vha);
466 488
467 vha->flags.init_done = 1; 489 vha->flags.init_done = 1;
468 490
@@ -567,9 +589,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
567 if (req) { 589 if (req) {
568 ret = qla25xx_delete_req_que(vha, req); 590 ret = qla25xx_delete_req_que(vha, req);
569 if (ret != QLA_SUCCESS) { 591 if (ret != QLA_SUCCESS) {
570 qla_printk(KERN_WARNING, ha, 592 ql_log(ql_log_warn, vha, 0x00ea,
571 "Couldn't delete req que %d\n", 593 "Couldn't delete req que %d.\n",
572 req->id); 594 req->id);
573 return ret; 595 return ret;
574 } 596 }
575 } 597 }
@@ -581,9 +603,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
581 if (rsp) { 603 if (rsp) {
582 ret = qla25xx_delete_rsp_que(vha, rsp); 604 ret = qla25xx_delete_rsp_que(vha, rsp);
583 if (ret != QLA_SUCCESS) { 605 if (ret != QLA_SUCCESS) {
584 qla_printk(KERN_WARNING, ha, 606 ql_log(ql_log_warn, vha, 0x00eb,
585 "Couldn't delete rsp que %d\n", 607 "Couldn't delete rsp que %d.\n",
586 rsp->id); 608 rsp->id);
587 return ret; 609 return ret;
588 } 610 }
589 } 611 }
@@ -604,8 +626,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
604 626
605 req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 627 req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
606 if (req == NULL) { 628 if (req == NULL) {
607 qla_printk(KERN_WARNING, ha, "could not allocate memory" 629 ql_log(ql_log_fatal, base_vha, 0x00d9,
608 "for request que\n"); 630 "Failed to allocate memory for request queue.\n");
609 goto failed; 631 goto failed;
610 } 632 }
611 633
@@ -614,8 +636,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
614 (req->length + 1) * sizeof(request_t), 636 (req->length + 1) * sizeof(request_t),
615 &req->dma, GFP_KERNEL); 637 &req->dma, GFP_KERNEL);
616 if (req->ring == NULL) { 638 if (req->ring == NULL) {
617 qla_printk(KERN_WARNING, ha, 639 ql_log(ql_log_fatal, base_vha, 0x00da,
618 "Memory Allocation failed - request_ring\n"); 640 "Failed to allocte memory for request_ring.\n");
619 goto que_failed; 641 goto que_failed;
620 } 642 }
621 643
@@ -623,8 +645,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
623 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); 645 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
624 if (que_id >= ha->max_req_queues) { 646 if (que_id >= ha->max_req_queues) {
625 mutex_unlock(&ha->vport_lock); 647 mutex_unlock(&ha->vport_lock);
626 qla_printk(KERN_INFO, ha, "No resources to create " 648 ql_log(ql_log_warn, base_vha, 0x00db,
627 "additional request queue\n"); 649 "No resources to create additional request queue.\n");
628 goto que_failed; 650 goto que_failed;
629 } 651 }
630 set_bit(que_id, ha->req_qid_map); 652 set_bit(que_id, ha->req_qid_map);
@@ -633,6 +655,12 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
633 req->vp_idx = vp_idx; 655 req->vp_idx = vp_idx;
634 req->qos = qos; 656 req->qos = qos;
635 657
658 ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
659 "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
660 que_id, req->rid, req->vp_idx, req->qos);
661 ql_dbg(ql_dbg_init, base_vha, 0x00dc,
662 "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
663 que_id, req->rid, req->vp_idx, req->qos);
636 if (rsp_que < 0) 664 if (rsp_que < 0)
637 req->rsp = NULL; 665 req->rsp = NULL;
638 else 666 else
@@ -645,6 +673,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
645 options |= BIT_5; 673 options |= BIT_5;
646 req->options = options; 674 req->options = options;
647 675
676 ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
677 "options=0x%x.\n", req->options);
678 ql_dbg(ql_dbg_init, base_vha, 0x00dd,
679 "options=0x%x.\n", req->options);
648 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) 680 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
649 req->outstanding_cmds[cnt] = NULL; 681 req->outstanding_cmds[cnt] = NULL;
650 req->current_outstanding_cmd = 1; 682 req->current_outstanding_cmd = 1;
@@ -656,10 +688,21 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
656 reg = ISP_QUE_REG(ha, que_id); 688 reg = ISP_QUE_REG(ha, que_id);
657 req->max_q_depth = ha->req_q_map[0]->max_q_depth; 689 req->max_q_depth = ha->req_q_map[0]->max_q_depth;
658 mutex_unlock(&ha->vport_lock); 690 mutex_unlock(&ha->vport_lock);
691 ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
692 "ring_ptr=%p ring_index=%d, "
693 "cnt=%d id=%d max_q_depth=%d.\n",
694 req->ring_ptr, req->ring_index,
695 req->cnt, req->id, req->max_q_depth);
696 ql_dbg(ql_dbg_init, base_vha, 0x00de,
697 "ring_ptr=%p ring_index=%d, "
698 "cnt=%d id=%d max_q_depth=%d.\n",
699 req->ring_ptr, req->ring_index, req->cnt,
700 req->id, req->max_q_depth);
659 701
660 ret = qla25xx_init_req_que(base_vha, req); 702 ret = qla25xx_init_req_que(base_vha, req);
661 if (ret != QLA_SUCCESS) { 703 if (ret != QLA_SUCCESS) {
662 qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); 704 ql_log(ql_log_fatal, base_vha, 0x00df,
705 "%s failed.\n", __func__);
663 mutex_lock(&ha->vport_lock); 706 mutex_lock(&ha->vport_lock);
664 clear_bit(que_id, ha->req_qid_map); 707 clear_bit(que_id, ha->req_qid_map);
665 mutex_unlock(&ha->vport_lock); 708 mutex_unlock(&ha->vport_lock);
@@ -700,8 +743,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
700 743
701 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 744 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
702 if (rsp == NULL) { 745 if (rsp == NULL) {
703 qla_printk(KERN_WARNING, ha, "could not allocate memory for" 746 ql_log(ql_log_warn, base_vha, 0x0066,
704 " response que\n"); 747 "Failed to allocate memory for response queue.\n");
705 goto failed; 748 goto failed;
706 } 749 }
707 750
@@ -710,8 +753,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
710 (rsp->length + 1) * sizeof(response_t), 753 (rsp->length + 1) * sizeof(response_t),
711 &rsp->dma, GFP_KERNEL); 754 &rsp->dma, GFP_KERNEL);
712 if (rsp->ring == NULL) { 755 if (rsp->ring == NULL) {
713 qla_printk(KERN_WARNING, ha, 756 ql_log(ql_log_warn, base_vha, 0x00e1,
714 "Memory Allocation failed - response_ring\n"); 757 "Failed to allocate memory for response ring.\n");
715 goto que_failed; 758 goto que_failed;
716 } 759 }
717 760
@@ -719,8 +762,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
719 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues); 762 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
720 if (que_id >= ha->max_rsp_queues) { 763 if (que_id >= ha->max_rsp_queues) {
721 mutex_unlock(&ha->vport_lock); 764 mutex_unlock(&ha->vport_lock);
722 qla_printk(KERN_INFO, ha, "No resources to create " 765 ql_log(ql_log_warn, base_vha, 0x00e2,
723 "additional response queue\n"); 766 "No resources to create additional request queue.\n");
724 goto que_failed; 767 goto que_failed;
725 } 768 }
726 set_bit(que_id, ha->rsp_qid_map); 769 set_bit(que_id, ha->rsp_qid_map);
@@ -728,12 +771,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
728 if (ha->flags.msix_enabled) 771 if (ha->flags.msix_enabled)
729 rsp->msix = &ha->msix_entries[que_id + 1]; 772 rsp->msix = &ha->msix_entries[que_id + 1];
730 else 773 else
731 qla_printk(KERN_WARNING, ha, "msix not enabled\n"); 774 ql_log(ql_log_warn, base_vha, 0x00e3,
775 "MSIX not enalbled.\n");
732 776
733 ha->rsp_q_map[que_id] = rsp; 777 ha->rsp_q_map[que_id] = rsp;
734 rsp->rid = rid; 778 rsp->rid = rid;
735 rsp->vp_idx = vp_idx; 779 rsp->vp_idx = vp_idx;
736 rsp->hw = ha; 780 rsp->hw = ha;
781 ql_dbg(ql_dbg_init, base_vha, 0x00e4,
782 "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
783 que_id, rsp->rid, rsp->vp_idx, rsp->hw);
737 /* Use alternate PCI bus number */ 784 /* Use alternate PCI bus number */
738 if (MSB(rsp->rid)) 785 if (MSB(rsp->rid))
739 options |= BIT_4; 786 options |= BIT_4;
@@ -750,6 +797,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
750 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; 797 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
751 rsp->rsp_q_out = &reg->isp25mq.rsp_q_out; 798 rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
752 mutex_unlock(&ha->vport_lock); 799 mutex_unlock(&ha->vport_lock);
800 ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
801 "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
802 rsp->options, rsp->id, rsp->rsp_q_in,
803 rsp->rsp_q_out);
804 ql_dbg(ql_dbg_init, base_vha, 0x00e5,
805 "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
806 rsp->options, rsp->id, rsp->rsp_q_in,
807 rsp->rsp_q_out);
753 808
754 ret = qla25xx_request_irq(rsp); 809 ret = qla25xx_request_irq(rsp);
755 if (ret) 810 if (ret)
@@ -757,7 +812,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
757 812
758 ret = qla25xx_init_rsp_que(base_vha, rsp); 813 ret = qla25xx_init_rsp_que(base_vha, rsp);
759 if (ret != QLA_SUCCESS) { 814 if (ret != QLA_SUCCESS) {
760 qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); 815 ql_log(ql_log_fatal, base_vha, 0x00e7,
816 "%s failed.\n", __func__);
761 mutex_lock(&ha->vport_lock); 817 mutex_lock(&ha->vport_lock);
762 clear_bit(que_id, ha->rsp_qid_map); 818 clear_bit(que_id, ha->rsp_qid_map);
763 mutex_unlock(&ha->vport_lock); 819 mutex_unlock(&ha->vport_lock);
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index e1138bcc834c..5cbf33a50b14 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -348,6 +348,7 @@ static void
348qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off) 348qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
349{ 349{
350 u32 win_read; 350 u32 win_read;
351 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
351 352
352 ha->crb_win = CRB_HI(*off); 353 ha->crb_win = CRB_HI(*off);
353 writel(ha->crb_win, 354 writel(ha->crb_win,
@@ -358,9 +359,10 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
358 */ 359 */
359 win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase)); 360 win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
360 if (win_read != ha->crb_win) { 361 if (win_read != ha->crb_win) {
361 DEBUG2(qla_printk(KERN_INFO, ha, 362 ql_dbg(ql_dbg_p3p, vha, 0xb000,
362 "%s: Written crbwin (0x%x) != Read crbwin (0x%x), " 363 "%s: Written crbwin (0x%x) "
363 "off=0x%lx\n", __func__, ha->crb_win, win_read, *off)); 364 "!= Read crbwin (0x%x), off=0x%lx.\n",
365 ha->crb_win, win_read, *off);
364 } 366 }
365 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; 367 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
366} 368}
@@ -368,6 +370,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
368static inline unsigned long 370static inline unsigned long
369qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off) 371qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
370{ 372{
373 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
371 /* See if we are currently pointing to the region we want to use next */ 374 /* See if we are currently pointing to the region we want to use next */
372 if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) { 375 if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
373 /* No need to change window. PCIX and PCIEregs are in both 376 /* No need to change window. PCIX and PCIEregs are in both
@@ -398,9 +401,10 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
398 return off; 401 return off;
399 } 402 }
400 /* strange address given */ 403 /* strange address given */
401 qla_printk(KERN_WARNING, ha, 404 ql_dbg(ql_dbg_p3p, vha, 0xb001,
402 "%s: Warning: unm_nic_pci_set_crbwindow called with" 405 "%x: Warning: unm_nic_pci_set_crbwindow "
403 " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off); 406 "called with an unknown address(%llx).\n",
407 QLA2XXX_DRIVER_NAME, off);
404 return off; 408 return off;
405} 409}
406 410
@@ -563,6 +567,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
563{ 567{
564 int window; 568 int window;
565 u32 win_read; 569 u32 win_read;
570 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
566 571
567 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, 572 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
568 QLA82XX_ADDR_DDR_NET_MAX)) { 573 QLA82XX_ADDR_DDR_NET_MAX)) {
@@ -574,8 +579,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
574 win_read = qla82xx_rd_32(ha, 579 win_read = qla82xx_rd_32(ha,
575 ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); 580 ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
576 if ((win_read << 17) != window) { 581 if ((win_read << 17) != window) {
577 qla_printk(KERN_WARNING, ha, 582 ql_dbg(ql_dbg_p3p, vha, 0xb003,
578 "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n", 583 "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n",
579 __func__, window, win_read); 584 __func__, window, win_read);
580 } 585 }
581 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; 586 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
@@ -583,7 +588,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
583 QLA82XX_ADDR_OCM0_MAX)) { 588 QLA82XX_ADDR_OCM0_MAX)) {
584 unsigned int temp1; 589 unsigned int temp1;
585 if ((addr & 0x00ff800) == 0xff800) { 590 if ((addr & 0x00ff800) == 0xff800) {
586 qla_printk(KERN_WARNING, ha, 591 ql_log(ql_log_warn, vha, 0xb004,
587 "%s: QM access not handled.\n", __func__); 592 "%s: QM access not handled.\n", __func__);
588 addr = -1UL; 593 addr = -1UL;
589 } 594 }
@@ -596,8 +601,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
596 temp1 = ((window & 0x1FF) << 7) | 601 temp1 = ((window & 0x1FF) << 7) |
597 ((window & 0x0FFFE0000) >> 17); 602 ((window & 0x0FFFE0000) >> 17);
598 if (win_read != temp1) { 603 if (win_read != temp1) {
599 qla_printk(KERN_WARNING, ha, 604 ql_log(ql_log_warn, vha, 0xb005,
600 "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n", 605 "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n",
601 __func__, temp1, win_read); 606 __func__, temp1, win_read);
602 } 607 }
603 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; 608 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
@@ -612,8 +617,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
612 win_read = qla82xx_rd_32(ha, 617 win_read = qla82xx_rd_32(ha,
613 ha->ms_win_crb | QLA82XX_PCI_CRBSPACE); 618 ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
614 if (win_read != window) { 619 if (win_read != window) {
615 qla_printk(KERN_WARNING, ha, 620 ql_log(ql_log_warn, vha, 0xb006,
616 "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n", 621 "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n",
617 __func__, window, win_read); 622 __func__, window, win_read);
618 } 623 }
619 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET; 624 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
@@ -624,9 +629,9 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
624 */ 629 */
625 if ((qla82xx_pci_set_window_warning_count++ < 8) || 630 if ((qla82xx_pci_set_window_warning_count++ < 8) ||
626 (qla82xx_pci_set_window_warning_count%64 == 0)) { 631 (qla82xx_pci_set_window_warning_count%64 == 0)) {
627 qla_printk(KERN_WARNING, ha, 632 ql_log(ql_log_warn, vha, 0xb007,
628 "%s: Warning:%s Unknown address range!\n", __func__, 633 "%s: Warning:%s Unknown address range!.\n",
629 QLA2XXX_DRIVER_NAME); 634 __func__, QLA2XXX_DRIVER_NAME);
630 } 635 }
631 addr = -1UL; 636 addr = -1UL;
632 } 637 }
@@ -671,6 +676,7 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
671 uint8_t *mem_ptr = NULL; 676 uint8_t *mem_ptr = NULL;
672 unsigned long mem_base; 677 unsigned long mem_base;
673 unsigned long mem_page; 678 unsigned long mem_page;
679 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
674 680
675 write_lock_irqsave(&ha->hw_lock, flags); 681 write_lock_irqsave(&ha->hw_lock, flags);
676 682
@@ -682,9 +688,10 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
682 if ((start == -1UL) || 688 if ((start == -1UL) ||
683 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { 689 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
684 write_unlock_irqrestore(&ha->hw_lock, flags); 690 write_unlock_irqrestore(&ha->hw_lock, flags);
685 qla_printk(KERN_ERR, ha, 691 ql_log(ql_log_fatal, vha, 0xb008,
686 "%s out of bound pci memory access. " 692 "%s out of bound pci memory "
687 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); 693 "access, offset is 0x%llx.\n",
694 QLA2XXX_DRIVER_NAME, off);
688 return -1; 695 return -1;
689 } 696 }
690 697
@@ -741,6 +748,7 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
741 uint8_t *mem_ptr = NULL; 748 uint8_t *mem_ptr = NULL;
742 unsigned long mem_base; 749 unsigned long mem_base;
743 unsigned long mem_page; 750 unsigned long mem_page;
751 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
744 752
745 write_lock_irqsave(&ha->hw_lock, flags); 753 write_lock_irqsave(&ha->hw_lock, flags);
746 754
@@ -752,9 +760,10 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
752 if ((start == -1UL) || 760 if ((start == -1UL) ||
753 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { 761 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
754 write_unlock_irqrestore(&ha->hw_lock, flags); 762 write_unlock_irqrestore(&ha->hw_lock, flags);
755 qla_printk(KERN_ERR, ha, 763 ql_log(ql_log_fatal, vha, 0xb009,
756 "%s out of bound pci memory access. " 764 "%s out of bount memory "
757 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); 765 "access, offset is 0x%llx.\n",
766 QLA2XXX_DRIVER_NAME, off);
758 return -1; 767 return -1;
759 } 768 }
760 769
@@ -855,15 +864,16 @@ qla82xx_wait_rom_busy(struct qla_hw_data *ha)
855{ 864{
856 long timeout = 0; 865 long timeout = 0;
857 long done = 0 ; 866 long done = 0 ;
867 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
858 868
859 while (done == 0) { 869 while (done == 0) {
860 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); 870 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
861 done &= 4; 871 done &= 4;
862 timeout++; 872 timeout++;
863 if (timeout >= rom_max_timeout) { 873 if (timeout >= rom_max_timeout) {
864 DEBUG(qla_printk(KERN_INFO, ha, 874 ql_dbg(ql_dbg_p3p, vha, 0xb00a,
865 "%s: Timeout reached waiting for rom busy", 875 "%s: Timeout reached waiting for rom busy.\n",
866 QLA2XXX_DRIVER_NAME)); 876 QLA2XXX_DRIVER_NAME);
867 return -1; 877 return -1;
868 } 878 }
869 } 879 }
@@ -875,15 +885,16 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
875{ 885{
876 long timeout = 0; 886 long timeout = 0;
877 long done = 0 ; 887 long done = 0 ;
888 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
878 889
879 while (done == 0) { 890 while (done == 0) {
880 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); 891 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
881 done &= 2; 892 done &= 2;
882 timeout++; 893 timeout++;
883 if (timeout >= rom_max_timeout) { 894 if (timeout >= rom_max_timeout) {
884 DEBUG(qla_printk(KERN_INFO, ha, 895 ql_dbg(ql_dbg_p3p, vha, 0xb00b,
885 "%s: Timeout reached waiting for rom done", 896 "%s: Timeout reached waiting for rom done.\n",
886 QLA2XXX_DRIVER_NAME)); 897 QLA2XXX_DRIVER_NAME);
887 return -1; 898 return -1;
888 } 899 }
889 } 900 }
@@ -893,15 +904,16 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
893static int 904static int
894qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 905qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
895{ 906{
907 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
908
896 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); 909 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
897 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 910 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
898 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); 911 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
899 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb); 912 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
900 qla82xx_wait_rom_busy(ha); 913 qla82xx_wait_rom_busy(ha);
901 if (qla82xx_wait_rom_done(ha)) { 914 if (qla82xx_wait_rom_done(ha)) {
902 qla_printk(KERN_WARNING, ha, 915 ql_log(ql_log_fatal, vha, 0x00ba,
903 "%s: Error waiting for rom done\n", 916 "Error waiting for rom done.\n");
904 QLA2XXX_DRIVER_NAME);
905 return -1; 917 return -1;
906 } 918 }
907 /* Reset abyte_cnt and dummy_byte_cnt */ 919 /* Reset abyte_cnt and dummy_byte_cnt */
@@ -917,6 +929,7 @@ static int
917qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 929qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
918{ 930{
919 int ret, loops = 0; 931 int ret, loops = 0;
932 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
920 933
921 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { 934 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
922 udelay(100); 935 udelay(100);
@@ -924,9 +937,8 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
924 loops++; 937 loops++;
925 } 938 }
926 if (loops >= 50000) { 939 if (loops >= 50000) {
927 qla_printk(KERN_INFO, ha, 940 ql_log(ql_log_fatal, vha, 0x00b9,
928 "%s: qla82xx_rom_lock failed\n", 941 "Failed to aquire SEM2 lock.\n");
929 QLA2XXX_DRIVER_NAME);
930 return -1; 942 return -1;
931 } 943 }
932 ret = qla82xx_do_rom_fast_read(ha, addr, valp); 944 ret = qla82xx_do_rom_fast_read(ha, addr, valp);
@@ -937,11 +949,12 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
937static int 949static int
938qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val) 950qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
939{ 951{
952 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
940 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR); 953 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
941 qla82xx_wait_rom_busy(ha); 954 qla82xx_wait_rom_busy(ha);
942 if (qla82xx_wait_rom_done(ha)) { 955 if (qla82xx_wait_rom_done(ha)) {
943 qla_printk(KERN_WARNING, ha, 956 ql_log(ql_log_warn, vha, 0xb00c,
944 "Error waiting for rom done\n"); 957 "Error waiting for rom done.\n");
945 return -1; 958 return -1;
946 } 959 }
947 *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); 960 *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
@@ -955,6 +968,7 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
955 uint32_t done = 1 ; 968 uint32_t done = 1 ;
956 uint32_t val; 969 uint32_t val;
957 int ret = 0; 970 int ret = 0;
971 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
958 972
959 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); 973 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
960 while ((done != 0) && (ret == 0)) { 974 while ((done != 0) && (ret == 0)) {
@@ -964,8 +978,8 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
964 udelay(10); 978 udelay(10);
965 cond_resched(); 979 cond_resched();
966 if (timeout >= 50000) { 980 if (timeout >= 50000) {
967 qla_printk(KERN_WARNING, ha, 981 ql_log(ql_log_warn, vha, 0xb00d,
968 "Timeout reached waiting for write finish"); 982 "Timeout reached waiting for write finish.\n");
969 return -1; 983 return -1;
970 } 984 }
971 } 985 }
@@ -992,13 +1006,14 @@ qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
992static int 1006static int
993qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) 1007qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
994{ 1008{
1009 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
995 if (qla82xx_flash_set_write_enable(ha)) 1010 if (qla82xx_flash_set_write_enable(ha))
996 return -1; 1011 return -1;
997 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val); 1012 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
998 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1); 1013 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
999 if (qla82xx_wait_rom_done(ha)) { 1014 if (qla82xx_wait_rom_done(ha)) {
1000 qla_printk(KERN_WARNING, ha, 1015 ql_log(ql_log_warn, vha, 0xb00e,
1001 "Error waiting for rom done\n"); 1016 "Error waiting for rom done.\n");
1002 return -1; 1017 return -1;
1003 } 1018 }
1004 return qla82xx_flash_wait_write_finish(ha); 1019 return qla82xx_flash_wait_write_finish(ha);
@@ -1007,10 +1022,11 @@ qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
1007static int 1022static int
1008qla82xx_write_disable_flash(struct qla_hw_data *ha) 1023qla82xx_write_disable_flash(struct qla_hw_data *ha)
1009{ 1024{
1025 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1010 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI); 1026 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
1011 if (qla82xx_wait_rom_done(ha)) { 1027 if (qla82xx_wait_rom_done(ha)) {
1012 qla_printk(KERN_WARNING, ha, 1028 ql_log(ql_log_warn, vha, 0xb00f,
1013 "Error waiting for rom done\n"); 1029 "Error waiting for rom done.\n");
1014 return -1; 1030 return -1;
1015 } 1031 }
1016 return 0; 1032 return 0;
@@ -1020,13 +1036,16 @@ static int
1020ql82xx_rom_lock_d(struct qla_hw_data *ha) 1036ql82xx_rom_lock_d(struct qla_hw_data *ha)
1021{ 1037{
1022 int loops = 0; 1038 int loops = 0;
1039 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1040
1023 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { 1041 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
1024 udelay(100); 1042 udelay(100);
1025 cond_resched(); 1043 cond_resched();
1026 loops++; 1044 loops++;
1027 } 1045 }
1028 if (loops >= 50000) { 1046 if (loops >= 50000) {
1029 qla_printk(KERN_WARNING, ha, "ROM lock failed\n"); 1047 ql_log(ql_log_warn, vha, 0xb010,
1048 "ROM lock failed.\n");
1030 return -1; 1049 return -1;
1031 } 1050 }
1032 return 0;; 1051 return 0;;
@@ -1037,10 +1056,12 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1037 uint32_t data) 1056 uint32_t data)
1038{ 1057{
1039 int ret = 0; 1058 int ret = 0;
1059 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1040 1060
1041 ret = ql82xx_rom_lock_d(ha); 1061 ret = ql82xx_rom_lock_d(ha);
1042 if (ret < 0) { 1062 if (ret < 0) {
1043 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 1063 ql_log(ql_log_warn, vha, 0xb011,
1064 "ROM lock failed.\n");
1044 return ret; 1065 return ret;
1045 } 1066 }
1046 1067
@@ -1053,8 +1074,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1053 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP); 1074 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
1054 qla82xx_wait_rom_busy(ha); 1075 qla82xx_wait_rom_busy(ha);
1055 if (qla82xx_wait_rom_done(ha)) { 1076 if (qla82xx_wait_rom_done(ha)) {
1056 qla_printk(KERN_WARNING, ha, 1077 ql_log(ql_log_warn, vha, 0xb012,
1057 "Error waiting for rom done\n"); 1078 "Error waiting for rom done.\n");
1058 ret = -1; 1079 ret = -1;
1059 goto done_write; 1080 goto done_write;
1060 } 1081 }
@@ -1159,8 +1180,8 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1159 */ 1180 */
1160 if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || 1181 if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
1161 qla82xx_rom_fast_read(ha, 4, &n) != 0) { 1182 qla82xx_rom_fast_read(ha, 4, &n) != 0) {
1162 qla_printk(KERN_WARNING, ha, 1183 ql_log(ql_log_fatal, vha, 0x006e,
1163 "[ERROR] Reading crb_init area: n: %08x\n", n); 1184 "Error Reading crb_init area: n: %08x.\n", n);
1164 return -1; 1185 return -1;
1165 } 1186 }
1166 1187
@@ -1172,20 +1193,18 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1172 1193
1173 /* number of addr/value pair should not exceed 1024 enteries */ 1194 /* number of addr/value pair should not exceed 1024 enteries */
1174 if (n >= 1024) { 1195 if (n >= 1024) {
1175 qla_printk(KERN_WARNING, ha, 1196 ql_log(ql_log_fatal, vha, 0x0071,
1176 "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n", 1197 "Card flash not initialized:n=0x%x.\n", n);
1177 QLA2XXX_DRIVER_NAME, __func__, n);
1178 return -1; 1198 return -1;
1179 } 1199 }
1180 1200
1181 qla_printk(KERN_INFO, ha, 1201 ql_log(ql_log_info, vha, 0x0072,
1182 "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n); 1202 "%d CRB init values found in ROM.\n", n);
1183 1203
1184 buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL); 1204 buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
1185 if (buf == NULL) { 1205 if (buf == NULL) {
1186 qla_printk(KERN_WARNING, ha, 1206 ql_log(ql_log_fatal, vha, 0x010c,
1187 "%s: [ERROR] Unable to malloc memory.\n", 1207 "Unable to allocate memory.\n");
1188 QLA2XXX_DRIVER_NAME);
1189 return -1; 1208 return -1;
1190 } 1209 }
1191 1210
@@ -1236,9 +1255,8 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1236 continue; 1255 continue;
1237 1256
1238 if (off == ADDR_ERROR) { 1257 if (off == ADDR_ERROR) {
1239 qla_printk(KERN_WARNING, ha, 1258 ql_log(ql_log_fatal, vha, 0x0116,
1240 "%s: [ERROR] Unknown addr: 0x%08lx\n", 1259 "Unknow addr: 0x%08lx.\n", buf[i].addr);
1241 QLA2XXX_DRIVER_NAME, buf[i].addr);
1242 continue; 1260 continue;
1243 } 1261 }
1244 1262
@@ -1370,7 +1388,7 @@ qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1370 if (j >= MAX_CTL_CHECK) { 1388 if (j >= MAX_CTL_CHECK) {
1371 if (printk_ratelimit()) 1389 if (printk_ratelimit())
1372 dev_err(&ha->pdev->dev, 1390 dev_err(&ha->pdev->dev,
1373 "failed to write through agent\n"); 1391 "failed to write through agent.\n");
1374 ret = -1; 1392 ret = -1;
1375 break; 1393 break;
1376 } 1394 }
@@ -1460,7 +1478,7 @@ qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1460 if (j >= MAX_CTL_CHECK) { 1478 if (j >= MAX_CTL_CHECK) {
1461 if (printk_ratelimit()) 1479 if (printk_ratelimit())
1462 dev_err(&ha->pdev->dev, 1480 dev_err(&ha->pdev->dev,
1463 "failed to read through agent\n"); 1481 "failed to read through agent.\n");
1464 break; 1482 break;
1465 } 1483 }
1466 1484
@@ -1633,17 +1651,15 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1633 uint32_t len = 0; 1651 uint32_t len = 0;
1634 1652
1635 if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) { 1653 if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
1636 qla_printk(KERN_WARNING, ha, 1654 ql_log_pci(ql_log_fatal, ha->pdev, 0x000c,
1637 "Failed to reserve selected regions (%s)\n", 1655 "Failed to reserver selected regions.\n");
1638 pci_name(ha->pdev));
1639 goto iospace_error_exit; 1656 goto iospace_error_exit;
1640 } 1657 }
1641 1658
1642 /* Use MMIO operations for all accesses. */ 1659 /* Use MMIO operations for all accesses. */
1643 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { 1660 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
1644 qla_printk(KERN_ERR, ha, 1661 ql_log_pci(ql_log_fatal, ha->pdev, 0x000d,
1645 "region #0 not an MMIO resource (%s), aborting\n", 1662 "Region #0 not an MMIO resource, aborting.\n");
1646 pci_name(ha->pdev));
1647 goto iospace_error_exit; 1663 goto iospace_error_exit;
1648 } 1664 }
1649 1665
@@ -1651,9 +1667,8 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1651 ha->nx_pcibase = 1667 ha->nx_pcibase =
1652 (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len); 1668 (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
1653 if (!ha->nx_pcibase) { 1669 if (!ha->nx_pcibase) {
1654 qla_printk(KERN_ERR, ha, 1670 ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
1655 "cannot remap pcibase MMIO (%s), aborting\n", 1671 "Cannot remap pcibase MMIO, aborting.\n");
1656 pci_name(ha->pdev));
1657 pci_release_regions(ha->pdev); 1672 pci_release_regions(ha->pdev);
1658 goto iospace_error_exit; 1673 goto iospace_error_exit;
1659 } 1674 }
@@ -1667,9 +1682,8 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1667 (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) + 1682 (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
1668 (ha->pdev->devfn << 12)), 4); 1683 (ha->pdev->devfn << 12)), 4);
1669 if (!ha->nxdb_wr_ptr) { 1684 if (!ha->nxdb_wr_ptr) {
1670 qla_printk(KERN_ERR, ha, 1685 ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
1671 "cannot remap MMIO (%s), aborting\n", 1686 "Cannot remap MMIO, aborting.\n");
1672 pci_name(ha->pdev));
1673 pci_release_regions(ha->pdev); 1687 pci_release_regions(ha->pdev);
1674 goto iospace_error_exit; 1688 goto iospace_error_exit;
1675 } 1689 }
@@ -1687,6 +1701,16 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1687 1701
1688 ha->max_req_queues = ha->max_rsp_queues = 1; 1702 ha->max_req_queues = ha->max_rsp_queues = 1;
1689 ha->msix_count = ha->max_rsp_queues + 1; 1703 ha->msix_count = ha->max_rsp_queues + 1;
1704 ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
1705 "nx_pci_base=%p iobase=%p "
1706 "max_req_queues=%d msix_count=%d.\n",
1707 ha->nx_pcibase, ha->iobase,
1708 ha->max_req_queues, ha->msix_count);
1709 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
1710 "nx_pci_base=%p iobase=%p "
1711 "max_req_queues=%d msix_count=%d.\n",
1712 ha->nx_pcibase, ha->iobase,
1713 ha->max_req_queues, ha->msix_count);
1690 return 0; 1714 return 0;
1691 1715
1692iospace_error_exit: 1716iospace_error_exit:
@@ -1712,6 +1736,9 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
1712 pci_set_master(ha->pdev); 1736 pci_set_master(ha->pdev);
1713 ret = pci_set_mwi(ha->pdev); 1737 ret = pci_set_mwi(ha->pdev);
1714 ha->chip_revision = ha->pdev->revision; 1738 ha->chip_revision = ha->pdev->revision;
1739 ql_dbg(ql_dbg_init, vha, 0x0043,
1740 "Chip revision:%ld.\n",
1741 ha->chip_revision);
1715 return 0; 1742 return 0;
1716} 1743}
1717 1744
@@ -1877,6 +1904,7 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1877{ 1904{
1878 u32 val = 0; 1905 u32 val = 0;
1879 int retries = 60; 1906 int retries = 60;
1907 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1880 1908
1881 do { 1909 do {
1882 read_lock(&ha->hw_lock); 1910 read_lock(&ha->hw_lock);
@@ -1892,15 +1920,15 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1892 default: 1920 default:
1893 break; 1921 break;
1894 } 1922 }
1895 qla_printk(KERN_WARNING, ha, 1923 ql_log(ql_log_info, vha, 0x00a8,
1896 "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n", 1924 "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n",
1897 val, retries); 1925 val, retries);
1898 1926
1899 msleep(500); 1927 msleep(500);
1900 1928
1901 } while (--retries); 1929 } while (--retries);
1902 1930
1903 qla_printk(KERN_INFO, ha, 1931 ql_log(ql_log_fatal, vha, 0x00a9,
1904 "Cmd Peg initialization failed: 0x%x.\n", val); 1932 "Cmd Peg initialization failed: 0x%x.\n", val);
1905 1933
1906 val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); 1934 val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
@@ -1915,6 +1943,7 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1915{ 1943{
1916 u32 val = 0; 1944 u32 val = 0;
1917 int retries = 60; 1945 int retries = 60;
1946 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1918 1947
1919 do { 1948 do {
1920 read_lock(&ha->hw_lock); 1949 read_lock(&ha->hw_lock);
@@ -1930,17 +1959,16 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1930 default: 1959 default:
1931 break; 1960 break;
1932 } 1961 }
1933 1962 ql_log(ql_log_info, vha, 0x00ab,
1934 qla_printk(KERN_WARNING, ha, 1963 "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n",
1935 "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n", 1964 val, retries);
1936 val, retries);
1937 1965
1938 msleep(500); 1966 msleep(500);
1939 1967
1940 } while (--retries); 1968 } while (--retries);
1941 1969
1942 qla_printk(KERN_INFO, ha, 1970 ql_log(ql_log_fatal, vha, 0x00ac,
1943 "Rcv Peg initialization failed: 0x%x.\n", val); 1971 "Rcv Peg initializatin failed: 0x%x.\n", val);
1944 read_lock(&ha->hw_lock); 1972 read_lock(&ha->hw_lock);
1945 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED); 1973 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
1946 read_unlock(&ha->hw_lock); 1974 read_unlock(&ha->hw_lock);
@@ -1989,13 +2017,11 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1989 } 2017 }
1990 2018
1991 if (ha->mcp) { 2019 if (ha->mcp) {
1992 DEBUG3_11(printk(KERN_INFO "%s(%ld): " 2020 ql_dbg(ql_dbg_async, vha, 0x5052,
1993 "Got mailbox completion. cmd=%x.\n", 2021 "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
1994 __func__, vha->host_no, ha->mcp->mb[0]));
1995 } else { 2022 } else {
1996 qla_printk(KERN_INFO, ha, 2023 ql_dbg(ql_dbg_async, vha, 0x5053,
1997 "%s(%ld): MBX pointer ERROR!\n", 2024 "MBX pointer ERROR.\n");
1998 __func__, vha->host_no);
1999 } 2025 }
2000} 2026}
2001 2027
@@ -2019,13 +2045,13 @@ qla82xx_intr_handler(int irq, void *dev_id)
2019 int status = 0, status1 = 0; 2045 int status = 0, status1 = 0;
2020 unsigned long flags; 2046 unsigned long flags;
2021 unsigned long iter; 2047 unsigned long iter;
2022 uint32_t stat; 2048 uint32_t stat = 0;
2023 uint16_t mb[4]; 2049 uint16_t mb[4];
2024 2050
2025 rsp = (struct rsp_que *) dev_id; 2051 rsp = (struct rsp_que *) dev_id;
2026 if (!rsp) { 2052 if (!rsp) {
2027 printk(KERN_INFO 2053 printk(KERN_INFO
2028 "%s(): NULL response queue pointer\n", __func__); 2054 "%s(): NULL response queue pointer.\n", __func__);
2029 return IRQ_NONE; 2055 return IRQ_NONE;
2030 } 2056 }
2031 ha = rsp->hw; 2057 ha = rsp->hw;
@@ -2075,9 +2101,9 @@ qla82xx_intr_handler(int irq, void *dev_id)
2075 qla24xx_process_response_queue(vha, rsp); 2101 qla24xx_process_response_queue(vha, rsp);
2076 break; 2102 break;
2077 default: 2103 default:
2078 DEBUG2(printk("scsi(%ld): " 2104 ql_dbg(ql_dbg_async, vha, 0x5054,
2079 " Unrecognized interrupt type (%d).\n", 2105 "Unrecognized interrupt type (%d).\n",
2080 vha->host_no, stat & 0xff)); 2106 stat & 0xff);
2081 break; 2107 break;
2082 } 2108 }
2083 } 2109 }
@@ -2089,8 +2115,8 @@ qla82xx_intr_handler(int irq, void *dev_id)
2089 2115
2090#ifdef QL_DEBUG_LEVEL_17 2116#ifdef QL_DEBUG_LEVEL_17
2091 if (!irq && ha->flags.eeh_busy) 2117 if (!irq && ha->flags.eeh_busy)
2092 qla_printk(KERN_WARNING, ha, 2118 ql_log(ql_log_warn, vha, 0x503d,
2093 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n", 2119 "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2094 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); 2120 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2095#endif 2121#endif
2096 2122
@@ -2111,13 +2137,13 @@ qla82xx_msix_default(int irq, void *dev_id)
2111 struct device_reg_82xx __iomem *reg; 2137 struct device_reg_82xx __iomem *reg;
2112 int status = 0; 2138 int status = 0;
2113 unsigned long flags; 2139 unsigned long flags;
2114 uint32_t stat; 2140 uint32_t stat = 0;
2115 uint16_t mb[4]; 2141 uint16_t mb[4];
2116 2142
2117 rsp = (struct rsp_que *) dev_id; 2143 rsp = (struct rsp_que *) dev_id;
2118 if (!rsp) { 2144 if (!rsp) {
2119 printk(KERN_INFO 2145 printk(KERN_INFO
2120 "%s(): NULL response queue pointer\n", __func__); 2146 "%s(): NULL response queue pointer.\n", __func__);
2121 return IRQ_NONE; 2147 return IRQ_NONE;
2122 } 2148 }
2123 ha = rsp->hw; 2149 ha = rsp->hw;
@@ -2149,9 +2175,9 @@ qla82xx_msix_default(int irq, void *dev_id)
2149 qla24xx_process_response_queue(vha, rsp); 2175 qla24xx_process_response_queue(vha, rsp);
2150 break; 2176 break;
2151 default: 2177 default:
2152 DEBUG2(printk("scsi(%ld): " 2178 ql_dbg(ql_dbg_async, vha, 0x5041,
2153 " Unrecognized interrupt type (%d).\n", 2179 "Unrecognized interrupt type (%d).\n",
2154 vha->host_no, stat & 0xff)); 2180 stat & 0xff);
2155 break; 2181 break;
2156 } 2182 }
2157 } 2183 }
@@ -2162,9 +2188,9 @@ qla82xx_msix_default(int irq, void *dev_id)
2162 2188
2163#ifdef QL_DEBUG_LEVEL_17 2189#ifdef QL_DEBUG_LEVEL_17
2164 if (!irq && ha->flags.eeh_busy) 2190 if (!irq && ha->flags.eeh_busy)
2165 qla_printk(KERN_WARNING, ha, 2191 ql_log(ql_log_warn, vha, 0x5044,
2166 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n", 2192 "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2167 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); 2193 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2168#endif 2194#endif
2169 2195
2170 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2196 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
@@ -2186,7 +2212,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
2186 rsp = (struct rsp_que *) dev_id; 2212 rsp = (struct rsp_que *) dev_id;
2187 if (!rsp) { 2213 if (!rsp) {
2188 printk(KERN_INFO 2214 printk(KERN_INFO
2189 "%s(): NULL response queue pointer\n", __func__); 2215 "%s(): NULL response queue pointer.\n", __func__);
2190 return IRQ_NONE; 2216 return IRQ_NONE;
2191 } 2217 }
2192 2218
@@ -2215,7 +2241,7 @@ qla82xx_poll(int irq, void *dev_id)
2215 rsp = (struct rsp_que *) dev_id; 2241 rsp = (struct rsp_que *) dev_id;
2216 if (!rsp) { 2242 if (!rsp) {
2217 printk(KERN_INFO 2243 printk(KERN_INFO
2218 "%s(): NULL response queue pointer\n", __func__); 2244 "%s(): NULL response queue pointer.\n", __func__);
2219 return; 2245 return;
2220 } 2246 }
2221 ha = rsp->hw; 2247 ha = rsp->hw;
@@ -2245,9 +2271,9 @@ qla82xx_poll(int irq, void *dev_id)
2245 qla24xx_process_response_queue(vha, rsp); 2271 qla24xx_process_response_queue(vha, rsp);
2246 break; 2272 break;
2247 default: 2273 default:
2248 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 2274 ql_dbg(ql_dbg_p3p, vha, 0xb013,
2249 "(%d).\n", 2275 "Unrecognized interrupt type (%d).\n",
2250 vha->host_no, stat & 0xff)); 2276 stat * 0xff);
2251 break; 2277 break;
2252 } 2278 }
2253 } 2279 }
@@ -2347,9 +2373,8 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha)
2347 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2373 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2348 } 2374 }
2349 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2375 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2350 qla_printk(KERN_INFO, ha, 2376 ql_log(ql_log_info, vha, 0x00bb,
2351 "%s(%ld):drv_state = 0x%x\n", 2377 "drv_state = 0x%x.\n", drv_state);
2352 __func__, vha->host_no, drv_state);
2353 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 2378 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2354} 2379}
2355 2380
@@ -2392,8 +2417,8 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
2392 struct qla_hw_data *ha = vha->hw; 2417 struct qla_hw_data *ha = vha->hw;
2393 2418
2394 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) { 2419 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2395 qla_printk(KERN_ERR, ha, 2420 ql_log(ql_log_fatal, vha, 0x009f,
2396 "%s: Error during CRB Initialization\n", __func__); 2421 "Error during CRB initialization.\n");
2397 return QLA_FUNCTION_FAILED; 2422 return QLA_FUNCTION_FAILED;
2398 } 2423 }
2399 udelay(500); 2424 udelay(500);
@@ -2411,27 +2436,27 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
2411 if (ql2xfwloadbin == 2) 2436 if (ql2xfwloadbin == 2)
2412 goto try_blob_fw; 2437 goto try_blob_fw;
2413 2438
2414 qla_printk(KERN_INFO, ha, 2439 ql_log(ql_log_info, vha, 0x00a0,
2415 "Attempting to load firmware from flash\n"); 2440 "Attempting to load firmware from flash.\n");
2416 2441
2417 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { 2442 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2418 qla_printk(KERN_ERR, ha, 2443 ql_log(ql_log_info, vha, 0x00a1,
2419 "Firmware loaded successfully from flash\n"); 2444 "Firmware loaded successully from flash.\n");
2420 return QLA_SUCCESS; 2445 return QLA_SUCCESS;
2421 } else { 2446 } else {
2422 qla_printk(KERN_ERR, ha, 2447 ql_log(ql_log_warn, vha, 0x0108,
2423 "Firmware load from flash failed\n"); 2448 "Firmware load from flash failed.\n");
2424 } 2449 }
2425 2450
2426try_blob_fw: 2451try_blob_fw:
2427 qla_printk(KERN_INFO, ha, 2452 ql_log(ql_log_info, vha, 0x00a2,
2428 "Attempting to load firmware from blob\n"); 2453 "Attempting to load firmware from blob.\n");
2429 2454
2430 /* Load firmware blob. */ 2455 /* Load firmware blob. */
2431 blob = ha->hablob = qla2x00_request_firmware(vha); 2456 blob = ha->hablob = qla2x00_request_firmware(vha);
2432 if (!blob) { 2457 if (!blob) {
2433 qla_printk(KERN_ERR, ha, 2458 ql_log(ql_log_fatal, vha, 0x00a3,
2434 "Firmware image not present.\n"); 2459 "Firmware image not preset.\n");
2435 goto fw_load_failed; 2460 goto fw_load_failed;
2436 } 2461 }
2437 2462
@@ -2441,20 +2466,19 @@ try_blob_fw:
2441 /* Fallback to URI format */ 2466 /* Fallback to URI format */
2442 if (qla82xx_validate_firmware_blob(vha, 2467 if (qla82xx_validate_firmware_blob(vha,
2443 QLA82XX_UNIFIED_ROMIMAGE)) { 2468 QLA82XX_UNIFIED_ROMIMAGE)) {
2444 qla_printk(KERN_ERR, ha, 2469 ql_log(ql_log_fatal, vha, 0x00a4,
2445 "No valid firmware image found!!!"); 2470 "No valid firmware image found.\n");
2446 return QLA_FUNCTION_FAILED; 2471 return QLA_FUNCTION_FAILED;
2447 } 2472 }
2448 } 2473 }
2449 2474
2450 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) { 2475 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2451 qla_printk(KERN_ERR, ha, 2476 ql_log(ql_log_info, vha, 0x00a5,
2452 "%s: Firmware loaded successfully " 2477 "Firmware loaded successfully from binary blob.\n");
2453 " from binary blob\n", __func__);
2454 return QLA_SUCCESS; 2478 return QLA_SUCCESS;
2455 } else { 2479 } else {
2456 qla_printk(KERN_ERR, ha, 2480 ql_log(ql_log_fatal, vha, 0x00a6,
2457 "Firmware load failed from binary blob\n"); 2481 "Firmware load failed for binary blob.\n");
2458 blob->fw = NULL; 2482 blob->fw = NULL;
2459 blob = NULL; 2483 blob = NULL;
2460 goto fw_load_failed; 2484 goto fw_load_failed;
@@ -2486,15 +2510,15 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
2486 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); 2510 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
2487 2511
2488 if (qla82xx_load_fw(vha) != QLA_SUCCESS) { 2512 if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
2489 qla_printk(KERN_INFO, ha, 2513 ql_log(ql_log_fatal, vha, 0x00a7,
2490 "%s: Error trying to start fw!\n", __func__); 2514 "Error trying to start fw.\n");
2491 return QLA_FUNCTION_FAILED; 2515 return QLA_FUNCTION_FAILED;
2492 } 2516 }
2493 2517
2494 /* Handshake with the card before we register the devices. */ 2518 /* Handshake with the card before we register the devices. */
2495 if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) { 2519 if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
2496 qla_printk(KERN_INFO, ha, 2520 ql_log(ql_log_fatal, vha, 0x00aa,
2497 "%s: Error during card handshake!\n", __func__); 2521 "Error during card handshake.\n");
2498 return QLA_FUNCTION_FAILED; 2522 return QLA_FUNCTION_FAILED;
2499 } 2523 }
2500 2524
@@ -2663,8 +2687,11 @@ qla82xx_start_scsi(srb_t *sp)
2663 /* Send marker if required */ 2687 /* Send marker if required */
2664 if (vha->marker_needed != 0) { 2688 if (vha->marker_needed != 0) {
2665 if (qla2x00_marker(vha, req, 2689 if (qla2x00_marker(vha, req,
2666 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) 2690 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2691 ql_log(ql_log_warn, vha, 0x300c,
2692 "qla2x00_marker failed for cmd=%p.\n", cmd);
2667 return QLA_FUNCTION_FAILED; 2693 return QLA_FUNCTION_FAILED;
2694 }
2668 vha->marker_needed = 0; 2695 vha->marker_needed = 0;
2669 } 2696 }
2670 2697
@@ -2701,8 +2728,13 @@ qla82xx_start_scsi(srb_t *sp)
2701 uint16_t i; 2728 uint16_t i;
2702 2729
2703 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds); 2730 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
2704 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) 2731 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2732 ql_dbg(ql_dbg_io, vha, 0x300d,
2733 "Num of DSD list %d is than %d for cmd=%p.\n",
2734 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2735 cmd);
2705 goto queuing_error; 2736 goto queuing_error;
2737 }
2706 2738
2707 if (more_dsd_lists <= ha->gbl_dsd_avail) 2739 if (more_dsd_lists <= ha->gbl_dsd_avail)
2708 goto sufficient_dsds; 2740 goto sufficient_dsds;
@@ -2711,13 +2743,20 @@ qla82xx_start_scsi(srb_t *sp)
2711 2743
2712 for (i = 0; i < more_dsd_lists; i++) { 2744 for (i = 0; i < more_dsd_lists; i++) {
2713 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 2745 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2714 if (!dsd_ptr) 2746 if (!dsd_ptr) {
2747 ql_log(ql_log_fatal, vha, 0x300e,
2748 "Failed to allocate memory for dsd_dma "
2749 "for cmd=%p.\n", cmd);
2715 goto queuing_error; 2750 goto queuing_error;
2751 }
2716 2752
2717 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, 2753 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2718 GFP_ATOMIC, &dsd_ptr->dsd_list_dma); 2754 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2719 if (!dsd_ptr->dsd_addr) { 2755 if (!dsd_ptr->dsd_addr) {
2720 kfree(dsd_ptr); 2756 kfree(dsd_ptr);
2757 ql_log(ql_log_fatal, vha, 0x300f,
2758 "Failed to allocate memory for dsd_addr "
2759 "for cmd=%p.\n", cmd);
2721 goto queuing_error; 2760 goto queuing_error;
2722 } 2761 }
2723 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list); 2762 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
@@ -2742,17 +2781,16 @@ sufficient_dsds:
2742 2781
2743 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2782 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2744 if (!sp->ctx) { 2783 if (!sp->ctx) {
2745 DEBUG(printk(KERN_INFO 2784 ql_log(ql_log_fatal, vha, 0x3010,
2746 "%s(%ld): failed to allocate" 2785 "Failed to allocate ctx for cmd=%p.\n", cmd);
2747 " ctx.\n", __func__, vha->host_no));
2748 goto queuing_error; 2786 goto queuing_error;
2749 } 2787 }
2750 memset(ctx, 0, sizeof(struct ct6_dsd)); 2788 memset(ctx, 0, sizeof(struct ct6_dsd));
2751 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool, 2789 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2752 GFP_ATOMIC, &ctx->fcp_cmnd_dma); 2790 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2753 if (!ctx->fcp_cmnd) { 2791 if (!ctx->fcp_cmnd) {
2754 DEBUG2_3(printk("%s(%ld): failed to allocate" 2792 ql_log(ql_log_fatal, vha, 0x3011,
2755 " fcp_cmnd.\n", __func__, vha->host_no)); 2793 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2756 goto queuing_error_fcp_cmnd; 2794 goto queuing_error_fcp_cmnd;
2757 } 2795 }
2758 2796
@@ -2766,6 +2804,9 @@ sufficient_dsds:
2766 /* SCSI command bigger than 16 bytes must be 2804 /* SCSI command bigger than 16 bytes must be
2767 * multiple of 4 2805 * multiple of 4
2768 */ 2806 */
2807 ql_log(ql_log_warn, vha, 0x3012,
2808 "scsi cmd len %d not multiple of 4 "
2809 "for cmd=%p.\n", cmd->cmd_len, cmd);
2769 goto queuing_error_fcp_cmnd; 2810 goto queuing_error_fcp_cmnd;
2770 } 2811 }
2771 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; 2812 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
@@ -2845,7 +2886,7 @@ sufficient_dsds:
2845 cmd_pkt->entry_status = (uint8_t) rsp->id; 2886 cmd_pkt->entry_status = (uint8_t) rsp->id;
2846 } else { 2887 } else {
2847 struct cmd_type_7 *cmd_pkt; 2888 struct cmd_type_7 *cmd_pkt;
2848 req_cnt = qla24xx_calc_iocbs(tot_dsds); 2889 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2849 if (req->cnt < (req_cnt + 2)) { 2890 if (req->cnt < (req_cnt + 2)) {
2850 cnt = (uint16_t)RD_REG_DWORD_RELAXED( 2891 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2851 &reg->req_q_out[0]); 2892 &reg->req_q_out[0]);
@@ -2979,8 +3020,8 @@ qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
2979 /* Dword reads to flash. */ 3020 /* Dword reads to flash. */
2980 for (i = 0; i < length/4; i++, faddr += 4) { 3021 for (i = 0; i < length/4; i++, faddr += 4) {
2981 if (qla82xx_rom_fast_read(ha, faddr, &val)) { 3022 if (qla82xx_rom_fast_read(ha, faddr, &val)) {
2982 qla_printk(KERN_WARNING, ha, 3023 ql_log(ql_log_warn, vha, 0x0106,
2983 "Do ROM fast read failed\n"); 3024 "Do ROM fast read failed.\n");
2984 goto done_read; 3025 goto done_read;
2985 } 3026 }
2986 dwptr[i] = __constant_cpu_to_le32(val); 3027 dwptr[i] = __constant_cpu_to_le32(val);
@@ -2994,10 +3035,12 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha)
2994{ 3035{
2995 int ret; 3036 int ret;
2996 uint32_t val; 3037 uint32_t val;
3038 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2997 3039
2998 ret = ql82xx_rom_lock_d(ha); 3040 ret = ql82xx_rom_lock_d(ha);
2999 if (ret < 0) { 3041 if (ret < 0) {
3000 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 3042 ql_log(ql_log_warn, vha, 0xb014,
3043 "ROM Lock failed.\n");
3001 return ret; 3044 return ret;
3002 } 3045 }
3003 3046
@@ -3013,7 +3056,8 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha)
3013 } 3056 }
3014 3057
3015 if (qla82xx_write_disable_flash(ha) != 0) 3058 if (qla82xx_write_disable_flash(ha) != 0)
3016 qla_printk(KERN_WARNING, ha, "Write disable failed\n"); 3059 ql_log(ql_log_warn, vha, 0xb015,
3060 "Write disable failed.\n");
3017 3061
3018done_unprotect: 3062done_unprotect:
3019 qla82xx_rom_unlock(ha); 3063 qla82xx_rom_unlock(ha);
@@ -3025,10 +3069,12 @@ qla82xx_protect_flash(struct qla_hw_data *ha)
3025{ 3069{
3026 int ret; 3070 int ret;
3027 uint32_t val; 3071 uint32_t val;
3072 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3028 3073
3029 ret = ql82xx_rom_lock_d(ha); 3074 ret = ql82xx_rom_lock_d(ha);
3030 if (ret < 0) { 3075 if (ret < 0) {
3031 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 3076 ql_log(ql_log_warn, vha, 0xb016,
3077 "ROM Lock failed.\n");
3032 return ret; 3078 return ret;
3033 } 3079 }
3034 3080
@@ -3040,10 +3086,12 @@ qla82xx_protect_flash(struct qla_hw_data *ha)
3040 /* LOCK all sectors */ 3086 /* LOCK all sectors */
3041 ret = qla82xx_write_status_reg(ha, val); 3087 ret = qla82xx_write_status_reg(ha, val);
3042 if (ret < 0) 3088 if (ret < 0)
3043 qla_printk(KERN_WARNING, ha, "Write status register failed\n"); 3089 ql_log(ql_log_warn, vha, 0xb017,
3090 "Write status register failed.\n");
3044 3091
3045 if (qla82xx_write_disable_flash(ha) != 0) 3092 if (qla82xx_write_disable_flash(ha) != 0)
3046 qla_printk(KERN_WARNING, ha, "Write disable failed\n"); 3093 ql_log(ql_log_warn, vha, 0xb018,
3094 "Write disable failed.\n");
3047done_protect: 3095done_protect:
3048 qla82xx_rom_unlock(ha); 3096 qla82xx_rom_unlock(ha);
3049 return ret; 3097 return ret;
@@ -3053,10 +3101,12 @@ static int
3053qla82xx_erase_sector(struct qla_hw_data *ha, int addr) 3101qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3054{ 3102{
3055 int ret = 0; 3103 int ret = 0;
3104 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3056 3105
3057 ret = ql82xx_rom_lock_d(ha); 3106 ret = ql82xx_rom_lock_d(ha);
3058 if (ret < 0) { 3107 if (ret < 0) {
3059 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 3108 ql_log(ql_log_warn, vha, 0xb019,
3109 "ROM Lock failed.\n");
3060 return ret; 3110 return ret;
3061 } 3111 }
3062 3112
@@ -3066,8 +3116,8 @@ qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3066 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE); 3116 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
3067 3117
3068 if (qla82xx_wait_rom_done(ha)) { 3118 if (qla82xx_wait_rom_done(ha)) {
3069 qla_printk(KERN_WARNING, ha, 3119 ql_log(ql_log_warn, vha, 0xb01a,
3070 "Error waiting for rom done\n"); 3120 "Error waiting for rom done.\n");
3071 ret = -1; 3121 ret = -1;
3072 goto done; 3122 goto done;
3073 } 3123 }
@@ -3110,10 +3160,10 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3110 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 3160 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
3111 &optrom_dma, GFP_KERNEL); 3161 &optrom_dma, GFP_KERNEL);
3112 if (!optrom) { 3162 if (!optrom) {
3113 qla_printk(KERN_DEBUG, ha, 3163 ql_log(ql_log_warn, vha, 0xb01b,
3114 "Unable to allocate memory for optrom " 3164 "Unable to allocate memory "
3115 "burst write (%x KB).\n", 3165 "for optron burst write (%x KB).\n",
3116 OPTROM_BURST_SIZE / 1024); 3166 OPTROM_BURST_SIZE / 1024);
3117 } 3167 }
3118 } 3168 }
3119 3169
@@ -3122,8 +3172,8 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3122 3172
3123 ret = qla82xx_unprotect_flash(ha); 3173 ret = qla82xx_unprotect_flash(ha);
3124 if (ret) { 3174 if (ret) {
3125 qla_printk(KERN_WARNING, ha, 3175 ql_log(ql_log_warn, vha, 0xb01c,
3126 "Unable to unprotect flash for update.\n"); 3176 "Unable to unprotect flash for update.\n");
3127 goto write_done; 3177 goto write_done;
3128 } 3178 }
3129 3179
@@ -3133,9 +3183,9 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3133 3183
3134 ret = qla82xx_erase_sector(ha, faddr); 3184 ret = qla82xx_erase_sector(ha, faddr);
3135 if (ret) { 3185 if (ret) {
3136 DEBUG9(qla_printk(KERN_ERR, ha, 3186 ql_log(ql_log_warn, vha, 0xb01d,
3137 "Unable to erase sector: " 3187 "Unable to erase sector: address=%x.\n",
3138 "address=%x.\n", faddr)); 3188 faddr);
3139 break; 3189 break;
3140 } 3190 }
3141 } 3191 }
@@ -3149,12 +3199,12 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3149 (ha->flash_data_off | faddr), 3199 (ha->flash_data_off | faddr),
3150 OPTROM_BURST_DWORDS); 3200 OPTROM_BURST_DWORDS);
3151 if (ret != QLA_SUCCESS) { 3201 if (ret != QLA_SUCCESS) {
3152 qla_printk(KERN_WARNING, ha, 3202 ql_log(ql_log_warn, vha, 0xb01e,
3153 "Unable to burst-write optrom segment " 3203 "Unable to burst-write optrom segment "
3154 "(%x/%x/%llx).\n", ret, 3204 "(%x/%x/%llx).\n", ret,
3155 (ha->flash_data_off | faddr), 3205 (ha->flash_data_off | faddr),
3156 (unsigned long long)optrom_dma); 3206 (unsigned long long)optrom_dma);
3157 qla_printk(KERN_WARNING, ha, 3207 ql_log(ql_log_warn, vha, 0xb01f,
3158 "Reverting to slow-write.\n"); 3208 "Reverting to slow-write.\n");
3159 3209
3160 dma_free_coherent(&ha->pdev->dev, 3210 dma_free_coherent(&ha->pdev->dev,
@@ -3171,16 +3221,16 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3171 ret = qla82xx_write_flash_dword(ha, faddr, 3221 ret = qla82xx_write_flash_dword(ha, faddr,
3172 cpu_to_le32(*dwptr)); 3222 cpu_to_le32(*dwptr));
3173 if (ret) { 3223 if (ret) {
3174 DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program" 3224 ql_dbg(ql_dbg_p3p, vha, 0xb020,
3175 "flash address=%x data=%x.\n", __func__, 3225 "Unable to program flash address=%x data=%x.\n",
3176 ha->host_no, faddr, *dwptr)); 3226 faddr, *dwptr);
3177 break; 3227 break;
3178 } 3228 }
3179 } 3229 }
3180 3230
3181 ret = qla82xx_protect_flash(ha); 3231 ret = qla82xx_protect_flash(ha);
3182 if (ret) 3232 if (ret)
3183 qla_printk(KERN_WARNING, ha, 3233 ql_log(ql_log_warn, vha, 0xb021,
3184 "Unable to protect flash after update.\n"); 3234 "Unable to protect flash after update.\n");
3185write_done: 3235write_done:
3186 if (optrom) 3236 if (optrom)
@@ -3244,9 +3294,12 @@ qla82xx_start_iocbs(srb_t *sp)
3244 3294
3245void qla82xx_rom_lock_recovery(struct qla_hw_data *ha) 3295void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3246{ 3296{
3297 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3298
3247 if (qla82xx_rom_lock(ha)) 3299 if (qla82xx_rom_lock(ha))
3248 /* Someone else is holding the lock. */ 3300 /* Someone else is holding the lock. */
3249 qla_printk(KERN_INFO, ha, "Resetting rom_lock\n"); 3301 ql_log(ql_log_info, vha, 0xb022,
3302 "Resetting rom_lock.\n");
3250 3303
3251 /* 3304 /*
3252 * Either we got the lock, or someone 3305 * Either we got the lock, or someone
@@ -3313,7 +3366,8 @@ qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3313 3366
3314dev_initialize: 3367dev_initialize:
3315 /* set to DEV_INITIALIZING */ 3368 /* set to DEV_INITIALIZING */
3316 qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n"); 3369 ql_log(ql_log_info, vha, 0x009e,
3370 "HW State: INITIALIZING.\n");
3317 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING); 3371 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
3318 3372
3319 /* Driver that sets device state to initializating sets IDC version */ 3373 /* Driver that sets device state to initializating sets IDC version */
@@ -3324,14 +3378,16 @@ dev_initialize:
3324 qla82xx_idc_lock(ha); 3378 qla82xx_idc_lock(ha);
3325 3379
3326 if (rval != QLA_SUCCESS) { 3380 if (rval != QLA_SUCCESS) {
3327 qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); 3381 ql_log(ql_log_fatal, vha, 0x00ad,
3382 "HW State: FAILED.\n");
3328 qla82xx_clear_drv_active(ha); 3383 qla82xx_clear_drv_active(ha);
3329 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); 3384 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
3330 return rval; 3385 return rval;
3331 } 3386 }
3332 3387
3333dev_ready: 3388dev_ready:
3334 qla_printk(KERN_INFO, ha, "HW State: READY\n"); 3389 ql_log(ql_log_info, vha, 0x00ae,
3390 "HW State: READY.\n");
3335 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); 3391 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
3336 3392
3337 return QLA_SUCCESS; 3393 return QLA_SUCCESS;
@@ -3376,15 +3432,15 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3376 /* quiescence timeout, other functions didn't ack 3432 /* quiescence timeout, other functions didn't ack
3377 * changing the state to DEV_READY 3433 * changing the state to DEV_READY
3378 */ 3434 */
3379 qla_printk(KERN_INFO, ha, 3435 ql_log(ql_log_info, vha, 0xb023,
3380 "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME); 3436 "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
3381 qla_printk(KERN_INFO, ha, 3437 ql_log(ql_log_info, vha, 0xb024,
3382 "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active, 3438 "DRV_ACTIVE:%d DRV_STATE:%d.\n",
3383 drv_state); 3439 drv_active, drv_state);
3384 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3440 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3385 QLA82XX_DEV_READY); 3441 QLA82XX_DEV_READY);
3386 qla_printk(KERN_INFO, ha, 3442 ql_log(ql_log_info, vha, 0xb025,
3387 "HW State: DEV_READY\n"); 3443 "HW State: DEV_READY.\n");
3388 qla82xx_idc_unlock(ha); 3444 qla82xx_idc_unlock(ha);
3389 qla2x00_perform_loop_resync(vha); 3445 qla2x00_perform_loop_resync(vha);
3390 qla82xx_idc_lock(ha); 3446 qla82xx_idc_lock(ha);
@@ -3404,7 +3460,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3404 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3460 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3405 /* everyone acked so set the state to DEV_QUIESCENCE */ 3461 /* everyone acked so set the state to DEV_QUIESCENCE */
3406 if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) { 3462 if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
3407 qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n"); 3463 ql_log(ql_log_info, vha, 0xb026,
3464 "HW State: DEV_QUIESCENT.\n");
3408 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT); 3465 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
3409 } 3466 }
3410} 3467}
@@ -3441,7 +3498,8 @@ qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3441 struct qla_hw_data *ha = vha->hw; 3498 struct qla_hw_data *ha = vha->hw;
3442 3499
3443 /* Disable the board */ 3500 /* Disable the board */
3444 qla_printk(KERN_INFO, ha, "Disabling the board\n"); 3501 ql_log(ql_log_fatal, vha, 0x00b8,
3502 "Disabling the board.\n");
3445 3503
3446 qla82xx_idc_lock(ha); 3504 qla82xx_idc_lock(ha);
3447 qla82xx_clear_drv_active(ha); 3505 qla82xx_clear_drv_active(ha);
@@ -3492,8 +3550,8 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3492 3550
3493 while (drv_state != drv_active) { 3551 while (drv_state != drv_active) {
3494 if (time_after_eq(jiffies, reset_timeout)) { 3552 if (time_after_eq(jiffies, reset_timeout)) {
3495 qla_printk(KERN_INFO, ha, 3553 ql_log(ql_log_warn, vha, 0x00b5,
3496 "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME); 3554 "Reset timeout.\n");
3497 break; 3555 break;
3498 } 3556 }
3499 qla82xx_idc_unlock(ha); 3557 qla82xx_idc_unlock(ha);
@@ -3504,12 +3562,15 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3504 } 3562 }
3505 3563
3506 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3564 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3507 qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state, 3565 ql_log(ql_log_info, vha, 0x00b6,
3508 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3566 "Device state is 0x%x = %s.\n",
3567 dev_state,
3568 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3509 3569
3510 /* Force to DEV_COLD unless someone else is starting a reset */ 3570 /* Force to DEV_COLD unless someone else is starting a reset */
3511 if (dev_state != QLA82XX_DEV_INITIALIZING) { 3571 if (dev_state != QLA82XX_DEV_INITIALIZING) {
3512 qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n"); 3572 ql_log(ql_log_info, vha, 0x00b7,
3573 "HW State: COLD/RE-INIT.\n");
3513 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 3574 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3514 } 3575 }
3515} 3576}
@@ -3523,8 +3584,12 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3523 fw_heartbeat_counter = qla82xx_rd_32(vha->hw, 3584 fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
3524 QLA82XX_PEG_ALIVE_COUNTER); 3585 QLA82XX_PEG_ALIVE_COUNTER);
3525 /* all 0xff, assume AER/EEH in progress, ignore */ 3586 /* all 0xff, assume AER/EEH in progress, ignore */
3526 if (fw_heartbeat_counter == 0xffffffff) 3587 if (fw_heartbeat_counter == 0xffffffff) {
3588 ql_dbg(ql_dbg_timer, vha, 0x6003,
3589 "FW heartbeat counter is 0xffffffff, "
3590 "returning status=%d.\n", status);
3527 return status; 3591 return status;
3592 }
3528 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { 3593 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3529 vha->seconds_since_last_heartbeat++; 3594 vha->seconds_since_last_heartbeat++;
3530 /* FW not alive after 2 seconds */ 3595 /* FW not alive after 2 seconds */
@@ -3535,6 +3600,9 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3535 } else 3600 } else
3536 vha->seconds_since_last_heartbeat = 0; 3601 vha->seconds_since_last_heartbeat = 0;
3537 vha->fw_heartbeat_counter = fw_heartbeat_counter; 3602 vha->fw_heartbeat_counter = fw_heartbeat_counter;
3603 if (status)
3604 ql_dbg(ql_dbg_timer, vha, 0x6004,
3605 "Returning status=%d.\n", status);
3538 return status; 3606 return status;
3539} 3607}
3540 3608
@@ -3565,8 +3633,10 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3565 3633
3566 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3634 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3567 old_dev_state = dev_state; 3635 old_dev_state = dev_state;
3568 qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state, 3636 ql_log(ql_log_info, vha, 0x009b,
3569 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3637 "Device state is 0x%x = %s.\n",
3638 dev_state,
3639 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3570 3640
3571 /* wait for 30 seconds for device to go ready */ 3641 /* wait for 30 seconds for device to go ready */
3572 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); 3642 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -3574,9 +3644,8 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3574 while (1) { 3644 while (1) {
3575 3645
3576 if (time_after_eq(jiffies, dev_init_timeout)) { 3646 if (time_after_eq(jiffies, dev_init_timeout)) {
3577 DEBUG(qla_printk(KERN_INFO, ha, 3647 ql_log(ql_log_fatal, vha, 0x009c,
3578 "%s: device init failed!\n", 3648 "Device init failed.\n");
3579 QLA2XXX_DRIVER_NAME));
3580 rval = QLA_FUNCTION_FAILED; 3649 rval = QLA_FUNCTION_FAILED;
3581 break; 3650 break;
3582 } 3651 }
@@ -3586,10 +3655,11 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3586 old_dev_state = dev_state; 3655 old_dev_state = dev_state;
3587 } 3656 }
3588 if (loopcount < 5) { 3657 if (loopcount < 5) {
3589 qla_printk(KERN_INFO, ha, 3658 ql_log(ql_log_info, vha, 0x009d,
3590 "2:Device state is 0x%x = %s\n", dev_state, 3659 "Device state is 0x%x = %s.\n",
3591 dev_state < MAX_STATES ? 3660 dev_state,
3592 qdev_state[dev_state] : "Unknown"); 3661 dev_state < MAX_STATES ? qdev_state[dev_state] :
3662 "Unknown");
3593 } 3663 }
3594 3664
3595 switch (dev_state) { 3665 switch (dev_state) {
@@ -3656,29 +3726,26 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3656 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3726 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3657 if (dev_state == QLA82XX_DEV_NEED_RESET && 3727 if (dev_state == QLA82XX_DEV_NEED_RESET &&
3658 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { 3728 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3659 qla_printk(KERN_WARNING, ha, 3729 ql_log(ql_log_warn, vha, 0x6001,
3660 "scsi(%ld) %s: Adapter reset needed!\n", 3730 "Adapter reset needed.\n");
3661 vha->host_no, __func__);
3662 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3731 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3663 qla2xxx_wake_dpc(vha); 3732 qla2xxx_wake_dpc(vha);
3664 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 3733 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
3665 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { 3734 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
3666 DEBUG(qla_printk(KERN_INFO, ha, 3735 ql_log(ql_log_warn, vha, 0x6002,
3667 "scsi(%ld) %s - detected quiescence needed\n", 3736 "Quiescent needed.\n");
3668 vha->host_no, __func__));
3669 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 3737 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3670 qla2xxx_wake_dpc(vha); 3738 qla2xxx_wake_dpc(vha);
3671 } else { 3739 } else {
3672 if (qla82xx_check_fw_alive(vha)) { 3740 if (qla82xx_check_fw_alive(vha)) {
3673 halt_status = qla82xx_rd_32(ha, 3741 halt_status = qla82xx_rd_32(ha,
3674 QLA82XX_PEG_HALT_STATUS1); 3742 QLA82XX_PEG_HALT_STATUS1);
3675 qla_printk(KERN_INFO, ha, 3743 ql_dbg(ql_dbg_timer, vha, 0x6005,
3676 "scsi(%ld): %s, Dumping hw/fw registers:\n " 3744 "dumping hw/fw registers:.\n "
3677 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n " 3745 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
3678 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n " 3746 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
3679 " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n " 3747 " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n "
3680 " PEG_NET_4_PC: 0x%x\n", 3748 " PEG_NET_4_PC: 0x%x.\n", halt_status,
3681 vha->host_no, __func__, halt_status,
3682 qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2), 3749 qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
3683 qla82xx_rd_32(ha, 3750 qla82xx_rd_32(ha,
3684 QLA82XX_CRB_PEG_NET_0 + 0x3c), 3751 QLA82XX_CRB_PEG_NET_0 + 0x3c),
@@ -3694,9 +3761,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3694 set_bit(ISP_UNRECOVERABLE, 3761 set_bit(ISP_UNRECOVERABLE,
3695 &vha->dpc_flags); 3762 &vha->dpc_flags);
3696 } else { 3763 } else {
3697 qla_printk(KERN_INFO, ha, 3764 ql_log(ql_log_info, vha, 0x6006,
3698 "scsi(%ld): %s - detect abort needed\n", 3765 "Detect abort needed.\n");
3699 vha->host_no, __func__);
3700 set_bit(ISP_ABORT_NEEDED, 3766 set_bit(ISP_ABORT_NEEDED,
3701 &vha->dpc_flags); 3767 &vha->dpc_flags);
3702 } 3768 }
@@ -3704,10 +3770,10 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3704 ha->flags.isp82xx_fw_hung = 1; 3770 ha->flags.isp82xx_fw_hung = 1;
3705 if (ha->flags.mbox_busy) { 3771 if (ha->flags.mbox_busy) {
3706 ha->flags.mbox_int = 1; 3772 ha->flags.mbox_int = 1;
3707 DEBUG2(qla_printk(KERN_ERR, ha, 3773 ql_log(ql_log_warn, vha, 0x6007,
3708 "scsi(%ld) Due to fw hung, doing " 3774 "Due to FW hung, doing "
3709 "premature completion of mbx " 3775 "premature completion of mbx "
3710 "command\n", vha->host_no)); 3776 "command.\n");
3711 if (test_bit(MBX_INTR_WAIT, 3777 if (test_bit(MBX_INTR_WAIT,
3712 &ha->mbx_cmd_flags)) 3778 &ha->mbx_cmd_flags))
3713 complete(&ha->mbx_intr_comp); 3779 complete(&ha->mbx_intr_comp);
@@ -3742,9 +3808,8 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3742 uint32_t dev_state; 3808 uint32_t dev_state;
3743 3809
3744 if (vha->device_flags & DFLG_DEV_FAILED) { 3810 if (vha->device_flags & DFLG_DEV_FAILED) {
3745 qla_printk(KERN_WARNING, ha, 3811 ql_log(ql_log_warn, vha, 0x8024,
3746 "%s(%ld): Device in failed state, " 3812 "Device in failed state, exiting.\n");
3747 "Exiting.\n", __func__, vha->host_no);
3748 return QLA_SUCCESS; 3813 return QLA_SUCCESS;
3749 } 3814 }
3750 ha->flags.isp82xx_reset_hdlr_active = 1; 3815 ha->flags.isp82xx_reset_hdlr_active = 1;
@@ -3752,13 +3817,14 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3752 qla82xx_idc_lock(ha); 3817 qla82xx_idc_lock(ha);
3753 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3818 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3754 if (dev_state == QLA82XX_DEV_READY) { 3819 if (dev_state == QLA82XX_DEV_READY) {
3755 qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n"); 3820 ql_log(ql_log_info, vha, 0x8025,
3821 "HW State: NEED RESET.\n");
3756 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3822 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3757 QLA82XX_DEV_NEED_RESET); 3823 QLA82XX_DEV_NEED_RESET);
3758 } else 3824 } else
3759 qla_printk(KERN_INFO, ha, "HW State: %s\n", 3825 ql_log(ql_log_info, vha, 0x8026,
3760 dev_state < MAX_STATES ? 3826 "Hw State: %s.\n", dev_state < MAX_STATES ?
3761 qdev_state[dev_state] : "Unknown"); 3827 qdev_state[dev_state] : "Unknown");
3762 qla82xx_idc_unlock(ha); 3828 qla82xx_idc_unlock(ha);
3763 3829
3764 rval = qla82xx_device_state_handler(vha); 3830 rval = qla82xx_device_state_handler(vha);
@@ -3777,9 +3843,9 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3777 vha->flags.online = 1; 3843 vha->flags.online = 1;
3778 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 3844 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3779 if (ha->isp_abort_cnt == 0) { 3845 if (ha->isp_abort_cnt == 0) {
3780 qla_printk(KERN_WARNING, ha, 3846 ql_log(ql_log_warn, vha, 0x8027,
3781 "ISP error recovery failed - " 3847 "ISP error recover failed - board "
3782 "board disabled\n"); 3848 "disabled.\n");
3783 /* 3849 /*
3784 * The next call disables the board 3850 * The next call disables the board
3785 * completely. 3851 * completely.
@@ -3791,16 +3857,16 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3791 rval = QLA_SUCCESS; 3857 rval = QLA_SUCCESS;
3792 } else { /* schedule another ISP abort */ 3858 } else { /* schedule another ISP abort */
3793 ha->isp_abort_cnt--; 3859 ha->isp_abort_cnt--;
3794 DEBUG(qla_printk(KERN_INFO, ha, 3860 ql_log(ql_log_warn, vha, 0x8036,
3795 "qla%ld: ISP abort - retry remaining %d\n", 3861 "ISP abort - retry remaining %d.\n",
3796 vha->host_no, ha->isp_abort_cnt)); 3862 ha->isp_abort_cnt);
3797 rval = QLA_FUNCTION_FAILED; 3863 rval = QLA_FUNCTION_FAILED;
3798 } 3864 }
3799 } else { 3865 } else {
3800 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 3866 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3801 DEBUG(qla_printk(KERN_INFO, ha, 3867 ql_dbg(ql_dbg_taskm, vha, 0x8029,
3802 "(%ld): ISP error recovery - retrying (%d) " 3868 "ISP error recovery - retrying (%d) more times.\n",
3803 "more times\n", vha->host_no, ha->isp_abort_cnt)); 3869 ha->isp_abort_cnt);
3804 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3870 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3805 rval = QLA_FUNCTION_FAILED; 3871 rval = QLA_FUNCTION_FAILED;
3806 } 3872 }
@@ -3872,8 +3938,8 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
3872 break; 3938 break;
3873 } 3939 }
3874 } 3940 }
3875 DEBUG2(printk(KERN_INFO 3941 ql_dbg(ql_dbg_p3p, vha, 0xb027,
3876 "%s status=%d\n", __func__, status)); 3942 "%s status=%d.\n", status);
3877 3943
3878 return status; 3944 return status;
3879} 3945}
@@ -3902,6 +3968,9 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3902 } 3968 }
3903 } 3969 }
3904 } 3970 }
3971 ql_dbg(ql_dbg_init, vha, 0x00b0,
3972 "Entered %s fw_hung=%d.\n",
3973 __func__, ha->flags.isp82xx_fw_hung);
3905 3974
3906 /* Abort all commands gracefully if fw NOT hung */ 3975 /* Abort all commands gracefully if fw NOT hung */
3907 if (!ha->flags.isp82xx_fw_hung) { 3976 if (!ha->flags.isp82xx_fw_hung) {
@@ -3922,13 +3991,13 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3922 spin_unlock_irqrestore( 3991 spin_unlock_irqrestore(
3923 &ha->hardware_lock, flags); 3992 &ha->hardware_lock, flags);
3924 if (ha->isp_ops->abort_command(sp)) { 3993 if (ha->isp_ops->abort_command(sp)) {
3925 qla_printk(KERN_INFO, ha, 3994 ql_log(ql_log_info, vha,
3926 "scsi(%ld): mbx abort command failed in %s\n", 3995 0x00b1,
3927 vha->host_no, __func__); 3996 "mbx abort failed.\n");
3928 } else { 3997 } else {
3929 qla_printk(KERN_INFO, ha, 3998 ql_log(ql_log_info, vha,
3930 "scsi(%ld): mbx abort command success in %s\n", 3999 0x00b2,
3931 vha->host_no, __func__); 4000 "mbx abort success.\n");
3932 } 4001 }
3933 spin_lock_irqsave(&ha->hardware_lock, flags); 4002 spin_lock_irqsave(&ha->hardware_lock, flags);
3934 } 4003 }
@@ -3940,8 +4009,9 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3940 /* Wait for pending cmds (physical and virtual) to complete */ 4009 /* Wait for pending cmds (physical and virtual) to complete */
3941 if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0, 4010 if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
3942 WAIT_HOST) == QLA_SUCCESS) { 4011 WAIT_HOST) == QLA_SUCCESS) {
3943 DEBUG2(qla_printk(KERN_INFO, ha, 4012 ql_dbg(ql_dbg_init, vha, 0x00b3,
3944 "Done wait for pending commands\n")); 4013 "Done wait for "
4014 "pending commands.\n");
3945 } 4015 }
3946 } 4016 }
3947} 4017}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f461925a9dfc..e02df276804e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -35,6 +35,10 @@ static struct kmem_cache *srb_cachep;
35 * CT6 CTX allocation cache 35 * CT6 CTX allocation cache
36 */ 36 */
37static struct kmem_cache *ctx_cachep; 37static struct kmem_cache *ctx_cachep;
38/*
39 * error level for logging
40 */
41int ql_errlev = ql_log_all;
38 42
39int ql2xlogintimeout = 20; 43int ql2xlogintimeout = 20;
40module_param(ql2xlogintimeout, int, S_IRUGO); 44module_param(ql2xlogintimeout, int, S_IRUGO);
@@ -69,8 +73,17 @@ MODULE_PARM_DESC(ql2xallocfwdump,
69int ql2xextended_error_logging; 73int ql2xextended_error_logging;
70module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR); 74module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
71MODULE_PARM_DESC(ql2xextended_error_logging, 75MODULE_PARM_DESC(ql2xextended_error_logging,
72 "Option to enable extended error logging, " 76 "Option to enable extended error logging,\n"
73 "Default is 0 - no logging. 1 - log errors."); 77 "\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
78 "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
79 "\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
80 "\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
81 "\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
82 "\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
83 "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
84 "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
85 "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
86 "\t\tDo LOGICAL OR of the value to enable more than one level");
74 87
75int ql2xshiftctondsd = 6; 88int ql2xshiftctondsd = 6;
76module_param(ql2xshiftctondsd, int, S_IRUGO); 89module_param(ql2xshiftctondsd, int, S_IRUGO);
@@ -128,8 +141,8 @@ MODULE_PARM_DESC(ql2xmultique_tag,
128int ql2xfwloadbin; 141int ql2xfwloadbin;
129module_param(ql2xfwloadbin, int, S_IRUGO); 142module_param(ql2xfwloadbin, int, S_IRUGO);
130MODULE_PARM_DESC(ql2xfwloadbin, 143MODULE_PARM_DESC(ql2xfwloadbin,
131 "Option to specify location from which to load ISP firmware:\n" 144 "Option to specify location from which to load ISP firmware:.\n"
132 " 2 -- load firmware via the request_firmware() (hotplug)\n" 145 " 2 -- load firmware via the request_firmware() (hotplug).\n"
133 " interface.\n" 146 " interface.\n"
134 " 1 -- load firmware from flash.\n" 147 " 1 -- load firmware from flash.\n"
135 " 0 -- use default semantics.\n"); 148 " 0 -- use default semantics.\n");
@@ -143,7 +156,7 @@ MODULE_PARM_DESC(ql2xetsenable,
143int ql2xdbwr = 1; 156int ql2xdbwr = 1;
144module_param(ql2xdbwr, int, S_IRUGO); 157module_param(ql2xdbwr, int, S_IRUGO);
145MODULE_PARM_DESC(ql2xdbwr, 158MODULE_PARM_DESC(ql2xdbwr,
146 "Option to specify scheme for request queue posting\n" 159 "Option to specify scheme for request queue posting.\n"
147 " 0 -- Regular doorbell.\n" 160 " 0 -- Regular doorbell.\n"
148 " 1 -- CAMRAM doorbell (faster).\n"); 161 " 1 -- CAMRAM doorbell (faster).\n");
149 162
@@ -168,7 +181,7 @@ MODULE_PARM_DESC(ql2xasynctmfenable,
168int ql2xdontresethba; 181int ql2xdontresethba;
169module_param(ql2xdontresethba, int, S_IRUGO); 182module_param(ql2xdontresethba, int, S_IRUGO);
170MODULE_PARM_DESC(ql2xdontresethba, 183MODULE_PARM_DESC(ql2xdontresethba,
171 "Option to specify reset behaviour\n" 184 "Option to specify reset behaviour.\n"
172 " 0 (Default) -- Reset on failure.\n" 185 " 0 (Default) -- Reset on failure.\n"
173 " 1 -- Do not reset on failure.\n"); 186 " 1 -- Do not reset on failure.\n");
174 187
@@ -247,8 +260,11 @@ static inline void
247qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) 260qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
248{ 261{
249 /* Currently used for 82XX only. */ 262 /* Currently used for 82XX only. */
250 if (vha->device_flags & DFLG_DEV_FAILED) 263 if (vha->device_flags & DFLG_DEV_FAILED) {
264 ql_dbg(ql_dbg_timer, vha, 0x600d,
265 "Device in a failed state, returning.\n");
251 return; 266 return;
267 }
252 268
253 mod_timer(&vha->timer, jiffies + interval * HZ); 269 mod_timer(&vha->timer, jiffies + interval * HZ);
254} 270}
@@ -273,19 +289,20 @@ static void qla2x00_sp_free_dma(srb_t *);
273/* -------------------------------------------------------------------------- */ 289/* -------------------------------------------------------------------------- */
274static int qla2x00_alloc_queues(struct qla_hw_data *ha) 290static int qla2x00_alloc_queues(struct qla_hw_data *ha)
275{ 291{
292 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
276 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, 293 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
277 GFP_KERNEL); 294 GFP_KERNEL);
278 if (!ha->req_q_map) { 295 if (!ha->req_q_map) {
279 qla_printk(KERN_WARNING, ha, 296 ql_log(ql_log_fatal, vha, 0x003b,
280 "Unable to allocate memory for request queue ptrs\n"); 297 "Unable to allocate memory for request queue ptrs.\n");
281 goto fail_req_map; 298 goto fail_req_map;
282 } 299 }
283 300
284 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues, 301 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
285 GFP_KERNEL); 302 GFP_KERNEL);
286 if (!ha->rsp_q_map) { 303 if (!ha->rsp_q_map) {
287 qla_printk(KERN_WARNING, ha, 304 ql_log(ql_log_fatal, vha, 0x003c,
288 "Unable to allocate memory for response queue ptrs\n"); 305 "Unable to allocate memory for response queue ptrs.\n");
289 goto fail_rsp_map; 306 goto fail_rsp_map;
290 } 307 }
291 set_bit(0, ha->rsp_qid_map); 308 set_bit(0, ha->rsp_qid_map);
@@ -349,8 +366,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
349 struct qla_hw_data *ha = vha->hw; 366 struct qla_hw_data *ha = vha->hw;
350 367
351 if (!(ha->fw_attributes & BIT_6)) { 368 if (!(ha->fw_attributes & BIT_6)) {
352 qla_printk(KERN_INFO, ha, 369 ql_log(ql_log_warn, vha, 0x00d8,
353 "Firmware is not multi-queue capable\n"); 370 "Firmware is not multi-queue capable.\n");
354 goto fail; 371 goto fail;
355 } 372 }
356 if (ql2xmultique_tag) { 373 if (ql2xmultique_tag) {
@@ -359,8 +376,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
359 req = qla25xx_create_req_que(ha, options, 0, 0, -1, 376 req = qla25xx_create_req_que(ha, options, 0, 0, -1,
360 QLA_DEFAULT_QUE_QOS); 377 QLA_DEFAULT_QUE_QOS);
361 if (!req) { 378 if (!req) {
362 qla_printk(KERN_WARNING, ha, 379 ql_log(ql_log_warn, vha, 0x00e0,
363 "Can't create request queue\n"); 380 "Failed to create request queue.\n");
364 goto fail; 381 goto fail;
365 } 382 }
366 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1); 383 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
@@ -369,17 +386,20 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
369 for (ques = 1; ques < ha->max_rsp_queues; ques++) { 386 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
370 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req); 387 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
371 if (!ret) { 388 if (!ret) {
372 qla_printk(KERN_WARNING, ha, 389 ql_log(ql_log_warn, vha, 0x00e8,
373 "Response Queue create failed\n"); 390 "Failed to create response queue.\n");
374 goto fail2; 391 goto fail2;
375 } 392 }
376 } 393 }
377 ha->flags.cpu_affinity_enabled = 1; 394 ha->flags.cpu_affinity_enabled = 1;
378 395 ql_dbg(ql_dbg_multiq, vha, 0xc007,
379 DEBUG2(qla_printk(KERN_INFO, ha, 396 "CPU affinity mode enalbed, "
380 "CPU affinity mode enabled, no. of response" 397 "no. of response queues:%d no. of request queues:%d.\n",
381 " queues:%d, no. of request queues:%d\n", 398 ha->max_rsp_queues, ha->max_req_queues);
382 ha->max_rsp_queues, ha->max_req_queues)); 399 ql_dbg(ql_dbg_init, vha, 0x00e9,
400 "CPU affinity mode enalbed, "
401 "no. of response queues:%d no. of request queues:%d.\n",
402 ha->max_rsp_queues, ha->max_req_queues);
383 } 403 }
384 return 0; 404 return 0;
385fail2: 405fail2:
@@ -526,8 +546,11 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
526 struct qla_hw_data *ha = vha->hw; 546 struct qla_hw_data *ha = vha->hw;
527 547
528 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 548 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
529 if (!sp) 549 if (!sp) {
550 ql_log(ql_log_warn, vha, 0x3006,
551 "Memory allocation failed for sp.\n");
530 return sp; 552 return sp;
553 }
531 554
532 atomic_set(&sp->ref_count, 1); 555 atomic_set(&sp->ref_count, 1);
533 sp->fcport = fcport; 556 sp->fcport = fcport;
@@ -551,30 +574,43 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
551 int rval; 574 int rval;
552 575
553 if (ha->flags.eeh_busy) { 576 if (ha->flags.eeh_busy) {
554 if (ha->flags.pci_channel_io_perm_failure) 577 if (ha->flags.pci_channel_io_perm_failure) {
578 ql_dbg(ql_dbg_io, vha, 0x3001,
579 "PCI Channel IO permanent failure, exiting "
580 "cmd=%p.\n", cmd);
555 cmd->result = DID_NO_CONNECT << 16; 581 cmd->result = DID_NO_CONNECT << 16;
556 else 582 } else {
583 ql_dbg(ql_dbg_io, vha, 0x3002,
584 "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
557 cmd->result = DID_REQUEUE << 16; 585 cmd->result = DID_REQUEUE << 16;
586 }
558 goto qc24_fail_command; 587 goto qc24_fail_command;
559 } 588 }
560 589
561 rval = fc_remote_port_chkready(rport); 590 rval = fc_remote_port_chkready(rport);
562 if (rval) { 591 if (rval) {
563 cmd->result = rval; 592 cmd->result = rval;
593 ql_dbg(ql_dbg_io, vha, 0x3003,
594 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
595 cmd, rval);
564 goto qc24_fail_command; 596 goto qc24_fail_command;
565 } 597 }
566 598
567 if (!vha->flags.difdix_supported && 599 if (!vha->flags.difdix_supported &&
568 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 600 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
569 DEBUG2(qla_printk(KERN_ERR, ha, 601 ql_dbg(ql_dbg_io, vha, 0x3004,
570 "DIF Cap Not Reg, fail DIF capable cmd's:%x\n", 602 "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
571 cmd->cmnd[0])); 603 cmd);
572 cmd->result = DID_NO_CONNECT << 16; 604 cmd->result = DID_NO_CONNECT << 16;
573 goto qc24_fail_command; 605 goto qc24_fail_command;
574 } 606 }
575 if (atomic_read(&fcport->state) != FCS_ONLINE) { 607 if (atomic_read(&fcport->state) != FCS_ONLINE) {
576 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 608 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
577 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 609 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
610 ql_dbg(ql_dbg_io, vha, 0x3005,
611 "Returning DNC, fcport_state=%d loop_state=%d.\n",
612 atomic_read(&fcport->state),
613 atomic_read(&base_vha->loop_state));
578 cmd->result = DID_NO_CONNECT << 16; 614 cmd->result = DID_NO_CONNECT << 16;
579 goto qc24_fail_command; 615 goto qc24_fail_command;
580 } 616 }
@@ -586,8 +622,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
586 goto qc24_host_busy; 622 goto qc24_host_busy;
587 623
588 rval = ha->isp_ops->start_scsi(sp); 624 rval = ha->isp_ops->start_scsi(sp);
589 if (rval != QLA_SUCCESS) 625 if (rval != QLA_SUCCESS) {
626 ql_dbg(ql_dbg_io, vha, 0x3013,
627 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
590 goto qc24_host_busy_free_sp; 628 goto qc24_host_busy_free_sp;
629 }
591 630
592 return 0; 631 return 0;
593 632
@@ -630,7 +669,8 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
630 int ret = QLA_SUCCESS; 669 int ret = QLA_SUCCESS;
631 670
632 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) { 671 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
633 DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n")); 672 ql_dbg(ql_dbg_taskm, vha, 0x8005,
673 "Return:eh_wait.\n");
634 return ret; 674 return ret;
635 } 675 }
636 676
@@ -723,7 +763,8 @@ qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
723 else 763 else
724 return_status = QLA_FUNCTION_FAILED; 764 return_status = QLA_FUNCTION_FAILED;
725 765
726 DEBUG2(printk("%s return_status=%d\n", __func__, return_status)); 766 ql_dbg(ql_dbg_taskm, vha, 0x8019,
767 "%s return status=%d.\n", __func__, return_status);
727 768
728 return return_status; 769 return return_status;
729} 770}
@@ -831,10 +872,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
831 int wait = 0; 872 int wait = 0;
832 struct qla_hw_data *ha = vha->hw; 873 struct qla_hw_data *ha = vha->hw;
833 874
875 ql_dbg(ql_dbg_taskm, vha, 0x8000,
876 "Entered %s for cmd=%p.\n", __func__, cmd);
834 if (!CMD_SP(cmd)) 877 if (!CMD_SP(cmd))
835 return SUCCESS; 878 return SUCCESS;
836 879
837 ret = fc_block_scsi_eh(cmd); 880 ret = fc_block_scsi_eh(cmd);
881 ql_dbg(ql_dbg_taskm, vha, 0x8001,
882 "Return value of fc_block_scsi_eh=%d.\n", ret);
838 if (ret != 0) 883 if (ret != 0)
839 return ret; 884 return ret;
840 ret = SUCCESS; 885 ret = SUCCESS;
@@ -849,20 +894,19 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
849 return SUCCESS; 894 return SUCCESS;
850 } 895 }
851 896
852 DEBUG2(printk("%s(%ld): aborting sp %p from RISC.", 897 ql_dbg(ql_dbg_taskm, vha, 0x8002,
853 __func__, vha->host_no, sp)); 898 "Aborting sp=%p cmd=%p from RISC ", sp, cmd);
854 899
855 /* Get a reference to the sp and drop the lock.*/ 900 /* Get a reference to the sp and drop the lock.*/
856 sp_get(sp); 901 sp_get(sp);
857 902
858 spin_unlock_irqrestore(&ha->hardware_lock, flags); 903 spin_unlock_irqrestore(&ha->hardware_lock, flags);
859 if (ha->isp_ops->abort_command(sp)) { 904 if (ha->isp_ops->abort_command(sp)) {
860 DEBUG2(printk("%s(%ld): abort_command " 905 ql_dbg(ql_dbg_taskm, vha, 0x8003,
861 "mbx failed.\n", __func__, vha->host_no)); 906 "Abort command mbx failed for cmd=%p.\n", cmd);
862 ret = FAILED;
863 } else { 907 } else {
864 DEBUG3(printk("%s(%ld): abort_command " 908 ql_dbg(ql_dbg_taskm, vha, 0x8004,
865 "mbx success.\n", __func__, vha->host_no)); 909 "Abort command mbx success.\n");
866 wait = 1; 910 wait = 1;
867 } 911 }
868 qla2x00_sp_compl(ha, sp); 912 qla2x00_sp_compl(ha, sp);
@@ -870,16 +914,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
870 /* Wait for the command to be returned. */ 914 /* Wait for the command to be returned. */
871 if (wait) { 915 if (wait) {
872 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) { 916 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
873 qla_printk(KERN_ERR, ha, 917 ql_log(ql_log_warn, vha, 0x8006,
874 "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n", 918 "Abort handler timed out for cmd=%p.\n", cmd);
875 vha->host_no, id, lun, ret);
876 ret = FAILED; 919 ret = FAILED;
877 } 920 }
878 } 921 }
879 922
880 qla_printk(KERN_INFO, ha, 923 ql_log(ql_log_info, vha, 0x801c,
881 "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n", 924 "Abort command issued -- %d %x.\n", wait, ret);
882 vha->host_no, id, lun, wait, ret);
883 925
884 return ret; 926 return ret;
885} 927}
@@ -947,40 +989,59 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
947 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 989 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
948 int err; 990 int err;
949 991
950 if (!fcport) 992 if (!fcport) {
993 ql_log(ql_log_warn, vha, 0x8007,
994 "fcport is NULL.\n");
951 return FAILED; 995 return FAILED;
996 }
952 997
953 err = fc_block_scsi_eh(cmd); 998 err = fc_block_scsi_eh(cmd);
999 ql_dbg(ql_dbg_taskm, vha, 0x8008,
1000 "fc_block_scsi_eh ret=%d.\n", err);
954 if (err != 0) 1001 if (err != 0)
955 return err; 1002 return err;
956 1003
957 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n", 1004 ql_log(ql_log_info, vha, 0x8009,
958 vha->host_no, cmd->device->id, cmd->device->lun, name); 1005 "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name,
1006 cmd->device->id, cmd->device->lun, cmd);
959 1007
960 err = 0; 1008 err = 0;
961 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) 1009 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1010 ql_log(ql_log_warn, vha, 0x800a,
1011 "Wait for hba online failed for cmd=%p.\n", cmd);
962 goto eh_reset_failed; 1012 goto eh_reset_failed;
1013 }
963 err = 1; 1014 err = 1;
964 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) 1015 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
1016 ql_log(ql_log_warn, vha, 0x800b,
1017 "Wait for loop ready failed for cmd=%p.\n", cmd);
965 goto eh_reset_failed; 1018 goto eh_reset_failed;
1019 }
966 err = 2; 1020 err = 2;
967 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1) 1021 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
968 != QLA_SUCCESS) 1022 != QLA_SUCCESS) {
1023 ql_log(ql_log_warn, vha, 0x800c,
1024 "do_reset failed for cmd=%p.\n", cmd);
969 goto eh_reset_failed; 1025 goto eh_reset_failed;
1026 }
970 err = 3; 1027 err = 3;
971 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 1028 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
972 cmd->device->lun, type) != QLA_SUCCESS) 1029 cmd->device->lun, type) != QLA_SUCCESS) {
1030 ql_log(ql_log_warn, vha, 0x800d,
1031 "wait for peding cmds failed for cmd=%p.\n", cmd);
973 goto eh_reset_failed; 1032 goto eh_reset_failed;
1033 }
974 1034
975 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n", 1035 ql_log(ql_log_info, vha, 0x800e,
976 vha->host_no, cmd->device->id, cmd->device->lun, name); 1036 "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name,
1037 cmd->device->id, cmd->device->lun, cmd);
977 1038
978 return SUCCESS; 1039 return SUCCESS;
979 1040
980eh_reset_failed: 1041eh_reset_failed:
981 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n" 1042 ql_log(ql_log_info, vha, 0x800f,
982 , vha->host_no, cmd->device->id, cmd->device->lun, name, 1043 "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name,
983 reset_errors[err]); 1044 reset_errors[err], cmd->device->id, cmd->device->lun);
984 return FAILED; 1045 return FAILED;
985} 1046}
986 1047
@@ -1030,19 +1091,25 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1030 id = cmd->device->id; 1091 id = cmd->device->id;
1031 lun = cmd->device->lun; 1092 lun = cmd->device->lun;
1032 1093
1033 if (!fcport) 1094 if (!fcport) {
1095 ql_log(ql_log_warn, vha, 0x8010,
1096 "fcport is NULL.\n");
1034 return ret; 1097 return ret;
1098 }
1035 1099
1036 ret = fc_block_scsi_eh(cmd); 1100 ret = fc_block_scsi_eh(cmd);
1101 ql_dbg(ql_dbg_taskm, vha, 0x8011,
1102 "fc_block_scsi_eh ret=%d.\n", ret);
1037 if (ret != 0) 1103 if (ret != 0)
1038 return ret; 1104 return ret;
1039 ret = FAILED; 1105 ret = FAILED;
1040 1106
1041 qla_printk(KERN_INFO, vha->hw, 1107 ql_log(ql_log_info, vha, 0x8012,
1042 "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun); 1108 "BUS RESET ISSUED for id %d lun %d.\n", id, lun);
1043 1109
1044 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1110 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1045 DEBUG2(printk("%s failed:board disabled\n",__func__)); 1111 ql_log(ql_log_fatal, vha, 0x8013,
1112 "Wait for hba online failed board disabled.\n");
1046 goto eh_bus_reset_done; 1113 goto eh_bus_reset_done;
1047 } 1114 }
1048 1115
@@ -1055,12 +1122,15 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1055 1122
1056 /* Flush outstanding commands. */ 1123 /* Flush outstanding commands. */
1057 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) != 1124 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
1058 QLA_SUCCESS) 1125 QLA_SUCCESS) {
1126 ql_log(ql_log_warn, vha, 0x8014,
1127 "Wait for pending commands failed.\n");
1059 ret = FAILED; 1128 ret = FAILED;
1129 }
1060 1130
1061eh_bus_reset_done: 1131eh_bus_reset_done:
1062 qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__, 1132 ql_log(ql_log_warn, vha, 0x802b,
1063 (ret == FAILED) ? "failed" : "succeeded"); 1133 "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEDED");
1064 1134
1065 return ret; 1135 return ret;
1066} 1136}
@@ -1093,16 +1163,21 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1093 id = cmd->device->id; 1163 id = cmd->device->id;
1094 lun = cmd->device->lun; 1164 lun = cmd->device->lun;
1095 1165
1096 if (!fcport) 1166 if (!fcport) {
1167 ql_log(ql_log_warn, vha, 0x8016,
1168 "fcport is NULL.\n");
1097 return ret; 1169 return ret;
1170 }
1098 1171
1099 ret = fc_block_scsi_eh(cmd); 1172 ret = fc_block_scsi_eh(cmd);
1173 ql_dbg(ql_dbg_taskm, vha, 0x8017,
1174 "fc_block_scsi_eh ret=%d.\n", ret);
1100 if (ret != 0) 1175 if (ret != 0)
1101 return ret; 1176 return ret;
1102 ret = FAILED; 1177 ret = FAILED;
1103 1178
1104 qla_printk(KERN_INFO, ha, 1179 ql_log(ql_log_info, vha, 0x8018,
1105 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun); 1180 "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun);
1106 1181
1107 if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS) 1182 if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
1108 goto eh_host_reset_lock; 1183 goto eh_host_reset_lock;
@@ -1137,8 +1212,11 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1137 /* failed. schedule dpc to try */ 1212 /* failed. schedule dpc to try */
1138 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 1213 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1139 1214
1140 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) 1215 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1216 ql_log(ql_log_warn, vha, 0x802a,
1217 "wait for hba online failed.\n");
1141 goto eh_host_reset_lock; 1218 goto eh_host_reset_lock;
1219 }
1142 } 1220 }
1143 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1221 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1144 } 1222 }
@@ -1149,7 +1227,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1149 ret = SUCCESS; 1227 ret = SUCCESS;
1150 1228
1151eh_host_reset_lock: 1229eh_host_reset_lock:
1152 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 1230 qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__,
1153 (ret == FAILED) ? "failed" : "succeeded"); 1231 (ret == FAILED) ? "failed" : "succeeded");
1154 1232
1155 return ret; 1233 return ret;
@@ -1179,9 +1257,9 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1179 1257
1180 ret = ha->isp_ops->target_reset(fcport, 0, 0); 1258 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1181 if (ret != QLA_SUCCESS) { 1259 if (ret != QLA_SUCCESS) {
1182 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1260 ql_dbg(ql_dbg_taskm, vha, 0x802c,
1183 "target_reset=%d d_id=%x.\n", __func__, 1261 "Bus Reset failed: Target Reset=%d "
1184 vha->host_no, ret, fcport->d_id.b24)); 1262 "d_id=%x.\n", ret, fcport->d_id.b24);
1185 } 1263 }
1186 } 1264 }
1187 } 1265 }
@@ -1189,9 +1267,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1189 if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) { 1267 if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
1190 ret = qla2x00_full_login_lip(vha); 1268 ret = qla2x00_full_login_lip(vha);
1191 if (ret != QLA_SUCCESS) { 1269 if (ret != QLA_SUCCESS) {
1192 DEBUG2_3(printk("%s(%ld): failed: " 1270 ql_dbg(ql_dbg_taskm, vha, 0x802d,
1193 "full_login_lip=%d.\n", __func__, vha->host_no, 1271 "full_login_lip=%d.\n", ret);
1194 ret));
1195 } 1272 }
1196 atomic_set(&vha->loop_state, LOOP_DOWN); 1273 atomic_set(&vha->loop_state, LOOP_DOWN);
1197 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1274 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -1202,8 +1279,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1202 if (ha->flags.enable_lip_reset) { 1279 if (ha->flags.enable_lip_reset) {
1203 ret = qla2x00_lip_reset(vha); 1280 ret = qla2x00_lip_reset(vha);
1204 if (ret != QLA_SUCCESS) { 1281 if (ret != QLA_SUCCESS) {
1205 DEBUG2_3(printk("%s(%ld): failed: " 1282 ql_dbg(ql_dbg_taskm, vha, 0x802e,
1206 "lip_reset=%d.\n", __func__, vha->host_no, ret)); 1283 "lip_reset failed (%d).\n", ret);
1207 } else 1284 } else
1208 qla2x00_wait_for_loop_ready(vha); 1285 qla2x00_wait_for_loop_ready(vha);
1209 } 1286 }
@@ -1302,17 +1379,17 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1302 if (!scsi_track_queue_full(sdev, qdepth)) 1379 if (!scsi_track_queue_full(sdev, qdepth))
1303 return; 1380 return;
1304 1381
1305 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw, 1382 ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
1306 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n", 1383 "Queue depth adjusted-down "
1307 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun, 1384 "to %d for scsi(%ld:%d:%d:%d).\n",
1308 sdev->queue_depth)); 1385 sdev->queue_depth, fcport->vha->host_no,
1386 sdev->channel, sdev->id, sdev->lun);
1309} 1387}
1310 1388
1311static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth) 1389static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1312{ 1390{
1313 fc_port_t *fcport = sdev->hostdata; 1391 fc_port_t *fcport = sdev->hostdata;
1314 struct scsi_qla_host *vha = fcport->vha; 1392 struct scsi_qla_host *vha = fcport->vha;
1315 struct qla_hw_data *ha = vha->hw;
1316 struct req_que *req = NULL; 1393 struct req_que *req = NULL;
1317 1394
1318 req = vha->req; 1395 req = vha->req;
@@ -1327,10 +1404,11 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1327 else 1404 else
1328 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth); 1405 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
1329 1406
1330 DEBUG2(qla_printk(KERN_INFO, ha, 1407 ql_dbg(ql_dbg_io, vha, 0x302a,
1331 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n", 1408 "Queue depth adjusted-up to %d for "
1332 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun, 1409 "scsi(%ld:%d:%d:%d).\n",
1333 sdev->queue_depth)); 1410 sdev->queue_depth, fcport->vha->host_no,
1411 sdev->channel, sdev->id, sdev->lun);
1334} 1412}
1335 1413
1336static int 1414static int
@@ -1776,6 +1854,9 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
1776 ha->flags.port0 = 1; 1854 ha->flags.port0 = 1;
1777 else 1855 else
1778 ha->flags.port0 = 0; 1856 ha->flags.port0 = 0;
1857 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
1858 "device_type=0x%x port=%d fw_srisc_address=%p.\n",
1859 ha->device_type, ha->flags.port0, ha->fw_srisc_address);
1779} 1860}
1780 1861
1781static int 1862static int
@@ -1790,10 +1871,9 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1790 1871
1791 if (pci_request_selected_regions(ha->pdev, ha->bars, 1872 if (pci_request_selected_regions(ha->pdev, ha->bars,
1792 QLA2XXX_DRIVER_NAME)) { 1873 QLA2XXX_DRIVER_NAME)) {
1793 qla_printk(KERN_WARNING, ha, 1874 ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
1794 "Failed to reserve PIO/MMIO regions (%s)\n", 1875 "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
1795 pci_name(ha->pdev)); 1876 pci_name(ha->pdev));
1796
1797 goto iospace_error_exit; 1877 goto iospace_error_exit;
1798 } 1878 }
1799 if (!(ha->bars & 1)) 1879 if (!(ha->bars & 1))
@@ -1803,39 +1883,42 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1803 pio = pci_resource_start(ha->pdev, 0); 1883 pio = pci_resource_start(ha->pdev, 0);
1804 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { 1884 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
1805 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { 1885 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1806 qla_printk(KERN_WARNING, ha, 1886 ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
1807 "Invalid PCI I/O region size (%s)...\n", 1887 "Invalid pci I/O region size (%s).\n",
1808 pci_name(ha->pdev)); 1888 pci_name(ha->pdev));
1809 pio = 0; 1889 pio = 0;
1810 } 1890 }
1811 } else { 1891 } else {
1812 qla_printk(KERN_WARNING, ha, 1892 ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
1813 "region #0 not a PIO resource (%s)...\n", 1893 "Region #0 no a PIO resource (%s).\n",
1814 pci_name(ha->pdev)); 1894 pci_name(ha->pdev));
1815 pio = 0; 1895 pio = 0;
1816 } 1896 }
1817 ha->pio_address = pio; 1897 ha->pio_address = pio;
1898 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
1899 "PIO address=%p.\n",
1900 ha->pio_address);
1818 1901
1819skip_pio: 1902skip_pio:
1820 /* Use MMIO operations for all accesses. */ 1903 /* Use MMIO operations for all accesses. */
1821 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { 1904 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
1822 qla_printk(KERN_ERR, ha, 1905 ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
1823 "region #1 not an MMIO resource (%s), aborting\n", 1906 "Region #1 not an MMIO resource (%s), aborting.\n",
1824 pci_name(ha->pdev)); 1907 pci_name(ha->pdev));
1825 goto iospace_error_exit; 1908 goto iospace_error_exit;
1826 } 1909 }
1827 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { 1910 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
1828 qla_printk(KERN_ERR, ha, 1911 ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
1829 "Invalid PCI mem region size (%s), aborting\n", 1912 "Invalid PCI mem region size (%s), aborting.\n",
1830 pci_name(ha->pdev)); 1913 pci_name(ha->pdev));
1831 goto iospace_error_exit; 1914 goto iospace_error_exit;
1832 } 1915 }
1833 1916
1834 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); 1917 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
1835 if (!ha->iobase) { 1918 if (!ha->iobase) {
1836 qla_printk(KERN_ERR, ha, 1919 ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
1837 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); 1920 "Cannot remap MMIO (%s), aborting.\n",
1838 1921 pci_name(ha->pdev));
1839 goto iospace_error_exit; 1922 goto iospace_error_exit;
1840 } 1923 }
1841 1924
@@ -1849,6 +1932,8 @@ skip_pio:
1849 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1932 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1850 pci_resource_len(ha->pdev, 3)); 1933 pci_resource_len(ha->pdev, 3));
1851 if (ha->mqiobase) { 1934 if (ha->mqiobase) {
1935 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
1936 "MQIO Base=%p.\n", ha->mqiobase);
1852 /* Read MSIX vector size of the board */ 1937 /* Read MSIX vector size of the board */
1853 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); 1938 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1854 ha->msix_count = msix; 1939 ha->msix_count = msix;
@@ -1861,17 +1946,24 @@ skip_pio:
1861 ha->max_req_queues = 2; 1946 ha->max_req_queues = 2;
1862 } else if (ql2xmaxqueues > 1) { 1947 } else if (ql2xmaxqueues > 1) {
1863 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? 1948 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1864 QLA_MQ_SIZE : ql2xmaxqueues; 1949 QLA_MQ_SIZE : ql2xmaxqueues;
1865 DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no" 1950 ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
1866 " of request queues:%d\n", ha->max_req_queues)); 1951 "QoS mode set, max no of request queues:%d.\n",
1952 ha->max_req_queues);
1953 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
1954 "QoS mode set, max no of request queues:%d.\n",
1955 ha->max_req_queues);
1867 } 1956 }
1868 qla_printk(KERN_INFO, ha, 1957 ql_log_pci(ql_log_info, ha->pdev, 0x001a,
1869 "MSI-X vector count: %d\n", msix); 1958 "MSI-X vector count: %d.\n", msix);
1870 } else 1959 } else
1871 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n"); 1960 ql_log_pci(ql_log_info, ha->pdev, 0x001b,
1961 "BAR 3 not enabled.\n");
1872 1962
1873mqiobase_exit: 1963mqiobase_exit:
1874 ha->msix_count = ha->max_rsp_queues + 1; 1964 ha->msix_count = ha->max_rsp_queues + 1;
1965 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
1966 "MSIX Count:%d.\n", ha->msix_count);
1875 return (0); 1967 return (0);
1876 1968
1877iospace_error_exit: 1969iospace_error_exit:
@@ -1935,7 +2027,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1935 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) { 2027 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
1936 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2028 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1937 mem_only = 1; 2029 mem_only = 1;
2030 ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
2031 "Mem only adapter.\n");
1938 } 2032 }
2033 ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
2034 "Bars=%d.\n", bars);
1939 2035
1940 if (mem_only) { 2036 if (mem_only) {
1941 if (pci_enable_device_mem(pdev)) 2037 if (pci_enable_device_mem(pdev))
@@ -1950,9 +2046,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1950 2046
1951 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); 2047 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1952 if (!ha) { 2048 if (!ha) {
1953 DEBUG(printk("Unable to allocate memory for ha\n")); 2049 ql_log_pci(ql_log_fatal, pdev, 0x0009,
2050 "Unable to allocate memory for ha.\n");
1954 goto probe_out; 2051 goto probe_out;
1955 } 2052 }
2053 ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
2054 "Memory allocated for ha=%p.\n", ha);
1956 ha->pdev = pdev; 2055 ha->pdev = pdev;
1957 2056
1958 /* Clear our data area */ 2057 /* Clear our data area */
@@ -1974,10 +2073,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1974 if (ret) 2073 if (ret)
1975 goto probe_hw_failed; 2074 goto probe_hw_failed;
1976 2075
1977 qla_printk(KERN_INFO, ha, 2076 ql_log_pci(ql_log_info, pdev, 0x001d,
1978 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, 2077 "Found an ISP%04X irq %d iobase 0x%p.\n",
1979 ha->iobase); 2078 pdev->device, pdev->irq, ha->iobase);
1980
1981 ha->prev_topology = 0; 2079 ha->prev_topology = 0;
1982 ha->init_cb_size = sizeof(init_cb_t); 2080 ha->init_cb_size = sizeof(init_cb_t);
1983 ha->link_data_rate = PORT_SPEED_UNKNOWN; 2081 ha->link_data_rate = PORT_SPEED_UNKNOWN;
@@ -2078,7 +2176,18 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2078 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2176 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2079 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2177 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2080 } 2178 }
2081 2179 ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
2180 "mbx_count=%d, req_length=%d, "
2181 "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
2182 "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, .\n",
2183 ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
2184 ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
2185 ha->nvram_npiv_size);
2186 ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
2187 "isp_ops=%p, flash_conf_off=%d, "
2188 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
2189 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
2190 ha->nvram_conf_off, ha->nvram_data_off);
2082 mutex_init(&ha->vport_lock); 2191 mutex_init(&ha->vport_lock);
2083 init_completion(&ha->mbx_cmd_comp); 2192 init_completion(&ha->mbx_cmd_comp);
2084 complete(&ha->mbx_cmd_comp); 2193 complete(&ha->mbx_cmd_comp);
@@ -2088,10 +2197,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2088 set_bit(0, (unsigned long *) ha->vp_idx_map); 2197 set_bit(0, (unsigned long *) ha->vp_idx_map);
2089 2198
2090 qla2x00_config_dma_addressing(ha); 2199 qla2x00_config_dma_addressing(ha);
2200 ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
2201 "64 Bit addressing is %s.\n",
2202 ha->flags.enable_64bit_addressing ? "enable" :
2203 "disable");
2091 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); 2204 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
2092 if (!ret) { 2205 if (!ret) {
2093 qla_printk(KERN_WARNING, ha, 2206 ql_log_pci(ql_log_fatal, pdev, 0x0031,
2094 "[ERROR] Failed to allocate memory for adapter\n"); 2207 "Failed to allocate memory for adapter, aborting.\n");
2095 2208
2096 goto probe_hw_failed; 2209 goto probe_hw_failed;
2097 } 2210 }
@@ -2103,9 +2216,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2103 2216
2104 base_vha = qla2x00_create_host(sht, ha); 2217 base_vha = qla2x00_create_host(sht, ha);
2105 if (!base_vha) { 2218 if (!base_vha) {
2106 qla_printk(KERN_WARNING, ha,
2107 "[ERROR] Failed to allocate memory for scsi_host\n");
2108
2109 ret = -ENOMEM; 2219 ret = -ENOMEM;
2110 qla2x00_mem_free(ha); 2220 qla2x00_mem_free(ha);
2111 qla2x00_free_req_que(ha, req); 2221 qla2x00_free_req_que(ha, req);
@@ -2132,7 +2242,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2132 if (!IS_QLA82XX(ha)) 2242 if (!IS_QLA82XX(ha))
2133 host->sg_tablesize = QLA_SG_ALL; 2243 host->sg_tablesize = QLA_SG_ALL;
2134 } 2244 }
2135 2245 ql_dbg(ql_dbg_init, base_vha, 0x0032,
2246 "can_queue=%d, req=%p, "
2247 "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
2248 host->can_queue, base_vha->req,
2249 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
2136 host->max_id = max_id; 2250 host->max_id = max_id;
2137 host->this_id = 255; 2251 host->this_id = 255;
2138 host->cmd_per_lun = 3; 2252 host->cmd_per_lun = 3;
@@ -2146,6 +2260,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2146 host->transportt = qla2xxx_transport_template; 2260 host->transportt = qla2xxx_transport_template;
2147 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); 2261 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
2148 2262
2263 ql_dbg(ql_dbg_init, base_vha, 0x0033,
2264 "max_id=%d this_id=%d "
2265 "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
2266 "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id,
2267 host->this_id, host->cmd_per_lun, host->unique_id,
2268 host->max_cmd_len, host->max_channel, host->max_lun,
2269 host->transportt, sht->vendor_id);
2270
2149 /* Set up the irqs */ 2271 /* Set up the irqs */
2150 ret = qla2x00_request_irqs(ha, rsp); 2272 ret = qla2x00_request_irqs(ha, rsp);
2151 if (ret) 2273 if (ret)
@@ -2156,9 +2278,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2156 /* Alloc arrays of request and response ring ptrs */ 2278 /* Alloc arrays of request and response ring ptrs */
2157que_init: 2279que_init:
2158 if (!qla2x00_alloc_queues(ha)) { 2280 if (!qla2x00_alloc_queues(ha)) {
2159 qla_printk(KERN_WARNING, ha, 2281 ql_log(ql_log_fatal, base_vha, 0x003d,
2160 "[ERROR] Failed to allocate memory for queue" 2282 "Failed to allocate memory for queue pointers.. aborting.\n");
2161 " pointers\n");
2162 goto probe_init_failed; 2283 goto probe_init_failed;
2163 } 2284 }
2164 2285
@@ -2186,20 +2307,33 @@ que_init:
2186 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; 2307 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
2187 } 2308 }
2188 2309
2189 if (qla2x00_initialize_adapter(base_vha)) { 2310 ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
2190 qla_printk(KERN_WARNING, ha, 2311 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
2191 "Failed to initialize adapter\n"); 2312 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
2313 ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
2314 "req->req_q_in=%p req->req_q_out=%p "
2315 "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
2316 req->req_q_in, req->req_q_out,
2317 rsp->rsp_q_in, rsp->rsp_q_out);
2318 ql_dbg(ql_dbg_init, base_vha, 0x003e,
2319 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
2320 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
2321 ql_dbg(ql_dbg_init, base_vha, 0x003f,
2322 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
2323 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
2192 2324
2193 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - " 2325 if (qla2x00_initialize_adapter(base_vha)) {
2194 "Adapter flags %x.\n", 2326 ql_log(ql_log_fatal, base_vha, 0x00d6,
2195 base_vha->host_no, base_vha->device_flags)); 2327 "Failed to initialize adapter - Adapter flags %x.\n",
2328 base_vha->device_flags);
2196 2329
2197 if (IS_QLA82XX(ha)) { 2330 if (IS_QLA82XX(ha)) {
2198 qla82xx_idc_lock(ha); 2331 qla82xx_idc_lock(ha);
2199 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2332 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2200 QLA82XX_DEV_FAILED); 2333 QLA82XX_DEV_FAILED);
2201 qla82xx_idc_unlock(ha); 2334 qla82xx_idc_unlock(ha);
2202 qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); 2335 ql_log(ql_log_fatal, base_vha, 0x00d7,
2336 "HW State: FAILED.\n");
2203 } 2337 }
2204 2338
2205 ret = -ENODEV; 2339 ret = -ENODEV;
@@ -2208,9 +2342,8 @@ que_init:
2208 2342
2209 if (ha->mqenable) { 2343 if (ha->mqenable) {
2210 if (qla25xx_setup_mode(base_vha)) { 2344 if (qla25xx_setup_mode(base_vha)) {
2211 qla_printk(KERN_WARNING, ha, 2345 ql_log(ql_log_warn, base_vha, 0x00ec,
2212 "Can't create queues, falling back to single" 2346 "Failed to create queues, falling back to single queue mode.\n");
2213 " queue mode\n");
2214 goto que_init; 2347 goto que_init;
2215 } 2348 }
2216 } 2349 }
@@ -2222,13 +2355,15 @@ que_init:
2222 * Startup the kernel thread for this host adapter 2355 * Startup the kernel thread for this host adapter
2223 */ 2356 */
2224 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 2357 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
2225 "%s_dpc", base_vha->host_str); 2358 "%s_dpc", base_vha->host_str);
2226 if (IS_ERR(ha->dpc_thread)) { 2359 if (IS_ERR(ha->dpc_thread)) {
2227 qla_printk(KERN_WARNING, ha, 2360 ql_log(ql_log_fatal, base_vha, 0x00ed,
2228 "Unable to start DPC thread!\n"); 2361 "Failed to start DPC thread.\n");
2229 ret = PTR_ERR(ha->dpc_thread); 2362 ret = PTR_ERR(ha->dpc_thread);
2230 goto probe_failed; 2363 goto probe_failed;
2231 } 2364 }
2365 ql_dbg(ql_dbg_init, base_vha, 0x00ee,
2366 "DPC thread started successfully.\n");
2232 2367
2233skip_dpc: 2368skip_dpc:
2234 list_add_tail(&base_vha->list, &ha->vp_list); 2369 list_add_tail(&base_vha->list, &ha->vp_list);
@@ -2236,16 +2371,18 @@ skip_dpc:
2236 2371
2237 /* Initialized the timer */ 2372 /* Initialized the timer */
2238 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL); 2373 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
2239 2374 ql_dbg(ql_dbg_init, base_vha, 0x00ef,
2240 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 2375 "Started qla2x00_timer with "
2241 base_vha->host_no, ha)); 2376 "interval=%d.\n", WATCH_INTERVAL);
2377 ql_dbg(ql_dbg_init, base_vha, 0x00f0,
2378 "Detected hba at address=%p.\n",
2379 ha);
2242 2380
2243 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 2381 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
2244 if (ha->fw_attributes & BIT_4) { 2382 if (ha->fw_attributes & BIT_4) {
2245 base_vha->flags.difdix_supported = 1; 2383 base_vha->flags.difdix_supported = 1;
2246 DEBUG18(qla_printk(KERN_INFO, ha, 2384 ql_dbg(ql_dbg_init, base_vha, 0x00f1,
2247 "Registering for DIF/DIX type 1 and 3" 2385 "Registering for DIF/DIX type 1 and 3 protection.\n");
2248 " protection.\n"));
2249 scsi_host_set_prot(host, 2386 scsi_host_set_prot(host,
2250 SHOST_DIF_TYPE1_PROTECTION 2387 SHOST_DIF_TYPE1_PROTECTION
2251 | SHOST_DIF_TYPE2_PROTECTION 2388 | SHOST_DIF_TYPE2_PROTECTION
@@ -2267,6 +2404,9 @@ skip_dpc:
2267 base_vha->flags.init_done = 1; 2404 base_vha->flags.init_done = 1;
2268 base_vha->flags.online = 1; 2405 base_vha->flags.online = 1;
2269 2406
2407 ql_dbg(ql_dbg_init, base_vha, 0x00f2,
2408 "Init done and hba is online.\n");
2409
2270 scsi_scan_host(host); 2410 scsi_scan_host(host);
2271 2411
2272 qla2x00_alloc_sysfs_attr(base_vha); 2412 qla2x00_alloc_sysfs_attr(base_vha);
@@ -2275,14 +2415,17 @@ skip_dpc:
2275 2415
2276 qla2x00_dfs_setup(base_vha); 2416 qla2x00_dfs_setup(base_vha);
2277 2417
2278 qla_printk(KERN_INFO, ha, "\n" 2418 ql_log(ql_log_info, base_vha, 0x00fa,
2279 " QLogic Fibre Channel HBA Driver: %s\n" 2419 "QLogic Fibre Channed HBA Driver: %s.\n",
2280 " QLogic %s - %s\n" 2420 qla2x00_version_str);
2281 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n", 2421 ql_log(ql_log_info, base_vha, 0x00fb,
2282 qla2x00_version_str, ha->model_number, 2422 "QLogic %s - %s.\n",
2283 ha->model_desc ? ha->model_desc : "", pdev->device, 2423 ha->model_number, ha->model_desc ? ha->model_desc : "");
2284 ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev), 2424 ql_log(ql_log_info, base_vha, 0x00fc,
2285 ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no, 2425 "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
2426 pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
2427 pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
2428 base_vha->host_no,
2286 ha->isp_ops->fw_version_str(base_vha, fw_str)); 2429 ha->isp_ops->fw_version_str(base_vha, fw_str));
2287 2430
2288 return 0; 2431 return 0;
@@ -2580,20 +2723,15 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2580 fcport->login_retry = vha->hw->login_retry_count; 2723 fcport->login_retry = vha->hw->login_retry_count;
2581 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 2724 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2582 2725
2583 DEBUG(printk("scsi(%ld): Port login retry: " 2726 ql_dbg(ql_dbg_disc, vha, 0x2067,
2727 "Port login retry "
2584 "%02x%02x%02x%02x%02x%02x%02x%02x, " 2728 "%02x%02x%02x%02x%02x%02x%02x%02x, "
2585 "id = 0x%04x retry cnt=%d\n", 2729 "id = 0x%04x retry cnt=%d.\n",
2586 vha->host_no, 2730 fcport->port_name[0], fcport->port_name[1],
2587 fcport->port_name[0], 2731 fcport->port_name[2], fcport->port_name[3],
2588 fcport->port_name[1], 2732 fcport->port_name[4], fcport->port_name[5],
2589 fcport->port_name[2], 2733 fcport->port_name[6], fcport->port_name[7],
2590 fcport->port_name[3], 2734 fcport->loop_id, fcport->login_retry);
2591 fcport->port_name[4],
2592 fcport->port_name[5],
2593 fcport->port_name[6],
2594 fcport->port_name[7],
2595 fcport->loop_id,
2596 fcport->login_retry));
2597 } 2735 }
2598} 2736}
2599 2737
@@ -2676,6 +2814,9 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2676 ctx_cachep); 2814 ctx_cachep);
2677 if (!ha->ctx_mempool) 2815 if (!ha->ctx_mempool)
2678 goto fail_free_srb_mempool; 2816 goto fail_free_srb_mempool;
2817 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
2818 "ctx_cachep=%p ctx_mempool=%p.\n",
2819 ctx_cachep, ha->ctx_mempool);
2679 } 2820 }
2680 2821
2681 /* Get memory for cached NVRAM */ 2822 /* Get memory for cached NVRAM */
@@ -2690,22 +2831,29 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2690 if (!ha->s_dma_pool) 2831 if (!ha->s_dma_pool)
2691 goto fail_free_nvram; 2832 goto fail_free_nvram;
2692 2833
2834 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
2835 "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
2836 ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
2837
2693 if (IS_QLA82XX(ha) || ql2xenabledif) { 2838 if (IS_QLA82XX(ha) || ql2xenabledif) {
2694 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2839 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2695 DSD_LIST_DMA_POOL_SIZE, 8, 0); 2840 DSD_LIST_DMA_POOL_SIZE, 8, 0);
2696 if (!ha->dl_dma_pool) { 2841 if (!ha->dl_dma_pool) {
2697 qla_printk(KERN_WARNING, ha, 2842 ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
2698 "Memory Allocation failed - dl_dma_pool\n"); 2843 "Failed to allocate memory for dl_dma_pool.\n");
2699 goto fail_s_dma_pool; 2844 goto fail_s_dma_pool;
2700 } 2845 }
2701 2846
2702 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2847 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2703 FCP_CMND_DMA_POOL_SIZE, 8, 0); 2848 FCP_CMND_DMA_POOL_SIZE, 8, 0);
2704 if (!ha->fcp_cmnd_dma_pool) { 2849 if (!ha->fcp_cmnd_dma_pool) {
2705 qla_printk(KERN_WARNING, ha, 2850 ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
2706 "Memory Allocation failed - fcp_cmnd_dma_pool\n"); 2851 "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
2707 goto fail_dl_dma_pool; 2852 goto fail_dl_dma_pool;
2708 } 2853 }
2854 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
2855 "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
2856 ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
2709 } 2857 }
2710 2858
2711 /* Allocate memory for SNS commands */ 2859 /* Allocate memory for SNS commands */
@@ -2715,6 +2863,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2715 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 2863 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2716 if (!ha->sns_cmd) 2864 if (!ha->sns_cmd)
2717 goto fail_dma_pool; 2865 goto fail_dma_pool;
2866 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
2867 "sns_cmd.\n", ha->sns_cmd);
2718 } else { 2868 } else {
2719 /* Get consistent memory allocated for MS IOCB */ 2869 /* Get consistent memory allocated for MS IOCB */
2720 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 2870 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -2726,12 +2876,16 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2726 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 2876 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2727 if (!ha->ct_sns) 2877 if (!ha->ct_sns)
2728 goto fail_free_ms_iocb; 2878 goto fail_free_ms_iocb;
2879 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
2880 "ms_iocb=%p ct_sns=%p.\n",
2881 ha->ms_iocb, ha->ct_sns);
2729 } 2882 }
2730 2883
2731 /* Allocate memory for request ring */ 2884 /* Allocate memory for request ring */
2732 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 2885 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2733 if (!*req) { 2886 if (!*req) {
2734 DEBUG(printk("Unable to allocate memory for req\n")); 2887 ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
2888 "Failed to allocate memory for req.\n");
2735 goto fail_req; 2889 goto fail_req;
2736 } 2890 }
2737 (*req)->length = req_len; 2891 (*req)->length = req_len;
@@ -2739,14 +2893,15 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2739 ((*req)->length + 1) * sizeof(request_t), 2893 ((*req)->length + 1) * sizeof(request_t),
2740 &(*req)->dma, GFP_KERNEL); 2894 &(*req)->dma, GFP_KERNEL);
2741 if (!(*req)->ring) { 2895 if (!(*req)->ring) {
2742 DEBUG(printk("Unable to allocate memory for req_ring\n")); 2896 ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
2897 "Failed to allocate memory for req_ring.\n");
2743 goto fail_req_ring; 2898 goto fail_req_ring;
2744 } 2899 }
2745 /* Allocate memory for response ring */ 2900 /* Allocate memory for response ring */
2746 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 2901 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2747 if (!*rsp) { 2902 if (!*rsp) {
2748 qla_printk(KERN_WARNING, ha, 2903 ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
2749 "Unable to allocate memory for rsp\n"); 2904 "Failed to allocate memory for rsp.\n");
2750 goto fail_rsp; 2905 goto fail_rsp;
2751 } 2906 }
2752 (*rsp)->hw = ha; 2907 (*rsp)->hw = ha;
@@ -2755,19 +2910,24 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2755 ((*rsp)->length + 1) * sizeof(response_t), 2910 ((*rsp)->length + 1) * sizeof(response_t),
2756 &(*rsp)->dma, GFP_KERNEL); 2911 &(*rsp)->dma, GFP_KERNEL);
2757 if (!(*rsp)->ring) { 2912 if (!(*rsp)->ring) {
2758 qla_printk(KERN_WARNING, ha, 2913 ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
2759 "Unable to allocate memory for rsp_ring\n"); 2914 "Failed to allocate memory for rsp_ring.\n");
2760 goto fail_rsp_ring; 2915 goto fail_rsp_ring;
2761 } 2916 }
2762 (*req)->rsp = *rsp; 2917 (*req)->rsp = *rsp;
2763 (*rsp)->req = *req; 2918 (*rsp)->req = *req;
2919 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
2920 "req=%p req->length=%d req->ring=%p rsp=%p "
2921 "rsp->length=%d rsp->ring=%p.\n",
2922 *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
2923 (*rsp)->ring);
2764 /* Allocate memory for NVRAM data for vports */ 2924 /* Allocate memory for NVRAM data for vports */
2765 if (ha->nvram_npiv_size) { 2925 if (ha->nvram_npiv_size) {
2766 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) * 2926 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2767 ha->nvram_npiv_size, GFP_KERNEL); 2927 ha->nvram_npiv_size, GFP_KERNEL);
2768 if (!ha->npiv_info) { 2928 if (!ha->npiv_info) {
2769 qla_printk(KERN_WARNING, ha, 2929 ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
2770 "Unable to allocate memory for npiv info\n"); 2930 "Failed to allocate memory for npiv_info.\n");
2771 goto fail_npiv_info; 2931 goto fail_npiv_info;
2772 } 2932 }
2773 } else 2933 } else
@@ -2779,6 +2939,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2779 &ha->ex_init_cb_dma); 2939 &ha->ex_init_cb_dma);
2780 if (!ha->ex_init_cb) 2940 if (!ha->ex_init_cb)
2781 goto fail_ex_init_cb; 2941 goto fail_ex_init_cb;
2942 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
2943 "ex_init_cb=%p.\n", ha->ex_init_cb);
2782 } 2944 }
2783 2945
2784 INIT_LIST_HEAD(&ha->gbl_dsd_list); 2946 INIT_LIST_HEAD(&ha->gbl_dsd_list);
@@ -2789,6 +2951,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2789 &ha->async_pd_dma); 2951 &ha->async_pd_dma);
2790 if (!ha->async_pd) 2952 if (!ha->async_pd)
2791 goto fail_async_pd; 2953 goto fail_async_pd;
2954 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
2955 "async_pd=%p.\n", ha->async_pd);
2792 } 2956 }
2793 2957
2794 INIT_LIST_HEAD(&ha->vp_list); 2958 INIT_LIST_HEAD(&ha->vp_list);
@@ -2854,7 +3018,8 @@ fail_free_init_cb:
2854 ha->init_cb = NULL; 3018 ha->init_cb = NULL;
2855 ha->init_cb_dma = 0; 3019 ha->init_cb_dma = 0;
2856fail: 3020fail:
2857 DEBUG(printk("%s: Memory allocation failure\n", __func__)); 3021 ql_log(ql_log_fatal, NULL, 0x0030,
3022 "Memory allocation failure.\n");
2858 return -ENOMEM; 3023 return -ENOMEM;
2859} 3024}
2860 3025
@@ -3003,8 +3168,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3003 3168
3004 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 3169 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
3005 if (host == NULL) { 3170 if (host == NULL) {
3006 printk(KERN_WARNING 3171 ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
3007 "qla2xxx: Couldn't allocate host from scsi layer!\n"); 3172 "Failed to allocate host from the scsi layer, aborting.\n");
3008 goto fail; 3173 goto fail;
3009 } 3174 }
3010 3175
@@ -3023,6 +3188,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3023 spin_lock_init(&vha->work_lock); 3188 spin_lock_init(&vha->work_lock);
3024 3189
3025 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 3190 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
3191 ql_dbg(ql_dbg_init, vha, 0x0041,
3192 "Allocated the host=%p hw=%p vha=%p dev_name=%s",
3193 vha->host, vha->hw, vha,
3194 dev_name(&(ha->pdev->dev)));
3195
3026 return vha; 3196 return vha;
3027 3197
3028fail: 3198fail:
@@ -3264,18 +3434,18 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
3264 if (status == QLA_SUCCESS) { 3434 if (status == QLA_SUCCESS) {
3265 fcport->old_loop_id = fcport->loop_id; 3435 fcport->old_loop_id = fcport->loop_id;
3266 3436
3267 DEBUG(printk("scsi(%ld): port login OK: logged " 3437 ql_dbg(ql_dbg_disc, vha, 0x2003,
3268 "in ID 0x%x\n", vha->host_no, fcport->loop_id)); 3438 "Port login OK: logged in ID 0x%x.\n",
3439 fcport->loop_id);
3269 3440
3270 qla2x00_update_fcport(vha, fcport); 3441 qla2x00_update_fcport(vha, fcport);
3271 3442
3272 } else if (status == 1) { 3443 } else if (status == 1) {
3273 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 3444 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3274 /* retry the login again */ 3445 /* retry the login again */
3275 DEBUG(printk("scsi(%ld): Retrying" 3446 ql_dbg(ql_dbg_disc, vha, 0x2007,
3276 " %d login again loop_id 0x%x\n", 3447 "Retrying %d login again loop_id 0x%x.\n",
3277 vha->host_no, fcport->login_retry, 3448 fcport->login_retry, fcport->loop_id);
3278 fcport->loop_id));
3279 } else { 3449 } else {
3280 fcport->login_retry = 0; 3450 fcport->login_retry = 0;
3281 } 3451 }
@@ -3315,26 +3485,27 @@ qla2x00_do_dpc(void *data)
3315 3485
3316 set_current_state(TASK_INTERRUPTIBLE); 3486 set_current_state(TASK_INTERRUPTIBLE);
3317 while (!kthread_should_stop()) { 3487 while (!kthread_should_stop()) {
3318 DEBUG3(printk("qla2x00: DPC handler sleeping\n")); 3488 ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
3489 "DPC handler sleeping.\n");
3319 3490
3320 schedule(); 3491 schedule();
3321 __set_current_state(TASK_RUNNING); 3492 __set_current_state(TASK_RUNNING);
3322 3493
3323 DEBUG3(printk("qla2x00: DPC handler waking up\n")); 3494 ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
3495 "DPC handler waking up.\n");
3496 ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
3497 "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
3324 3498
3325 /* Initialization not yet finished. Don't do anything yet. */ 3499 /* Initialization not yet finished. Don't do anything yet. */
3326 if (!base_vha->flags.init_done) 3500 if (!base_vha->flags.init_done)
3327 continue; 3501 continue;
3328 3502
3329 if (ha->flags.eeh_busy) { 3503 if (ha->flags.eeh_busy) {
3330 DEBUG17(qla_printk(KERN_WARNING, ha, 3504 ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
3331 "qla2x00_do_dpc: dpc_flags: %lx\n", 3505 "eeh_busy=%d.\n", ha->flags.eeh_busy);
3332 base_vha->dpc_flags));
3333 continue; 3506 continue;
3334 } 3507 }
3335 3508
3336 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
3337
3338 ha->dpc_active = 1; 3509 ha->dpc_active = 1;
3339 3510
3340 if (ha->flags.mbox_busy) { 3511 if (ha->flags.mbox_busy) {
@@ -3351,8 +3522,8 @@ qla2x00_do_dpc(void *data)
3351 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3522 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3352 QLA82XX_DEV_FAILED); 3523 QLA82XX_DEV_FAILED);
3353 qla82xx_idc_unlock(ha); 3524 qla82xx_idc_unlock(ha);
3354 qla_printk(KERN_INFO, ha, 3525 ql_log(ql_log_info, base_vha, 0x4004,
3355 "HW State: FAILED\n"); 3526 "HW State: FAILED.\n");
3356 qla82xx_device_state_handler(base_vha); 3527 qla82xx_device_state_handler(base_vha);
3357 continue; 3528 continue;
3358 } 3529 }
@@ -3360,10 +3531,8 @@ qla2x00_do_dpc(void *data)
3360 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, 3531 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
3361 &base_vha->dpc_flags)) { 3532 &base_vha->dpc_flags)) {
3362 3533
3363 DEBUG(printk(KERN_INFO 3534 ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
3364 "scsi(%ld): dpc: sched " 3535 "FCoE context reset scheduled.\n");
3365 "qla82xx_fcoe_ctx_reset ha = %p\n",
3366 base_vha->host_no, ha));
3367 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 3536 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3368 &base_vha->dpc_flags))) { 3537 &base_vha->dpc_flags))) {
3369 if (qla82xx_fcoe_ctx_reset(base_vha)) { 3538 if (qla82xx_fcoe_ctx_reset(base_vha)) {
@@ -3377,18 +3546,16 @@ qla2x00_do_dpc(void *data)
3377 &base_vha->dpc_flags); 3546 &base_vha->dpc_flags);
3378 } 3547 }
3379 3548
3380 DEBUG(printk("scsi(%ld): dpc:" 3549 ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
3381 " qla82xx_fcoe_ctx_reset end\n", 3550 "FCoE context reset end.\n");
3382 base_vha->host_no));
3383 } 3551 }
3384 } 3552 }
3385 3553
3386 if (test_and_clear_bit(ISP_ABORT_NEEDED, 3554 if (test_and_clear_bit(ISP_ABORT_NEEDED,
3387 &base_vha->dpc_flags)) { 3555 &base_vha->dpc_flags)) {
3388 3556
3389 DEBUG(printk("scsi(%ld): dpc: sched " 3557 ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
3390 "qla2x00_abort_isp ha = %p\n", 3558 "ISP abort scheduled.\n");
3391 base_vha->host_no, ha));
3392 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 3559 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3393 &base_vha->dpc_flags))) { 3560 &base_vha->dpc_flags))) {
3394 3561
@@ -3401,8 +3568,8 @@ qla2x00_do_dpc(void *data)
3401 &base_vha->dpc_flags); 3568 &base_vha->dpc_flags);
3402 } 3569 }
3403 3570
3404 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n", 3571 ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
3405 base_vha->host_no)); 3572 "ISP abort end.\n");
3406 } 3573 }
3407 3574
3408 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) { 3575 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
@@ -3411,9 +3578,8 @@ qla2x00_do_dpc(void *data)
3411 } 3578 }
3412 3579
3413 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 3580 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
3414 DEBUG(printk(KERN_INFO "scsi(%ld): dpc: sched " 3581 ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
3415 "qla2x00_quiesce_needed ha = %p\n", 3582 "Quiescence mode scheduled.\n");
3416 base_vha->host_no, ha));
3417 qla82xx_device_state_handler(base_vha); 3583 qla82xx_device_state_handler(base_vha);
3418 clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags); 3584 clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags);
3419 if (!ha->flags.quiesce_owner) { 3585 if (!ha->flags.quiesce_owner) {
@@ -3423,17 +3589,20 @@ qla2x00_do_dpc(void *data)
3423 qla82xx_clear_qsnt_ready(base_vha); 3589 qla82xx_clear_qsnt_ready(base_vha);
3424 qla82xx_idc_unlock(ha); 3590 qla82xx_idc_unlock(ha);
3425 } 3591 }
3592 ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
3593 "Quiescence mode end.\n");
3426 } 3594 }
3427 3595
3428 if (test_and_clear_bit(RESET_MARKER_NEEDED, 3596 if (test_and_clear_bit(RESET_MARKER_NEEDED,
3429 &base_vha->dpc_flags) && 3597 &base_vha->dpc_flags) &&
3430 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { 3598 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
3431 3599
3432 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n", 3600 ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
3433 base_vha->host_no)); 3601 "Reset marker scheduled.\n");
3434
3435 qla2x00_rst_aen(base_vha); 3602 qla2x00_rst_aen(base_vha);
3436 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags); 3603 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
3604 ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
3605 "Reset marker end.\n");
3437 } 3606 }
3438 3607
3439 /* Retry each device up to login retry count */ 3608 /* Retry each device up to login retry count */
@@ -3442,19 +3611,18 @@ qla2x00_do_dpc(void *data)
3442 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && 3611 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
3443 atomic_read(&base_vha->loop_state) != LOOP_DOWN) { 3612 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
3444 3613
3445 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n", 3614 ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
3446 base_vha->host_no)); 3615 "Relogin scheduled.\n");
3447 qla2x00_relogin(base_vha); 3616 qla2x00_relogin(base_vha);
3448 3617 ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
3449 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", 3618 "Relogin end.\n");
3450 base_vha->host_no));
3451 } 3619 }
3452 3620
3453 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, 3621 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
3454 &base_vha->dpc_flags)) { 3622 &base_vha->dpc_flags)) {
3455 3623
3456 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n", 3624 ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
3457 base_vha->host_no)); 3625 "Loop resync scheduled.\n");
3458 3626
3459 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 3627 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
3460 &base_vha->dpc_flags))) { 3628 &base_vha->dpc_flags))) {
@@ -3465,8 +3633,8 @@ qla2x00_do_dpc(void *data)
3465 &base_vha->dpc_flags); 3633 &base_vha->dpc_flags);
3466 } 3634 }
3467 3635
3468 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n", 3636 ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
3469 base_vha->host_no)); 3637 "Loop resync end.\n");
3470 } 3638 }
3471 3639
3472 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && 3640 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
@@ -3489,7 +3657,8 @@ qla2x00_do_dpc(void *data)
3489 } /* End of while(1) */ 3657 } /* End of while(1) */
3490 __set_current_state(TASK_RUNNING); 3658 __set_current_state(TASK_RUNNING);
3491 3659
3492 DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no)); 3660 ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
3661 "DPC handler exiting.\n");
3493 3662
3494 /* 3663 /*
3495 * Make sure that nobody tries to wake us up again. 3664 * Make sure that nobody tries to wake us up again.
@@ -3596,9 +3765,11 @@ void
3596qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp) 3765qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
3597{ 3766{
3598 if (atomic_read(&sp->ref_count) == 0) { 3767 if (atomic_read(&sp->ref_count) == 0) {
3599 DEBUG2(qla_printk(KERN_WARNING, ha, 3768 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
3600 "SP reference-count to ZERO -- sp=%p\n", sp)); 3769 "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
3601 DEBUG2(BUG()); 3770 sp, sp->cmd);
3771 if (ql2xextended_error_logging & ql_dbg_io)
3772 BUG();
3602 return; 3773 return;
3603 } 3774 }
3604 if (!atomic_dec_and_test(&sp->ref_count)) 3775 if (!atomic_dec_and_test(&sp->ref_count))
@@ -3626,6 +3797,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
3626 struct req_que *req; 3797 struct req_que *req;
3627 3798
3628 if (ha->flags.eeh_busy) { 3799 if (ha->flags.eeh_busy) {
3800 ql_dbg(ql_dbg_timer, vha, 0x6000,
3801 "EEH = %d, restarting timer.\n",
3802 ha->flags.eeh_busy);
3629 qla2x00_restart_timer(vha, WATCH_INTERVAL); 3803 qla2x00_restart_timer(vha, WATCH_INTERVAL);
3630 return; 3804 return;
3631 } 3805 }
@@ -3650,9 +3824,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3650 if (atomic_read(&vha->loop_down_timer) == 3824 if (atomic_read(&vha->loop_down_timer) ==
3651 vha->loop_down_abort_time) { 3825 vha->loop_down_abort_time) {
3652 3826
3653 DEBUG(printk("scsi(%ld): Loop Down - aborting the " 3827 ql_log(ql_log_info, vha, 0x6008,
3654 "queues before time expire\n", 3828 "Loop down - aborting the queues before time expires.\n");
3655 vha->host_no));
3656 3829
3657 if (!IS_QLA2100(ha) && vha->link_down_timeout) 3830 if (!IS_QLA2100(ha) && vha->link_down_timeout)
3658 atomic_set(&vha->loop_state, LOOP_DEAD); 3831 atomic_set(&vha->loop_state, LOOP_DEAD);
@@ -3697,10 +3870,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
3697 /* if the loop has been down for 4 minutes, reinit adapter */ 3870 /* if the loop has been down for 4 minutes, reinit adapter */
3698 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { 3871 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
3699 if (!(vha->device_flags & DFLG_NO_CABLE)) { 3872 if (!(vha->device_flags & DFLG_NO_CABLE)) {
3700 DEBUG(printk("scsi(%ld): Loop down - " 3873 ql_log(ql_log_warn, vha, 0x6009,
3701 "aborting ISP.\n",
3702 vha->host_no));
3703 qla_printk(KERN_WARNING, ha,
3704 "Loop down - aborting ISP.\n"); 3874 "Loop down - aborting ISP.\n");
3705 3875
3706 if (IS_QLA82XX(ha)) 3876 if (IS_QLA82XX(ha))
@@ -3711,9 +3881,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
3711 &vha->dpc_flags); 3881 &vha->dpc_flags);
3712 } 3882 }
3713 } 3883 }
3714 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n", 3884 ql_dbg(ql_dbg_timer, vha, 0x600a,
3715 vha->host_no, 3885 "Loop down - seconds remaining %d.\n",
3716 atomic_read(&vha->loop_down_timer))); 3886 atomic_read(&vha->loop_down_timer));
3717 } 3887 }
3718 3888
3719 /* Check if beacon LED needs to be blinked for physical host only */ 3889 /* Check if beacon LED needs to be blinked for physical host only */
@@ -3736,8 +3906,27 @@ qla2x00_timer(scsi_qla_host_t *vha)
3736 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || 3906 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
3737 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 3907 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3738 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || 3908 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
3739 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) 3909 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
3910 ql_dbg(ql_dbg_timer, vha, 0x600b,
3911 "isp_abort_needed=%d loop_resync_needed=%d "
3912 "fcport_update_needed=%d start_dpc=%d "
3913 "reset_marker_needed=%d",
3914 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
3915 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
3916 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
3917 start_dpc,
3918 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
3919 ql_dbg(ql_dbg_timer, vha, 0x600c,
3920 "beacon_blink_needed=%d isp_unrecoverable=%d "
3921 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
3922 "relogin_needed=%d.\n",
3923 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
3924 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
3925 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
3926 test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
3927 test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
3740 qla2xxx_wake_dpc(vha); 3928 qla2xxx_wake_dpc(vha);
3929 }
3741 3930
3742 qla2x00_restart_timer(vha, WATCH_INTERVAL); 3931 qla2x00_restart_timer(vha, WATCH_INTERVAL);
3743} 3932}
@@ -3806,8 +3995,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
3806 goto out; 3995 goto out;
3807 3996
3808 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 3997 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
3809 DEBUG2(printk("scsi(%ld): Failed to load firmware image " 3998 ql_log(ql_log_warn, vha, 0x0063,
3810 "(%s).\n", vha->host_no, blob->name)); 3999 "Failed to load firmware image (%s).\n", blob->name);
3811 blob->fw = NULL; 4000 blob->fw = NULL;
3812 blob = NULL; 4001 blob = NULL;
3813 goto out; 4002 goto out;
@@ -3836,8 +4025,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3836 scsi_qla_host_t *vha = pci_get_drvdata(pdev); 4025 scsi_qla_host_t *vha = pci_get_drvdata(pdev);
3837 struct qla_hw_data *ha = vha->hw; 4026 struct qla_hw_data *ha = vha->hw;
3838 4027
3839 DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n", 4028 ql_dbg(ql_dbg_aer, vha, 0x9000,
3840 state)); 4029 "PCI error detected, state %x.\n", state);
3841 4030
3842 switch (state) { 4031 switch (state) {
3843 case pci_channel_io_normal: 4032 case pci_channel_io_normal:
@@ -3850,9 +4039,9 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3850 ha->flags.isp82xx_fw_hung = 1; 4039 ha->flags.isp82xx_fw_hung = 1;
3851 if (ha->flags.mbox_busy) { 4040 if (ha->flags.mbox_busy) {
3852 ha->flags.mbox_int = 1; 4041 ha->flags.mbox_int = 1;
3853 DEBUG2(qla_printk(KERN_ERR, ha, 4042 ql_dbg(ql_dbg_aer, vha, 0x9001,
3854 "Due to pci channel io frozen, doing premature " 4043 "Due to pci channel io frozen, doing premature "
3855 "completion of mbx command\n")); 4044 "completion of mbx command.\n");
3856 complete(&ha->mbx_intr_comp); 4045 complete(&ha->mbx_intr_comp);
3857 } 4046 }
3858 } 4047 }
@@ -3900,8 +4089,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
3900 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4089 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3901 4090
3902 if (risc_paused) { 4091 if (risc_paused) {
3903 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, " 4092 ql_log(ql_log_info, base_vha, 0x9003,
3904 "Dumping firmware!\n"); 4093 "RISC paused -- mmio_enabled, Dumping firmware.\n");
3905 ha->isp_ops->fw_dump(base_vha, 0); 4094 ha->isp_ops->fw_dump(base_vha, 0);
3906 4095
3907 return PCI_ERS_RESULT_NEED_RESET; 4096 return PCI_ERS_RESULT_NEED_RESET;
@@ -3917,8 +4106,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3917 int fn; 4106 int fn;
3918 struct pci_dev *other_pdev = NULL; 4107 struct pci_dev *other_pdev = NULL;
3919 4108
3920 DEBUG17(qla_printk(KERN_INFO, ha, 4109 ql_dbg(ql_dbg_aer, base_vha, 0x9006,
3921 "scsi(%ld): In qla82xx_error_recovery\n", base_vha->host_no)); 4110 "Entered %s.\n", __func__);
3922 4111
3923 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 4112 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3924 4113
@@ -3932,8 +4121,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3932 fn = PCI_FUNC(ha->pdev->devfn); 4121 fn = PCI_FUNC(ha->pdev->devfn);
3933 while (fn > 0) { 4122 while (fn > 0) {
3934 fn--; 4123 fn--;
3935 DEBUG17(qla_printk(KERN_INFO, ha, 4124 ql_dbg(ql_dbg_aer, base_vha, 0x9007,
3936 "Finding pci device at function = 0x%x\n", fn)); 4125 "Finding pci device at function = 0x%x.\n", fn);
3937 other_pdev = 4126 other_pdev =
3938 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), 4127 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3939 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 4128 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
@@ -3942,9 +4131,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3942 if (!other_pdev) 4131 if (!other_pdev)
3943 continue; 4132 continue;
3944 if (atomic_read(&other_pdev->enable_cnt)) { 4133 if (atomic_read(&other_pdev->enable_cnt)) {
3945 DEBUG17(qla_printk(KERN_INFO, ha, 4134 ql_dbg(ql_dbg_aer, base_vha, 0x9008,
3946 "Found PCI func available and enabled at 0x%x\n", 4135 "Found PCI func available and enable at 0x%x.\n",
3947 fn)); 4136 fn);
3948 pci_dev_put(other_pdev); 4137 pci_dev_put(other_pdev);
3949 break; 4138 break;
3950 } 4139 }
@@ -3953,8 +4142,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3953 4142
3954 if (!fn) { 4143 if (!fn) {
3955 /* Reset owner */ 4144 /* Reset owner */
3956 DEBUG17(qla_printk(KERN_INFO, ha, 4145 ql_dbg(ql_dbg_aer, base_vha, 0x9009,
3957 "This devfn is reset owner = 0x%x\n", ha->pdev->devfn)); 4146 "This devfn is reset owner = 0x%x.\n",
4147 ha->pdev->devfn);
3958 qla82xx_idc_lock(ha); 4148 qla82xx_idc_lock(ha);
3959 4149
3960 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4150 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
@@ -3964,8 +4154,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3964 QLA82XX_IDC_VERSION); 4154 QLA82XX_IDC_VERSION);
3965 4155
3966 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 4156 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3967 DEBUG17(qla_printk(KERN_INFO, ha, 4157 ql_dbg(ql_dbg_aer, base_vha, 0x900a,
3968 "drv_active = 0x%x\n", drv_active)); 4158 "drv_active = 0x%x.\n", drv_active);
3969 4159
3970 qla82xx_idc_unlock(ha); 4160 qla82xx_idc_unlock(ha);
3971 /* Reset if device is not already reset 4161 /* Reset if device is not already reset
@@ -3978,12 +4168,14 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3978 qla82xx_idc_lock(ha); 4168 qla82xx_idc_lock(ha);
3979 4169
3980 if (rval != QLA_SUCCESS) { 4170 if (rval != QLA_SUCCESS) {
3981 qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); 4171 ql_log(ql_log_info, base_vha, 0x900b,
4172 "HW State: FAILED.\n");
3982 qla82xx_clear_drv_active(ha); 4173 qla82xx_clear_drv_active(ha);
3983 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4174 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3984 QLA82XX_DEV_FAILED); 4175 QLA82XX_DEV_FAILED);
3985 } else { 4176 } else {
3986 qla_printk(KERN_INFO, ha, "HW State: READY\n"); 4177 ql_log(ql_log_info, base_vha, 0x900c,
4178 "HW State: READY.\n");
3987 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4179 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3988 QLA82XX_DEV_READY); 4180 QLA82XX_DEV_READY);
3989 qla82xx_idc_unlock(ha); 4181 qla82xx_idc_unlock(ha);
@@ -3996,8 +4188,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3996 } 4188 }
3997 qla82xx_idc_unlock(ha); 4189 qla82xx_idc_unlock(ha);
3998 } else { 4190 } else {
3999 DEBUG17(qla_printk(KERN_INFO, ha, 4191 ql_dbg(ql_dbg_aer, base_vha, 0x900d,
4000 "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn)); 4192 "This devfn is not reset owner = 0x%x.\n",
4193 ha->pdev->devfn);
4001 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == 4194 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
4002 QLA82XX_DEV_READY)) { 4195 QLA82XX_DEV_READY)) {
4003 ha->flags.isp82xx_fw_hung = 0; 4196 ha->flags.isp82xx_fw_hung = 0;
@@ -4021,7 +4214,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4021 struct rsp_que *rsp; 4214 struct rsp_que *rsp;
4022 int rc, retries = 10; 4215 int rc, retries = 10;
4023 4216
4024 DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n")); 4217 ql_dbg(ql_dbg_aer, base_vha, 0x9004,
4218 "Slot Reset.\n");
4025 4219
4026 /* Workaround: qla2xxx driver which access hardware earlier 4220 /* Workaround: qla2xxx driver which access hardware earlier
4027 * needs error state to be pci_channel_io_online. 4221 * needs error state to be pci_channel_io_online.
@@ -4042,7 +4236,7 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4042 rc = pci_enable_device(pdev); 4236 rc = pci_enable_device(pdev);
4043 4237
4044 if (rc) { 4238 if (rc) {
4045 qla_printk(KERN_WARNING, ha, 4239 ql_log(ql_log_warn, base_vha, 0x9005,
4046 "Can't re-enable PCI device after reset.\n"); 4240 "Can't re-enable PCI device after reset.\n");
4047 goto exit_slot_reset; 4241 goto exit_slot_reset;
4048 } 4242 }
@@ -4072,8 +4266,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4072 4266
4073 4267
4074exit_slot_reset: 4268exit_slot_reset:
4075 DEBUG17(qla_printk(KERN_WARNING, ha, 4269 ql_dbg(ql_dbg_aer, base_vha, 0x900e,
4076 "slot_reset-return:ret=%x\n", ret)); 4270 "slot_reset return %x.\n", ret);
4077 4271
4078 return ret; 4272 return ret;
4079} 4273}
@@ -4085,13 +4279,13 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
4085 struct qla_hw_data *ha = base_vha->hw; 4279 struct qla_hw_data *ha = base_vha->hw;
4086 int ret; 4280 int ret;
4087 4281
4088 DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n")); 4282 ql_dbg(ql_dbg_aer, base_vha, 0x900f,
4283 "pci_resume.\n");
4089 4284
4090 ret = qla2x00_wait_for_hba_online(base_vha); 4285 ret = qla2x00_wait_for_hba_online(base_vha);
4091 if (ret != QLA_SUCCESS) { 4286 if (ret != QLA_SUCCESS) {
4092 qla_printk(KERN_ERR, ha, 4287 ql_log(ql_log_fatal, base_vha, 0x9002,
4093 "the device failed to resume I/O " 4288 "The device failed to resume I/O from slot/link_reset.\n");
4094 "from slot/link_reset");
4095 } 4289 }
4096 4290
4097 pci_cleanup_aer_uncorrect_error_status(pdev); 4291 pci_cleanup_aer_uncorrect_error_status(pdev);
@@ -4155,8 +4349,8 @@ qla2x00_module_init(void)
4155 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, 4349 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
4156 SLAB_HWCACHE_ALIGN, NULL); 4350 SLAB_HWCACHE_ALIGN, NULL);
4157 if (srb_cachep == NULL) { 4351 if (srb_cachep == NULL) {
4158 printk(KERN_ERR 4352 ql_log(ql_log_fatal, NULL, 0x0001,
4159 "qla2xxx: Unable to allocate SRB cache...Failing load!\n"); 4353 "Unable to allocate SRB cache...Failing load!.\n");
4160 return -ENOMEM; 4354 return -ENOMEM;
4161 } 4355 }
4162 4356
@@ -4169,13 +4363,15 @@ qla2x00_module_init(void)
4169 fc_attach_transport(&qla2xxx_transport_functions); 4363 fc_attach_transport(&qla2xxx_transport_functions);
4170 if (!qla2xxx_transport_template) { 4364 if (!qla2xxx_transport_template) {
4171 kmem_cache_destroy(srb_cachep); 4365 kmem_cache_destroy(srb_cachep);
4366 ql_log(ql_log_fatal, NULL, 0x0002,
4367 "fc_attach_transport failed...Failing load!.\n");
4172 return -ENODEV; 4368 return -ENODEV;
4173 } 4369 }
4174 4370
4175 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops); 4371 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
4176 if (apidev_major < 0) { 4372 if (apidev_major < 0) {
4177 printk(KERN_WARNING "qla2xxx: Unable to register char device " 4373 ql_log(ql_log_fatal, NULL, 0x0003,
4178 "%s\n", QLA2XXX_APIDEV); 4374 "Unable to register char device %s.\n", QLA2XXX_APIDEV);
4179 } 4375 }
4180 4376
4181 qla2xxx_transport_vport_template = 4377 qla2xxx_transport_vport_template =
@@ -4183,16 +4379,21 @@ qla2x00_module_init(void)
4183 if (!qla2xxx_transport_vport_template) { 4379 if (!qla2xxx_transport_vport_template) {
4184 kmem_cache_destroy(srb_cachep); 4380 kmem_cache_destroy(srb_cachep);
4185 fc_release_transport(qla2xxx_transport_template); 4381 fc_release_transport(qla2xxx_transport_template);
4382 ql_log(ql_log_fatal, NULL, 0x0004,
4383 "fc_attach_transport vport failed...Failing load!.\n");
4186 return -ENODEV; 4384 return -ENODEV;
4187 } 4385 }
4188 4386 ql_log(ql_log_info, NULL, 0x0005,
4189 printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n", 4387 "QLogic Fibre Channel HBA Driver: %s.\n",
4190 qla2x00_version_str); 4388 qla2x00_version_str);
4191 ret = pci_register_driver(&qla2xxx_pci_driver); 4389 ret = pci_register_driver(&qla2xxx_pci_driver);
4192 if (ret) { 4390 if (ret) {
4193 kmem_cache_destroy(srb_cachep); 4391 kmem_cache_destroy(srb_cachep);
4194 fc_release_transport(qla2xxx_transport_template); 4392 fc_release_transport(qla2xxx_transport_template);
4195 fc_release_transport(qla2xxx_transport_vport_template); 4393 fc_release_transport(qla2xxx_transport_vport_template);
4394 ql_log(ql_log_fatal, NULL, 0x0006,
4395 "pci_register_driver failed...ret=%d Failing load!.\n",
4396 ret);
4196 } 4397 }
4197 return ret; 4398 return ret;
4198} 4399}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 693647661ed1..eff13563c82d 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -189,6 +189,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
189 uint16_t word; 189 uint16_t word;
190 uint32_t nv_cmd, wait_cnt; 190 uint32_t nv_cmd, wait_cnt;
191 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 191 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
192 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
192 193
193 qla2x00_nv_write(ha, NVR_DATA_OUT); 194 qla2x00_nv_write(ha, NVR_DATA_OUT);
194 qla2x00_nv_write(ha, 0); 195 qla2x00_nv_write(ha, 0);
@@ -220,8 +221,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
220 wait_cnt = NVR_WAIT_CNT; 221 wait_cnt = NVR_WAIT_CNT;
221 do { 222 do {
222 if (!--wait_cnt) { 223 if (!--wait_cnt) {
223 DEBUG9_10(qla_printk(KERN_WARNING, ha, 224 ql_dbg(ql_dbg_user, vha, 0x708d,
224 "NVRAM didn't go ready...\n")); 225 "NVRAM didn't go ready...\n");
225 break; 226 break;
226 } 227 }
227 NVRAM_DELAY(); 228 NVRAM_DELAY();
@@ -308,6 +309,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
308 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 309 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
309 uint32_t word, wait_cnt; 310 uint32_t word, wait_cnt;
310 uint16_t wprot, wprot_old; 311 uint16_t wprot, wprot_old;
312 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
311 313
312 /* Clear NVRAM write protection. */ 314 /* Clear NVRAM write protection. */
313 ret = QLA_FUNCTION_FAILED; 315 ret = QLA_FUNCTION_FAILED;
@@ -350,8 +352,8 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
350 wait_cnt = NVR_WAIT_CNT; 352 wait_cnt = NVR_WAIT_CNT;
351 do { 353 do {
352 if (!--wait_cnt) { 354 if (!--wait_cnt) {
353 DEBUG9_10(qla_printk(KERN_WARNING, ha, 355 ql_dbg(ql_dbg_user, vha, 0x708e,
354 "NVRAM didn't go ready...\n")); 356 "NVRAM didn't go ready...\n");
355 break; 357 break;
356 } 358 }
357 NVRAM_DELAY(); 359 NVRAM_DELAY();
@@ -371,6 +373,7 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
371{ 373{
372 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 374 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
373 uint32_t word, wait_cnt; 375 uint32_t word, wait_cnt;
376 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
374 377
375 if (stat != QLA_SUCCESS) 378 if (stat != QLA_SUCCESS)
376 return; 379 return;
@@ -409,8 +412,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
409 wait_cnt = NVR_WAIT_CNT; 412 wait_cnt = NVR_WAIT_CNT;
410 do { 413 do {
411 if (!--wait_cnt) { 414 if (!--wait_cnt) {
412 DEBUG9_10(qla_printk(KERN_WARNING, ha, 415 ql_dbg(ql_dbg_user, vha, 0x708f,
413 "NVRAM didn't go ready...\n")); 416 "NVRAM didn't go ready...\n");
414 break; 417 break;
415 } 418 }
416 NVRAM_DELAY(); 419 NVRAM_DELAY();
@@ -607,9 +610,10 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
607 for (chksum = 0; cnt; cnt--) 610 for (chksum = 0; cnt; cnt--)
608 chksum += le16_to_cpu(*wptr++); 611 chksum += le16_to_cpu(*wptr++);
609 if (chksum) { 612 if (chksum) {
610 qla_printk(KERN_ERR, ha, 613 ql_log(ql_log_fatal, vha, 0x0045,
611 "Inconsistent FLTL detected: checksum=0x%x.\n", chksum); 614 "Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
612 qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location)); 615 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e,
616 buf, sizeof(struct qla_flt_location));
613 return QLA_FUNCTION_FAILED; 617 return QLA_FUNCTION_FAILED;
614 } 618 }
615 619
@@ -618,7 +622,9 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
618 *start = (le16_to_cpu(fltl->start_hi) << 16 | 622 *start = (le16_to_cpu(fltl->start_hi) << 16 |
619 le16_to_cpu(fltl->start_lo)) >> 2; 623 le16_to_cpu(fltl->start_lo)) >> 2;
620end: 624end:
621 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start)); 625 ql_dbg(ql_dbg_init, vha, 0x0046,
626 "FLTL[%s] = 0x%x.\n",
627 loc, *start);
622 return QLA_SUCCESS; 628 return QLA_SUCCESS;
623} 629}
624 630
@@ -685,10 +691,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
685 if (*wptr == __constant_cpu_to_le16(0xffff)) 691 if (*wptr == __constant_cpu_to_le16(0xffff))
686 goto no_flash_data; 692 goto no_flash_data;
687 if (flt->version != __constant_cpu_to_le16(1)) { 693 if (flt->version != __constant_cpu_to_le16(1)) {
688 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: " 694 ql_log(ql_log_warn, vha, 0x0047,
689 "version=0x%x length=0x%x checksum=0x%x.\n", 695 "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
690 le16_to_cpu(flt->version), le16_to_cpu(flt->length), 696 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
691 le16_to_cpu(flt->checksum))); 697 le16_to_cpu(flt->checksum));
692 goto no_flash_data; 698 goto no_flash_data;
693 } 699 }
694 700
@@ -696,10 +702,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
696 for (chksum = 0; cnt; cnt--) 702 for (chksum = 0; cnt; cnt--)
697 chksum += le16_to_cpu(*wptr++); 703 chksum += le16_to_cpu(*wptr++);
698 if (chksum) { 704 if (chksum) {
699 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: " 705 ql_log(ql_log_fatal, vha, 0x0048,
700 "version=0x%x length=0x%x checksum=0x%x.\n", 706 "Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
701 le16_to_cpu(flt->version), le16_to_cpu(flt->length), 707 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
702 chksum)); 708 le16_to_cpu(flt->checksum));
703 goto no_flash_data; 709 goto no_flash_data;
704 } 710 }
705 711
@@ -708,10 +714,11 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
708 for ( ; cnt; cnt--, region++) { 714 for ( ; cnt; cnt--, region++) {
709 /* Store addresses as DWORD offsets. */ 715 /* Store addresses as DWORD offsets. */
710 start = le32_to_cpu(region->start) >> 2; 716 start = le32_to_cpu(region->start) >> 2;
711 717 ql_dbg(ql_dbg_init, vha, 0x0049,
712 DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x " 718 "FLT[%02x]: start=0x%x "
713 "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start, 719 "end=0x%x size=0x%x.\n", le32_to_cpu(region->code),
714 le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size))); 720 start, le32_to_cpu(region->end) >> 2,
721 le32_to_cpu(region->size));
715 722
716 switch (le32_to_cpu(region->code) & 0xff) { 723 switch (le32_to_cpu(region->code) & 0xff) {
717 case FLT_REG_FW: 724 case FLT_REG_FW:
@@ -796,12 +803,16 @@ no_flash_data:
796 ha->flt_region_npiv_conf = ha->flags.port0 ? 803 ha->flt_region_npiv_conf = ha->flags.port0 ?
797 def_npiv_conf0[def] : def_npiv_conf1[def]; 804 def_npiv_conf0[def] : def_npiv_conf1[def];
798done: 805done:
799 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " 806 ql_dbg(ql_dbg_init, vha, 0x004a,
800 "vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x " 807 "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x.\n",
801 "npiv=0x%x. fcp_prio_cfg=0x%x\n", loc, ha->flt_region_boot, 808 loc, ha->flt_region_boot,
802 ha->flt_region_fw, ha->flt_region_vpd_nvram, ha->flt_region_vpd, 809 ha->flt_region_fw, ha->flt_region_vpd_nvram,
803 ha->flt_region_nvram, ha->flt_region_fdt, ha->flt_region_flt, 810 ha->flt_region_vpd);
804 ha->flt_region_npiv_conf, ha->flt_region_fcp_prio)); 811 ql_dbg(ql_dbg_init, vha, 0x004b,
812 "nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n",
813 ha->flt_region_nvram,
814 ha->flt_region_fdt, ha->flt_region_flt,
815 ha->flt_region_npiv_conf, ha->flt_region_fcp_prio);
805} 816}
806 817
807static void 818static void
@@ -833,10 +844,12 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
833 cnt++) 844 cnt++)
834 chksum += le16_to_cpu(*wptr++); 845 chksum += le16_to_cpu(*wptr++);
835 if (chksum) { 846 if (chksum) {
836 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FDT detected: " 847 ql_dbg(ql_dbg_init, vha, 0x004c,
837 "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0], 848 "Inconsistent FDT detected:"
838 le16_to_cpu(fdt->version))); 849 " checksum=0x%x id=%c version0x%x.\n", chksum,
839 DEBUG9(qla2x00_dump_buffer((uint8_t *)fdt, sizeof(*fdt))); 850 fdt->sig[0], le16_to_cpu(fdt->version));
851 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113,
852 (uint8_t *)fdt, sizeof(*fdt));
840 goto no_flash_data; 853 goto no_flash_data;
841 } 854 }
842 855
@@ -890,11 +903,12 @@ no_flash_data:
890 break; 903 break;
891 } 904 }
892done: 905done:
893 DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x " 906 ql_dbg(ql_dbg_init, vha, 0x004d,
894 "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid, 907 "FDT[%x]: (0x%x/0x%x) erase=0x%x "
908 "pr=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
895 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, 909 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
896 ha->fdt_unprotect_sec_cmd, ha->fdt_wrt_disable, 910 ha->fdt_wrt_disable, ha->fdt_block_size);
897 ha->fdt_block_size)); 911
898} 912}
899 913
900static void 914static void
@@ -919,6 +933,10 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
919 ha->nx_dev_init_timeout = le32_to_cpu(*wptr++); 933 ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
920 ha->nx_reset_timeout = le32_to_cpu(*wptr); 934 ha->nx_reset_timeout = le32_to_cpu(*wptr);
921 } 935 }
936 ql_dbg(ql_dbg_init, vha, 0x004e,
937 "nx_dev_init_timeout=%d "
938 "nx_reset_timeout=%d.\n", ha->nx_dev_init_timeout,
939 ha->nx_reset_timeout);
922 return; 940 return;
923} 941}
924 942
@@ -963,17 +981,18 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
963 if (hdr.version == __constant_cpu_to_le16(0xffff)) 981 if (hdr.version == __constant_cpu_to_le16(0xffff))
964 return; 982 return;
965 if (hdr.version != __constant_cpu_to_le16(1)) { 983 if (hdr.version != __constant_cpu_to_le16(1)) {
966 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config " 984 ql_dbg(ql_dbg_user, vha, 0x7090,
985 "Unsupported NPIV-Config "
967 "detected: version=0x%x entries=0x%x checksum=0x%x.\n", 986 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
968 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), 987 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
969 le16_to_cpu(hdr.checksum))); 988 le16_to_cpu(hdr.checksum));
970 return; 989 return;
971 } 990 }
972 991
973 data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL); 992 data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
974 if (!data) { 993 if (!data) {
975 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to " 994 ql_log(ql_log_warn, vha, 0x7091,
976 "allocate memory.\n")); 995 "Unable to allocate memory for data.\n");
977 return; 996 return;
978 } 997 }
979 998
@@ -985,10 +1004,11 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
985 for (wptr = data, chksum = 0; cnt; cnt--) 1004 for (wptr = data, chksum = 0; cnt; cnt--)
986 chksum += le16_to_cpu(*wptr++); 1005 chksum += le16_to_cpu(*wptr++);
987 if (chksum) { 1006 if (chksum) {
988 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config " 1007 ql_dbg(ql_dbg_user, vha, 0x7092,
1008 "Inconsistent NPIV-Config "
989 "detected: version=0x%x entries=0x%x checksum=0x%x.\n", 1009 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
990 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), 1010 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
991 chksum)); 1011 le16_to_cpu(hdr.checksum));
992 goto done; 1012 goto done;
993 } 1013 }
994 1014
@@ -1014,21 +1034,22 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
1014 vid.port_name = wwn_to_u64(entry->port_name); 1034 vid.port_name = wwn_to_u64(entry->port_name);
1015 vid.node_name = wwn_to_u64(entry->node_name); 1035 vid.node_name = wwn_to_u64(entry->node_name);
1016 1036
1017 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx " 1037 ql_dbg(ql_dbg_user, vha, 0x7093,
1018 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, 1038 "NPIV[%02x]: wwpn=%llx "
1019 (unsigned long long)vid.port_name, 1039 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
1020 (unsigned long long)vid.node_name, 1040 (unsigned long long)vid.port_name,
1021 le16_to_cpu(entry->vf_id), 1041 (unsigned long long)vid.node_name,
1022 entry->q_qos, entry->f_qos)); 1042 le16_to_cpu(entry->vf_id),
1043 entry->q_qos, entry->f_qos);
1023 1044
1024 if (i < QLA_PRECONFIG_VPORTS) { 1045 if (i < QLA_PRECONFIG_VPORTS) {
1025 vport = fc_vport_create(vha->host, 0, &vid); 1046 vport = fc_vport_create(vha->host, 0, &vid);
1026 if (!vport) 1047 if (!vport)
1027 qla_printk(KERN_INFO, ha, 1048 ql_log(ql_log_warn, vha, 0x7094,
1028 "NPIV-Config: Failed to create vport [%02x]: " 1049 "NPIV-Config Failed to create vport [%02x]: "
1029 "wwpn=%llx wwnn=%llx.\n", cnt, 1050 "wwpn=%llx wwnn=%llx.\n", cnt,
1030 (unsigned long long)vid.port_name, 1051 (unsigned long long)vid.port_name,
1031 (unsigned long long)vid.node_name); 1052 (unsigned long long)vid.node_name);
1032 } 1053 }
1033 } 1054 }
1034done: 1055done:
@@ -1127,9 +1148,10 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1127 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 1148 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
1128 &optrom_dma, GFP_KERNEL); 1149 &optrom_dma, GFP_KERNEL);
1129 if (!optrom) { 1150 if (!optrom) {
1130 qla_printk(KERN_DEBUG, ha, 1151 ql_log(ql_log_warn, vha, 0x7095,
1131 "Unable to allocate memory for optrom burst write " 1152 "Unable to allocate "
1132 "(%x KB).\n", OPTROM_BURST_SIZE / 1024); 1153 "memory for optrom burst write (%x KB).\n",
1154 OPTROM_BURST_SIZE / 1024);
1133 } 1155 }
1134 } 1156 }
1135 1157
@@ -1138,7 +1160,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1138 1160
1139 ret = qla24xx_unprotect_flash(vha); 1161 ret = qla24xx_unprotect_flash(vha);
1140 if (ret != QLA_SUCCESS) { 1162 if (ret != QLA_SUCCESS) {
1141 qla_printk(KERN_WARNING, ha, 1163 ql_log(ql_log_warn, vha, 0x7096,
1142 "Unable to unprotect flash for update.\n"); 1164 "Unable to unprotect flash for update.\n");
1143 goto done; 1165 goto done;
1144 } 1166 }
@@ -1156,9 +1178,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1156 0xff0000) | ((fdata >> 16) & 0xff)); 1178 0xff0000) | ((fdata >> 16) & 0xff));
1157 ret = qla24xx_erase_sector(vha, fdata); 1179 ret = qla24xx_erase_sector(vha, fdata);
1158 if (ret != QLA_SUCCESS) { 1180 if (ret != QLA_SUCCESS) {
1159 DEBUG9(qla_printk(KERN_WARNING, ha, 1181 ql_dbg(ql_dbg_user, vha, 0x7007,
1160 "Unable to erase sector: address=%x.\n", 1182 "Unable to erase erase sector: address=%x.\n",
1161 faddr)); 1183 faddr);
1162 break; 1184 break;
1163 } 1185 }
1164 } 1186 }
@@ -1172,12 +1194,12 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1172 flash_data_addr(ha, faddr), 1194 flash_data_addr(ha, faddr),
1173 OPTROM_BURST_DWORDS); 1195 OPTROM_BURST_DWORDS);
1174 if (ret != QLA_SUCCESS) { 1196 if (ret != QLA_SUCCESS) {
1175 qla_printk(KERN_WARNING, ha, 1197 ql_log(ql_log_warn, vha, 0x7097,
1176 "Unable to burst-write optrom segment " 1198 "Unable to burst-write optrom segment "
1177 "(%x/%x/%llx).\n", ret, 1199 "(%x/%x/%llx).\n", ret,
1178 flash_data_addr(ha, faddr), 1200 flash_data_addr(ha, faddr),
1179 (unsigned long long)optrom_dma); 1201 (unsigned long long)optrom_dma);
1180 qla_printk(KERN_WARNING, ha, 1202 ql_log(ql_log_warn, vha, 0x7098,
1181 "Reverting to slow-write.\n"); 1203 "Reverting to slow-write.\n");
1182 1204
1183 dma_free_coherent(&ha->pdev->dev, 1205 dma_free_coherent(&ha->pdev->dev,
@@ -1194,9 +1216,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1194 ret = qla24xx_write_flash_dword(ha, 1216 ret = qla24xx_write_flash_dword(ha,
1195 flash_data_addr(ha, faddr), cpu_to_le32(*dwptr)); 1217 flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
1196 if (ret != QLA_SUCCESS) { 1218 if (ret != QLA_SUCCESS) {
1197 DEBUG9(printk("%s(%ld) Unable to program flash " 1219 ql_dbg(ql_dbg_user, vha, 0x7006,
1198 "address=%x data=%x.\n", __func__, 1220 "Unable to program flash address=%x data=%x.\n",
1199 vha->host_no, faddr, *dwptr)); 1221 faddr, *dwptr);
1200 break; 1222 break;
1201 } 1223 }
1202 1224
@@ -1211,7 +1233,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1211 1233
1212 ret = qla24xx_protect_flash(vha); 1234 ret = qla24xx_protect_flash(vha);
1213 if (ret != QLA_SUCCESS) 1235 if (ret != QLA_SUCCESS)
1214 qla_printk(KERN_WARNING, ha, 1236 ql_log(ql_log_warn, vha, 0x7099,
1215 "Unable to protect flash after update.\n"); 1237 "Unable to protect flash after update.\n");
1216done: 1238done:
1217 if (optrom) 1239 if (optrom)
@@ -1324,9 +1346,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1324 ret = qla24xx_write_flash_dword(ha, 1346 ret = qla24xx_write_flash_dword(ha,
1325 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr)); 1347 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
1326 if (ret != QLA_SUCCESS) { 1348 if (ret != QLA_SUCCESS) {
1327 DEBUG9(qla_printk(KERN_WARNING, ha, 1349 ql_dbg(ql_dbg_user, vha, 0x709a,
1328 "Unable to program nvram address=%x data=%x.\n", 1350 "Unable to program nvram address=%x data=%x.\n",
1329 naddr, *dwptr)); 1351 naddr, *dwptr);
1330 break; 1352 break;
1331 } 1353 }
1332 } 1354 }
@@ -1476,7 +1498,7 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
1476 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7; 1498 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
1477 1499
1478 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { 1500 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1479 qla_printk(KERN_WARNING, ha, 1501 ql_log(ql_log_warn, vha, 0x709b,
1480 "Unable to update fw options (beacon on).\n"); 1502 "Unable to update fw options (beacon on).\n");
1481 return QLA_FUNCTION_FAILED; 1503 return QLA_FUNCTION_FAILED;
1482 } 1504 }
@@ -1541,7 +1563,7 @@ qla2x00_beacon_off(struct scsi_qla_host *vha)
1541 1563
1542 rval = qla2x00_set_fw_options(vha, ha->fw_options); 1564 rval = qla2x00_set_fw_options(vha, ha->fw_options);
1543 if (rval != QLA_SUCCESS) 1565 if (rval != QLA_SUCCESS)
1544 qla_printk(KERN_WARNING, ha, 1566 ql_log(ql_log_warn, vha, 0x709c,
1545 "Unable to update fw options (beacon off).\n"); 1567 "Unable to update fw options (beacon off).\n");
1546 return rval; 1568 return rval;
1547} 1569}
@@ -1616,7 +1638,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
1616 1638
1617 if (qla2x00_get_fw_options(vha, ha->fw_options) != 1639 if (qla2x00_get_fw_options(vha, ha->fw_options) !=
1618 QLA_SUCCESS) { 1640 QLA_SUCCESS) {
1619 qla_printk(KERN_WARNING, ha, 1641 ql_log(ql_log_warn, vha, 0x7009,
1620 "Unable to update fw options (beacon on).\n"); 1642 "Unable to update fw options (beacon on).\n");
1621 return QLA_FUNCTION_FAILED; 1643 return QLA_FUNCTION_FAILED;
1622 } 1644 }
@@ -1670,14 +1692,14 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
1670 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL; 1692 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
1671 1693
1672 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { 1694 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1673 qla_printk(KERN_WARNING, ha, 1695 ql_log(ql_log_warn, vha, 0x704d,
1674 "Unable to update fw options (beacon off).\n"); 1696 "Unable to update fw options (beacon on).\n");
1675 return QLA_FUNCTION_FAILED; 1697 return QLA_FUNCTION_FAILED;
1676 } 1698 }
1677 1699
1678 if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { 1700 if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1679 qla_printk(KERN_WARNING, ha, 1701 ql_log(ql_log_warn, vha, 0x704e,
1680 "Unable to get fw options (beacon off).\n"); 1702 "Unable to update fw options (beacon on).\n");
1681 return QLA_FUNCTION_FAILED; 1703 return QLA_FUNCTION_FAILED;
1682 } 1704 }
1683 1705
@@ -2389,10 +2411,9 @@ try_fast:
2389 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 2411 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
2390 &optrom_dma, GFP_KERNEL); 2412 &optrom_dma, GFP_KERNEL);
2391 if (!optrom) { 2413 if (!optrom) {
2392 qla_printk(KERN_DEBUG, ha, 2414 ql_log(ql_log_warn, vha, 0x00cc,
2393 "Unable to allocate memory for optrom burst read " 2415 "Unable to allocate memory for optrom burst read (%x KB).\n",
2394 "(%x KB).\n", OPTROM_BURST_SIZE / 1024); 2416 OPTROM_BURST_SIZE / 1024);
2395
2396 goto slow_read; 2417 goto slow_read;
2397 } 2418 }
2398 2419
@@ -2407,12 +2428,11 @@ try_fast:
2407 rval = qla2x00_dump_ram(vha, optrom_dma, 2428 rval = qla2x00_dump_ram(vha, optrom_dma,
2408 flash_data_addr(ha, faddr), burst); 2429 flash_data_addr(ha, faddr), burst);
2409 if (rval) { 2430 if (rval) {
2410 qla_printk(KERN_WARNING, ha, 2431 ql_log(ql_log_warn, vha, 0x00f5,
2411 "Unable to burst-read optrom segment " 2432 "Unable to burst-read optrom segment (%x/%x/%llx).\n",
2412 "(%x/%x/%llx).\n", rval, 2433 rval, flash_data_addr(ha, faddr),
2413 flash_data_addr(ha, faddr),
2414 (unsigned long long)optrom_dma); 2434 (unsigned long long)optrom_dma);
2415 qla_printk(KERN_WARNING, ha, 2435 ql_log(ql_log_warn, vha, 0x00f6,
2416 "Reverting to slow-read.\n"); 2436 "Reverting to slow-read.\n");
2417 2437
2418 dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 2438 dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
@@ -2556,8 +2576,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2556 if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 || 2576 if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
2557 qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) { 2577 qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
2558 /* No signature */ 2578 /* No signature */
2559 DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM " 2579 ql_log(ql_log_fatal, vha, 0x0050,
2560 "signature.\n")); 2580 "No matching ROM signature.\n");
2561 ret = QLA_FUNCTION_FAILED; 2581 ret = QLA_FUNCTION_FAILED;
2562 break; 2582 break;
2563 } 2583 }
@@ -2573,8 +2593,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2573 qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' || 2593 qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
2574 qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') { 2594 qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
2575 /* Incorrect header. */ 2595 /* Incorrect header. */
2576 DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not " 2596 ql_log(ql_log_fatal, vha, 0x0051,
2577 "found pcir_adr=%x.\n", pcids)); 2597 "PCI data struct not found pcir_adr=%x.\n", pcids);
2578 ret = QLA_FUNCTION_FAILED; 2598 ret = QLA_FUNCTION_FAILED;
2579 break; 2599 break;
2580 } 2600 }
@@ -2588,8 +2608,9 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2588 qla2x00_read_flash_byte(ha, pcids + 0x12); 2608 qla2x00_read_flash_byte(ha, pcids + 0x12);
2589 ha->bios_revision[1] = 2609 ha->bios_revision[1] =
2590 qla2x00_read_flash_byte(ha, pcids + 0x13); 2610 qla2x00_read_flash_byte(ha, pcids + 0x13);
2591 DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n", 2611 ql_dbg(ql_dbg_init, vha, 0x0052,
2592 ha->bios_revision[1], ha->bios_revision[0])); 2612 "Read BIOS %d.%d.\n",
2613 ha->bios_revision[1], ha->bios_revision[0]);
2593 break; 2614 break;
2594 case ROM_CODE_TYPE_FCODE: 2615 case ROM_CODE_TYPE_FCODE:
2595 /* Open Firmware standard for PCI (FCode). */ 2616 /* Open Firmware standard for PCI (FCode). */
@@ -2602,12 +2623,14 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2602 qla2x00_read_flash_byte(ha, pcids + 0x12); 2623 qla2x00_read_flash_byte(ha, pcids + 0x12);
2603 ha->efi_revision[1] = 2624 ha->efi_revision[1] =
2604 qla2x00_read_flash_byte(ha, pcids + 0x13); 2625 qla2x00_read_flash_byte(ha, pcids + 0x13);
2605 DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n", 2626 ql_dbg(ql_dbg_init, vha, 0x0053,
2606 ha->efi_revision[1], ha->efi_revision[0])); 2627 "Read EFI %d.%d.\n",
2628 ha->efi_revision[1], ha->efi_revision[0]);
2607 break; 2629 break;
2608 default: 2630 default:
2609 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code " 2631 ql_log(ql_log_warn, vha, 0x0054,
2610 "type %x at pcids %x.\n", code_type, pcids)); 2632 "Unrecognized code type %x at pcids %x.\n",
2633 code_type, pcids);
2611 break; 2634 break;
2612 } 2635 }
2613 2636
@@ -2627,21 +2650,28 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2627 2650
2628 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10, 2651 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
2629 8); 2652 8);
2630 DEBUG3(qla_printk(KERN_DEBUG, ha, "dumping fw ver from " 2653 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010a,
2631 "flash:\n")); 2654 "Dumping fw "
2632 DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8)); 2655 "ver from flash:.\n");
2656 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b,
2657 (uint8_t *)dbyte, 8);
2633 2658
2634 if ((dcode[0] == 0xffff && dcode[1] == 0xffff && 2659 if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
2635 dcode[2] == 0xffff && dcode[3] == 0xffff) || 2660 dcode[2] == 0xffff && dcode[3] == 0xffff) ||
2636 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2661 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2637 dcode[3] == 0)) { 2662 dcode[3] == 0)) {
2638 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw " 2663 ql_log(ql_log_warn, vha, 0x0057,
2639 "revision at %x.\n", ha->flt_region_fw * 4)); 2664 "Unrecognized fw revision at %x.\n",
2665 ha->flt_region_fw * 4);
2640 } else { 2666 } else {
2641 /* values are in big endian */ 2667 /* values are in big endian */
2642 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; 2668 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
2643 ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3]; 2669 ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3];
2644 ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5]; 2670 ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5];
2671 ql_dbg(ql_dbg_init, vha, 0x0058,
2672 "FW Version: "
2673 "%d.%d.%d.\n", ha->fw_revision[0],
2674 ha->fw_revision[1], ha->fw_revision[2]);
2645 } 2675 }
2646 } 2676 }
2647 2677
@@ -2683,8 +2713,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2683 bcode = mbuf + (pcihdr % 4); 2713 bcode = mbuf + (pcihdr % 4);
2684 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) { 2714 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
2685 /* No signature */ 2715 /* No signature */
2686 DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM " 2716 ql_log(ql_log_fatal, vha, 0x0059,
2687 "signature.\n")); 2717 "No matching ROM signature.\n");
2688 ret = QLA_FUNCTION_FAILED; 2718 ret = QLA_FUNCTION_FAILED;
2689 break; 2719 break;
2690 } 2720 }
@@ -2699,8 +2729,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2699 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || 2729 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
2700 bcode[0x2] != 'I' || bcode[0x3] != 'R') { 2730 bcode[0x2] != 'I' || bcode[0x3] != 'R') {
2701 /* Incorrect header. */ 2731 /* Incorrect header. */
2702 DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not " 2732 ql_log(ql_log_fatal, vha, 0x005a,
2703 "found pcir_adr=%x.\n", pcids)); 2733 "PCI data struct not found pcir_adr=%x.\n", pcids);
2704 ret = QLA_FUNCTION_FAILED; 2734 ret = QLA_FUNCTION_FAILED;
2705 break; 2735 break;
2706 } 2736 }
@@ -2712,26 +2742,30 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2712 /* Intel x86, PC-AT compatible. */ 2742 /* Intel x86, PC-AT compatible. */
2713 ha->bios_revision[0] = bcode[0x12]; 2743 ha->bios_revision[0] = bcode[0x12];
2714 ha->bios_revision[1] = bcode[0x13]; 2744 ha->bios_revision[1] = bcode[0x13];
2715 DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n", 2745 ql_dbg(ql_dbg_init, vha, 0x005b,
2716 ha->bios_revision[1], ha->bios_revision[0])); 2746 "Read BIOS %d.%d.\n",
2747 ha->bios_revision[1], ha->bios_revision[0]);
2717 break; 2748 break;
2718 case ROM_CODE_TYPE_FCODE: 2749 case ROM_CODE_TYPE_FCODE:
2719 /* Open Firmware standard for PCI (FCode). */ 2750 /* Open Firmware standard for PCI (FCode). */
2720 ha->fcode_revision[0] = bcode[0x12]; 2751 ha->fcode_revision[0] = bcode[0x12];
2721 ha->fcode_revision[1] = bcode[0x13]; 2752 ha->fcode_revision[1] = bcode[0x13];
2722 DEBUG3(qla_printk(KERN_DEBUG, ha, "read FCODE %d.%d.\n", 2753 ql_dbg(ql_dbg_init, vha, 0x005c,
2723 ha->fcode_revision[1], ha->fcode_revision[0])); 2754 "Read FCODE %d.%d.\n",
2755 ha->fcode_revision[1], ha->fcode_revision[0]);
2724 break; 2756 break;
2725 case ROM_CODE_TYPE_EFI: 2757 case ROM_CODE_TYPE_EFI:
2726 /* Extensible Firmware Interface (EFI). */ 2758 /* Extensible Firmware Interface (EFI). */
2727 ha->efi_revision[0] = bcode[0x12]; 2759 ha->efi_revision[0] = bcode[0x12];
2728 ha->efi_revision[1] = bcode[0x13]; 2760 ha->efi_revision[1] = bcode[0x13];
2729 DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n", 2761 ql_dbg(ql_dbg_init, vha, 0x005d,
2730 ha->efi_revision[1], ha->efi_revision[0])); 2762 "Read EFI %d.%d.\n",
2763 ha->efi_revision[1], ha->efi_revision[0]);
2731 break; 2764 break;
2732 default: 2765 default:
2733 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code " 2766 ql_log(ql_log_warn, vha, 0x005e,
2734 "type %x at pcids %x.\n", code_type, pcids)); 2767 "Unrecognized code type %x at pcids %x.\n",
2768 code_type, pcids);
2735 break; 2769 break;
2736 } 2770 }
2737 2771
@@ -2753,13 +2787,18 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2753 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 2787 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
2754 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2788 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2755 dcode[3] == 0)) { 2789 dcode[3] == 0)) {
2756 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw " 2790 ql_log(ql_log_warn, vha, 0x005f,
2757 "revision at %x.\n", ha->flt_region_fw * 4)); 2791 "Unrecognized fw revision at %x.\n",
2792 ha->flt_region_fw * 4);
2758 } else { 2793 } else {
2759 ha->fw_revision[0] = dcode[0]; 2794 ha->fw_revision[0] = dcode[0];
2760 ha->fw_revision[1] = dcode[1]; 2795 ha->fw_revision[1] = dcode[1];
2761 ha->fw_revision[2] = dcode[2]; 2796 ha->fw_revision[2] = dcode[2];
2762 ha->fw_revision[3] = dcode[3]; 2797 ha->fw_revision[3] = dcode[3];
2798 ql_dbg(ql_dbg_init, vha, 0x0060,
2799 "Firmware revision %d.%d.%d.%d.\n",
2800 ha->fw_revision[0], ha->fw_revision[1],
2801 ha->fw_revision[2], ha->fw_revision[3]);
2763 } 2802 }
2764 2803
2765 /* Check for golden firmware and get version if available */ 2804 /* Check for golden firmware and get version if available */
@@ -2775,9 +2814,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2775 2814
2776 if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF && 2815 if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF &&
2777 dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) { 2816 dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) {
2778 DEBUG2(qla_printk(KERN_INFO, ha, 2817 ql_log(ql_log_warn, vha, 0x0056,
2779 "%s(%ld): Unrecognized golden fw at 0x%x.\n", 2818 "Unrecognized golden fw at 0x%x.\n",
2780 __func__, vha->host_no, ha->flt_region_gold_fw * 4)); 2819 ha->flt_region_gold_fw * 4);
2781 return ret; 2820 return ret;
2782 } 2821 }
2783 2822
@@ -2843,9 +2882,9 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
2843 if (!ha->fcp_prio_cfg) { 2882 if (!ha->fcp_prio_cfg) {
2844 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); 2883 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
2845 if (!ha->fcp_prio_cfg) { 2884 if (!ha->fcp_prio_cfg) {
2846 qla_printk(KERN_WARNING, ha, 2885 ql_log(ql_log_warn, vha, 0x00d5,
2847 "Unable to allocate memory for fcp priority data " 2886 "Unable to allocate memory for fcp priorty data (%x).\n",
2848 "(%x).\n", FCP_PRIO_CFG_SIZE); 2887 FCP_PRIO_CFG_SIZE);
2849 return QLA_FUNCTION_FAILED; 2888 return QLA_FUNCTION_FAILED;
2850 } 2889 }
2851 } 2890 }
@@ -2857,7 +2896,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
2857 ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg, 2896 ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
2858 fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE); 2897 fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
2859 2898
2860 if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 0)) 2899 if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0))
2861 goto fail; 2900 goto fail;
2862 2901
2863 /* read remaining FCP CMD config data from flash */ 2902 /* read remaining FCP CMD config data from flash */
@@ -2869,7 +2908,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
2869 fcp_prio_addr << 2, (len < max_len ? len : max_len)); 2908 fcp_prio_addr << 2, (len < max_len ? len : max_len));
2870 2909
2871 /* revalidate the entire FCP priority config data, including entries */ 2910 /* revalidate the entire FCP priority config data, including entries */
2872 if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 1)) 2911 if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1))
2873 goto fail; 2912 goto fail;
2874 2913
2875 ha->flags.fcp_prio_enabled = 1; 2914 ha->flags.fcp_prio_enabled = 1;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 28d9c9d6b4b4..fc3f168decb4 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -137,6 +137,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
137 host->host_blocked = host->max_host_blocked; 137 host->host_blocked = host->max_host_blocked;
138 break; 138 break;
139 case SCSI_MLQUEUE_DEVICE_BUSY: 139 case SCSI_MLQUEUE_DEVICE_BUSY:
140 case SCSI_MLQUEUE_EH_RETRY:
140 device->device_blocked = device->max_device_blocked; 141 device->device_blocked = device->max_device_blocked;
141 break; 142 break;
142 case SCSI_MLQUEUE_TARGET_BUSY: 143 case SCSI_MLQUEUE_TARGET_BUSY:
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 8a172d4f4564..5fbeadd96819 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -597,6 +597,28 @@ static DEVICE_ATTR(signalling, S_IRUGO,
597 show_spi_host_signalling, 597 show_spi_host_signalling,
598 store_spi_host_signalling); 598 store_spi_host_signalling);
599 599
600static ssize_t show_spi_host_width(struct device *cdev,
601 struct device_attribute *attr,
602 char *buf)
603{
604 struct Scsi_Host *shost = transport_class_to_shost(cdev);
605
606 return sprintf(buf, "%s\n", shost->max_id == 16 ? "wide" : "narrow");
607}
608static DEVICE_ATTR(host_width, S_IRUGO,
609 show_spi_host_width, NULL);
610
611static ssize_t show_spi_host_hba_id(struct device *cdev,
612 struct device_attribute *attr,
613 char *buf)
614{
615 struct Scsi_Host *shost = transport_class_to_shost(cdev);
616
617 return sprintf(buf, "%d\n", shost->this_id);
618}
619static DEVICE_ATTR(hba_id, S_IRUGO,
620 show_spi_host_hba_id, NULL);
621
600#define DV_SET(x, y) \ 622#define DV_SET(x, y) \
601 if(i->f->set_##x) \ 623 if(i->f->set_##x) \
602 i->f->set_##x(sdev->sdev_target, y) 624 i->f->set_##x(sdev->sdev_target, y)
@@ -1380,6 +1402,8 @@ static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class,
1380 1402
1381static struct attribute *host_attributes[] = { 1403static struct attribute *host_attributes[] = {
1382 &dev_attr_signalling.attr, 1404 &dev_attr_signalling.attr,
1405 &dev_attr_host_width.attr,
1406 &dev_attr_hba_id.attr,
1383 NULL 1407 NULL
1384}; 1408};
1385 1409
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
index 499b7a90e941..32ee39ad00df 100644
--- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
+++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
@@ -6205,6 +6205,7 @@ int ar6000_create_ap_interface(struct ar6_softc *ar, char *ap_ifname)
6205 6205
6206 ether_setup(dev); 6206 ether_setup(dev);
6207 init_netdev(dev, ap_ifname); 6207 init_netdev(dev, ap_ifname);
6208 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
6208 6209
6209 if (register_netdev(dev)) { 6210 if (register_netdev(dev)) {
6210 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_create_ap_interface: register_netdev failed\n")); 6211 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_create_ap_interface: register_netdev failed\n"));
diff --git a/drivers/staging/brcm80211/brcmsmac/mac80211_if.h b/drivers/staging/brcm80211/brcmsmac/mac80211_if.h
index 5711e7c16b50..40e3d375ea99 100644
--- a/drivers/staging/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/staging/brcm80211/brcmsmac/mac80211_if.h
@@ -24,8 +24,6 @@
24#define BRCMS_SET_SHORTSLOT_OVERRIDE 146 24#define BRCMS_SET_SHORTSLOT_OVERRIDE 146
25 25
26 26
27#include <linux/interrupt.h>
28
29/* BMAC Note: High-only driver is no longer working in softirq context as it needs to block and 27/* BMAC Note: High-only driver is no longer working in softirq context as it needs to block and
30 * sleep so perimeter lock has to be a semaphore instead of spinlock. This requires timers to be 28 * sleep so perimeter lock has to be a semaphore instead of spinlock. This requires timers to be
31 * submitted to workqueue instead of being on kernel timer 29 * submitted to workqueue instead of being on kernel timer
diff --git a/drivers/staging/cxd2099/Kconfig b/drivers/staging/cxd2099/Kconfig
index 9d638c30735d..b48aefddc84c 100644
--- a/drivers/staging/cxd2099/Kconfig
+++ b/drivers/staging/cxd2099/Kconfig
@@ -1,9 +1,10 @@
1config DVB_CXD2099 1config DVB_CXD2099
2 tristate "CXD2099AR Common Interface driver" 2 tristate "CXD2099AR Common Interface driver"
3 depends on DVB_CORE && PCI && I2C && DVB_NGENE 3 depends on DVB_CORE && PCI && I2C
4 ---help--- 4 ---help---
5 Support for the CI module found on cineS2 DVB-S2, supported by 5 Support for the CI module found on cards based on
6 the Micronas PCIe device driver (ngene). 6 - Micronas ngene PCIe bridge: cineS2 etc.
7 - Digital Devices PCIe bridge: Octopus series
7 8
8 For now, data is passed through '/dev/dvb/adapterX/sec0': 9 For now, data is passed through '/dev/dvb/adapterX/sec0':
9 - Encrypted data must be written to 'sec0'. 10 - Encrypted data must be written to 'sec0'.
diff --git a/drivers/staging/cxd2099/cxd2099.c b/drivers/staging/cxd2099/cxd2099.c
index 55b1c4a59035..1c04185bcfd7 100644
--- a/drivers/staging/cxd2099/cxd2099.c
+++ b/drivers/staging/cxd2099/cxd2099.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * cxd2099.c: Driver for the CXD2099AR Common Interface Controller 2 * cxd2099.c: Driver for the CXD2099AR Common Interface Controller
3 * 3 *
4 * Copyright (C) 2010 DigitalDevices UG 4 * Copyright (C) 2010-2011 Digital Devices GmbH
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
@@ -41,13 +41,13 @@ struct cxd {
41 struct dvb_ca_en50221 en; 41 struct dvb_ca_en50221 en;
42 42
43 struct i2c_adapter *i2c; 43 struct i2c_adapter *i2c;
44 u8 adr; 44 struct cxd2099_cfg cfg;
45
45 u8 regs[0x23]; 46 u8 regs[0x23];
46 u8 lastaddress; 47 u8 lastaddress;
47 u8 clk_reg_f; 48 u8 clk_reg_f;
48 u8 clk_reg_b; 49 u8 clk_reg_b;
49 int mode; 50 int mode;
50 u32 bitrate;
51 int ready; 51 int ready;
52 int dr; 52 int dr;
53 int slot_stat; 53 int slot_stat;
@@ -89,9 +89,9 @@ static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr,
89 u8 reg, u8 *val) 89 u8 reg, u8 *val)
90{ 90{
91 struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0, 91 struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
92 .buf = &reg, .len = 1 }, 92 .buf = &reg, .len = 1},
93 {.addr = adr, .flags = I2C_M_RD, 93 {.addr = adr, .flags = I2C_M_RD,
94 .buf = val, .len = 1 } }; 94 .buf = val, .len = 1} };
95 95
96 if (i2c_transfer(adapter, msgs, 2) != 2) { 96 if (i2c_transfer(adapter, msgs, 2) != 2) {
97 printk(KERN_ERR "error in i2c_read_reg\n"); 97 printk(KERN_ERR "error in i2c_read_reg\n");
@@ -104,9 +104,9 @@ static int i2c_read(struct i2c_adapter *adapter, u8 adr,
104 u8 reg, u8 *data, u8 n) 104 u8 reg, u8 *data, u8 n)
105{ 105{
106 struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0, 106 struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
107 .buf = &reg, .len = 1 }, 107 .buf = &reg, .len = 1},
108 {.addr = adr, .flags = I2C_M_RD, 108 {.addr = adr, .flags = I2C_M_RD,
109 .buf = data, .len = n } }; 109 .buf = data, .len = n} };
110 110
111 if (i2c_transfer(adapter, msgs, 2) != 2) { 111 if (i2c_transfer(adapter, msgs, 2) != 2) {
112 printk(KERN_ERR "error in i2c_read\n"); 112 printk(KERN_ERR "error in i2c_read\n");
@@ -119,10 +119,10 @@ static int read_block(struct cxd *ci, u8 adr, u8 *data, u8 n)
119{ 119{
120 int status; 120 int status;
121 121
122 status = i2c_write_reg(ci->i2c, ci->adr, 0, adr); 122 status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
123 if (!status) { 123 if (!status) {
124 ci->lastaddress = adr; 124 ci->lastaddress = adr;
125 status = i2c_read(ci->i2c, ci->adr, 1, data, n); 125 status = i2c_read(ci->i2c, ci->cfg.adr, 1, data, n);
126 } 126 }
127 return status; 127 return status;
128} 128}
@@ -136,24 +136,24 @@ static int read_reg(struct cxd *ci, u8 reg, u8 *val)
136static int read_pccard(struct cxd *ci, u16 address, u8 *data, u8 n) 136static int read_pccard(struct cxd *ci, u16 address, u8 *data, u8 n)
137{ 137{
138 int status; 138 int status;
139 u8 addr[3] = { 2, address&0xff, address>>8 }; 139 u8 addr[3] = {2, address & 0xff, address >> 8};
140 140
141 status = i2c_write(ci->i2c, ci->adr, addr, 3); 141 status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
142 if (!status) 142 if (!status)
143 status = i2c_read(ci->i2c, ci->adr, 3, data, n); 143 status = i2c_read(ci->i2c, ci->cfg.adr, 3, data, n);
144 return status; 144 return status;
145} 145}
146 146
147static int write_pccard(struct cxd *ci, u16 address, u8 *data, u8 n) 147static int write_pccard(struct cxd *ci, u16 address, u8 *data, u8 n)
148{ 148{
149 int status; 149 int status;
150 u8 addr[3] = { 2, address&0xff, address>>8 }; 150 u8 addr[3] = {2, address & 0xff, address >> 8};
151 151
152 status = i2c_write(ci->i2c, ci->adr, addr, 3); 152 status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
153 if (!status) { 153 if (!status) {
154 u8 buf[256] = {3}; 154 u8 buf[256] = {3};
155 memcpy(buf+1, data, n); 155 memcpy(buf+1, data, n);
156 status = i2c_write(ci->i2c, ci->adr, buf, n+1); 156 status = i2c_write(ci->i2c, ci->cfg.adr, buf, n+1);
157 } 157 }
158 return status; 158 return status;
159} 159}
@@ -161,39 +161,64 @@ static int write_pccard(struct cxd *ci, u16 address, u8 *data, u8 n)
161static int read_io(struct cxd *ci, u16 address, u8 *val) 161static int read_io(struct cxd *ci, u16 address, u8 *val)
162{ 162{
163 int status; 163 int status;
164 u8 addr[3] = { 2, address&0xff, address>>8 }; 164 u8 addr[3] = {2, address & 0xff, address >> 8};
165 165
166 status = i2c_write(ci->i2c, ci->adr, addr, 3); 166 status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
167 if (!status) 167 if (!status)
168 status = i2c_read(ci->i2c, ci->adr, 3, val, 1); 168 status = i2c_read(ci->i2c, ci->cfg.adr, 3, val, 1);
169 return status; 169 return status;
170} 170}
171 171
172static int write_io(struct cxd *ci, u16 address, u8 val) 172static int write_io(struct cxd *ci, u16 address, u8 val)
173{ 173{
174 int status; 174 int status;
175 u8 addr[3] = { 2, address&0xff, address>>8 }; 175 u8 addr[3] = {2, address & 0xff, address >> 8};
176 u8 buf[2] = { 3, val }; 176 u8 buf[2] = {3, val};
177 177
178 status = i2c_write(ci->i2c, ci->adr, addr, 3); 178 status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
179 if (!status) 179 if (!status)
180 status = i2c_write(ci->i2c, ci->adr, buf, 2); 180 status = i2c_write(ci->i2c, ci->cfg.adr, buf, 2);
181
182 return status; 181 return status;
183} 182}
184 183
184#if 0
185static int read_io_data(struct cxd *ci, u8 *data, u8 n)
186{
187 int status;
188 u8 addr[3] = { 2, 0, 0 };
189
190 status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
191 if (!status)
192 status = i2c_read(ci->i2c, ci->cfg.adr, 3, data, n);
193 return 0;
194}
195
196static int write_io_data(struct cxd *ci, u8 *data, u8 n)
197{
198 int status;
199 u8 addr[3] = {2, 0, 0};
200
201 status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
202 if (!status) {
203 u8 buf[256] = {3};
204 memcpy(buf+1, data, n);
205 status = i2c_write(ci->i2c, ci->cfg.adr, buf, n + 1);
206 }
207 return 0;
208}
209#endif
185 210
186static int write_regm(struct cxd *ci, u8 reg, u8 val, u8 mask) 211static int write_regm(struct cxd *ci, u8 reg, u8 val, u8 mask)
187{ 212{
188 int status; 213 int status;
189 214
190 status = i2c_write_reg(ci->i2c, ci->adr, 0, reg); 215 status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, reg);
191 if (!status && reg >= 6 && reg <= 8 && mask != 0xff) 216 if (!status && reg >= 6 && reg <= 8 && mask != 0xff)
192 status = i2c_read_reg(ci->i2c, ci->adr, 1, &ci->regs[reg]); 217 status = i2c_read_reg(ci->i2c, ci->cfg.adr, 1, &ci->regs[reg]);
193 ci->regs[reg] = (ci->regs[reg]&(~mask))|val; 218 ci->regs[reg] = (ci->regs[reg] & (~mask)) | val;
194 if (!status) { 219 if (!status) {
195 ci->lastaddress = reg; 220 ci->lastaddress = reg;
196 status = i2c_write_reg(ci->i2c, ci->adr, 1, ci->regs[reg]); 221 status = i2c_write_reg(ci->i2c, ci->cfg.adr, 1, ci->regs[reg]);
197 } 222 }
198 if (reg == 0x20) 223 if (reg == 0x20)
199 ci->regs[reg] &= 0x7f; 224 ci->regs[reg] &= 0x7f;
@@ -211,11 +236,11 @@ static int write_block(struct cxd *ci, u8 adr, u8 *data, int n)
211 int status; 236 int status;
212 u8 buf[256] = {1}; 237 u8 buf[256] = {1};
213 238
214 status = i2c_write_reg(ci->i2c, ci->adr, 0, adr); 239 status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
215 if (!status) { 240 if (!status) {
216 ci->lastaddress = adr; 241 ci->lastaddress = adr;
217 memcpy(buf+1, data, n); 242 memcpy(buf + 1, data, n);
218 status = i2c_write(ci->i2c, ci->adr, buf, n+1); 243 status = i2c_write(ci->i2c, ci->cfg.adr, buf, n + 1);
219 } 244 }
220 return status; 245 return status;
221} 246}
@@ -249,12 +274,16 @@ static void cam_mode(struct cxd *ci, int mode)
249 write_regm(ci, 0x20, 0x80, 0x80); 274 write_regm(ci, 0x20, 0x80, 0x80);
250 break; 275 break;
251 case 0x01: 276 case 0x01:
277#ifdef BUFFER_MODE
278 if (!ci->en.read_data)
279 return;
252 printk(KERN_INFO "enable cam buffer mode\n"); 280 printk(KERN_INFO "enable cam buffer mode\n");
253 /* write_reg(ci, 0x0d, 0x00); */ 281 /* write_reg(ci, 0x0d, 0x00); */
254 /* write_reg(ci, 0x0e, 0x01); */ 282 /* write_reg(ci, 0x0e, 0x01); */
255 write_regm(ci, 0x08, 0x40, 0x40); 283 write_regm(ci, 0x08, 0x40, 0x40);
256 /* read_reg(ci, 0x12, &dummy); */ 284 /* read_reg(ci, 0x12, &dummy); */
257 write_regm(ci, 0x08, 0x80, 0x80); 285 write_regm(ci, 0x08, 0x80, 0x80);
286#endif
258 break; 287 break;
259 default: 288 default:
260 break; 289 break;
@@ -264,8 +293,6 @@ static void cam_mode(struct cxd *ci, int mode)
264 293
265 294
266 295
267#define CHK_ERROR(s) if ((status = s)) break
268
269static int init(struct cxd *ci) 296static int init(struct cxd *ci)
270{ 297{
271 int status; 298 int status;
@@ -273,63 +300,160 @@ static int init(struct cxd *ci)
273 mutex_lock(&ci->lock); 300 mutex_lock(&ci->lock);
274 ci->mode = -1; 301 ci->mode = -1;
275 do { 302 do {
276 CHK_ERROR(write_reg(ci, 0x00, 0x00)); 303 status = write_reg(ci, 0x00, 0x00);
277 CHK_ERROR(write_reg(ci, 0x01, 0x00)); 304 if (status < 0)
278 CHK_ERROR(write_reg(ci, 0x02, 0x10)); 305 break;
279 CHK_ERROR(write_reg(ci, 0x03, 0x00)); 306 status = write_reg(ci, 0x01, 0x00);
280 CHK_ERROR(write_reg(ci, 0x05, 0xFF)); 307 if (status < 0)
281 CHK_ERROR(write_reg(ci, 0x06, 0x1F)); 308 break;
282 CHK_ERROR(write_reg(ci, 0x07, 0x1F)); 309 status = write_reg(ci, 0x02, 0x10);
283 CHK_ERROR(write_reg(ci, 0x08, 0x28)); 310 if (status < 0)
284 CHK_ERROR(write_reg(ci, 0x14, 0x20)); 311 break;
285 312 status = write_reg(ci, 0x03, 0x00);
286 CHK_ERROR(write_reg(ci, 0x09, 0x4D)); /* Input Mode C, BYPass Serial, TIVAL = low, MSB */ 313 if (status < 0)
287 CHK_ERROR(write_reg(ci, 0x0A, 0xA7)); /* TOSTRT = 8, Mode B (gated clock), falling Edge, Serial, POL=HIGH, MSB */ 314 break;
288 315 status = write_reg(ci, 0x05, 0xFF);
289 /* Sync detector */ 316 if (status < 0)
290 CHK_ERROR(write_reg(ci, 0x0B, 0x33)); 317 break;
291 CHK_ERROR(write_reg(ci, 0x0C, 0x33)); 318 status = write_reg(ci, 0x06, 0x1F);
292 319 if (status < 0)
293 CHK_ERROR(write_regm(ci, 0x14, 0x00, 0x0F)); 320 break;
294 CHK_ERROR(write_reg(ci, 0x15, ci->clk_reg_b)); 321 status = write_reg(ci, 0x07, 0x1F);
295 CHK_ERROR(write_regm(ci, 0x16, 0x00, 0x0F)); 322 if (status < 0)
296 CHK_ERROR(write_reg(ci, 0x17, ci->clk_reg_f)); 323 break;
297 324 status = write_reg(ci, 0x08, 0x28);
298 CHK_ERROR(write_reg(ci, 0x20, 0x28)); /* Integer Divider, Falling Edge, Internal Sync, */ 325 if (status < 0)
299 CHK_ERROR(write_reg(ci, 0x21, 0x00)); /* MCLKI = TICLK/8 */ 326 break;
300 CHK_ERROR(write_reg(ci, 0x22, 0x07)); /* MCLKI = TICLK/8 */ 327 status = write_reg(ci, 0x14, 0x20);
301 328 if (status < 0)
302 329 break;
303 CHK_ERROR(write_regm(ci, 0x20, 0x80, 0x80)); /* Reset CAM state machine */ 330
304 331#if 0
305 CHK_ERROR(write_regm(ci, 0x03, 0x02, 02)); /* Enable IREQA Interrupt */ 332 status = write_reg(ci, 0x09, 0x4D); /* Input Mode C, BYPass Serial, TIVAL = low, MSB */
306 CHK_ERROR(write_reg(ci, 0x01, 0x04)); /* Enable CD Interrupt */ 333 if (status < 0)
307 CHK_ERROR(write_reg(ci, 0x00, 0x31)); /* Enable TS1,Hot Swap,Slot A */ 334 break;
308 CHK_ERROR(write_regm(ci, 0x09, 0x08, 0x08)); /* Put TS in bypass */ 335#endif
336 status = write_reg(ci, 0x0A, 0xA7); /* TOSTRT = 8, Mode B (gated clock), falling Edge, Serial, POL=HIGH, MSB */
337 if (status < 0)
338 break;
339
340 status = write_reg(ci, 0x0B, 0x33);
341 if (status < 0)
342 break;
343 status = write_reg(ci, 0x0C, 0x33);
344 if (status < 0)
345 break;
346
347 status = write_regm(ci, 0x14, 0x00, 0x0F);
348 if (status < 0)
349 break;
350 status = write_reg(ci, 0x15, ci->clk_reg_b);
351 if (status < 0)
352 break;
353 status = write_regm(ci, 0x16, 0x00, 0x0F);
354 if (status < 0)
355 break;
356 status = write_reg(ci, 0x17, ci->clk_reg_f);
357 if (status < 0)
358 break;
359
360 if (ci->cfg.clock_mode) {
361 if (ci->cfg.polarity) {
362 status = write_reg(ci, 0x09, 0x6f);
363 if (status < 0)
364 break;
365 } else {
366 status = write_reg(ci, 0x09, 0x6d);
367 if (status < 0)
368 break;
369 }
370 status = write_reg(ci, 0x20, 0x68);
371 if (status < 0)
372 break;
373 status = write_reg(ci, 0x21, 0x00);
374 if (status < 0)
375 break;
376 status = write_reg(ci, 0x22, 0x02);
377 if (status < 0)
378 break;
379 } else {
380 if (ci->cfg.polarity) {
381 status = write_reg(ci, 0x09, 0x4f);
382 if (status < 0)
383 break;
384 } else {
385 status = write_reg(ci, 0x09, 0x4d);
386 if (status < 0)
387 break;
388 }
389
390 status = write_reg(ci, 0x20, 0x28);
391 if (status < 0)
392 break;
393 status = write_reg(ci, 0x21, 0x00);
394 if (status < 0)
395 break;
396 status = write_reg(ci, 0x22, 0x07);
397 if (status < 0)
398 break;
399 }
400
401 status = write_regm(ci, 0x20, 0x80, 0x80);
402 if (status < 0)
403 break;
404 status = write_regm(ci, 0x03, 0x02, 0x02);
405 if (status < 0)
406 break;
407 status = write_reg(ci, 0x01, 0x04);
408 if (status < 0)
409 break;
410 status = write_reg(ci, 0x00, 0x31);
411 if (status < 0)
412 break;
413
414 /* Put TS in bypass */
415 status = write_regm(ci, 0x09, 0x08, 0x08);
416 if (status < 0)
417 break;
309 ci->cammode = -1; 418 ci->cammode = -1;
310#ifdef BUFFER_MODE
311 cam_mode(ci, 0); 419 cam_mode(ci, 0);
312#endif
313 } while (0); 420 } while (0);
314 mutex_unlock(&ci->lock); 421 mutex_unlock(&ci->lock);
315 422
316 return 0; 423 return 0;
317} 424}
318 425
319
320static int read_attribute_mem(struct dvb_ca_en50221 *ca, 426static int read_attribute_mem(struct dvb_ca_en50221 *ca,
321 int slot, int address) 427 int slot, int address)
322{ 428{
323 struct cxd *ci = ca->data; 429 struct cxd *ci = ca->data;
430#if 0
431 if (ci->amem_read) {
432 if (address <= 0 || address > 1024)
433 return -EIO;
434 return ci->amem[address];
435 }
436
437 mutex_lock(&ci->lock);
438 write_regm(ci, 0x06, 0x00, 0x05);
439 read_pccard(ci, 0, &ci->amem[0], 128);
440 read_pccard(ci, 128, &ci->amem[0], 128);
441 read_pccard(ci, 256, &ci->amem[0], 128);
442 read_pccard(ci, 384, &ci->amem[0], 128);
443 write_regm(ci, 0x06, 0x05, 0x05);
444 mutex_unlock(&ci->lock);
445 return ci->amem[address];
446#else
324 u8 val; 447 u8 val;
325 mutex_lock(&ci->lock); 448 mutex_lock(&ci->lock);
326 set_mode(ci, 1); 449 set_mode(ci, 1);
327 read_pccard(ci, address, &val, 1); 450 read_pccard(ci, address, &val, 1);
328 mutex_unlock(&ci->lock); 451 mutex_unlock(&ci->lock);
452 /* printk(KERN_INFO "%02x:%02x\n", address,val); */
329 return val; 453 return val;
454#endif
330} 455}
331 456
332
333static int write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, 457static int write_attribute_mem(struct dvb_ca_en50221 *ca, int slot,
334 int address, u8 value) 458 int address, u8 value)
335{ 459{
@@ -372,6 +496,15 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
372 struct cxd *ci = ca->data; 496 struct cxd *ci = ca->data;
373 497
374 mutex_lock(&ci->lock); 498 mutex_lock(&ci->lock);
499#if 0
500 write_reg(ci, 0x00, 0x21);
501 write_reg(ci, 0x06, 0x1F);
502 write_reg(ci, 0x00, 0x31);
503#else
504#if 0
505 write_reg(ci, 0x06, 0x1F);
506 write_reg(ci, 0x06, 0x2F);
507#else
375 cam_mode(ci, 0); 508 cam_mode(ci, 0);
376 write_reg(ci, 0x00, 0x21); 509 write_reg(ci, 0x00, 0x21);
377 write_reg(ci, 0x06, 0x1F); 510 write_reg(ci, 0x06, 0x1F);
@@ -379,13 +512,25 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
379 write_regm(ci, 0x20, 0x80, 0x80); 512 write_regm(ci, 0x20, 0x80, 0x80);
380 write_reg(ci, 0x03, 0x02); 513 write_reg(ci, 0x03, 0x02);
381 ci->ready = 0; 514 ci->ready = 0;
515#endif
516#endif
382 ci->mode = -1; 517 ci->mode = -1;
383 { 518 {
384 int i; 519 int i;
520#if 0
521 u8 val;
522#endif
385 for (i = 0; i < 100; i++) { 523 for (i = 0; i < 100; i++) {
386 msleep(10); 524 msleep(10);
525#if 0
526 read_reg(ci, 0x06, &val);
527 printk(KERN_INFO "%d:%02x\n", i, val);
528 if (!(val&0x10))
529 break;
530#else
387 if (ci->ready) 531 if (ci->ready)
388 break; 532 break;
533#endif
389 } 534 }
390 } 535 }
391 mutex_unlock(&ci->lock); 536 mutex_unlock(&ci->lock);
@@ -399,12 +544,12 @@ static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
399 544
400 printk(KERN_INFO "slot_shutdown\n"); 545 printk(KERN_INFO "slot_shutdown\n");
401 mutex_lock(&ci->lock); 546 mutex_lock(&ci->lock);
402 /* write_regm(ci, 0x09, 0x08, 0x08); */ 547 write_regm(ci, 0x09, 0x08, 0x08);
403 write_regm(ci, 0x20, 0x80, 0x80); 548 write_regm(ci, 0x20, 0x80, 0x80); /* Reset CAM Mode */
404 write_regm(ci, 0x06, 0x07, 0x07); 549 write_regm(ci, 0x06, 0x07, 0x07); /* Clear IO Mode */
405 ci->mode = -1; 550 ci->mode = -1;
406 mutex_unlock(&ci->lock); 551 mutex_unlock(&ci->lock);
407 return 0; /* shutdown(ci); */ 552 return 0;
408} 553}
409 554
410static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot) 555static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
@@ -459,7 +604,6 @@ static int campoll(struct cxd *ci)
459 if (istat&8 && ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT) { 604 if (istat&8 && ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT) {
460 ci->ready = 1; 605 ci->ready = 1;
461 ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_READY; 606 ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_READY;
462 printk(KERN_INFO "READY\n");
463 } 607 }
464 } 608 }
465 return 0; 609 return 0;
@@ -510,7 +654,7 @@ static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
510 struct cxd *ci = ca->data; 654 struct cxd *ci = ca->data;
511 655
512 mutex_lock(&ci->lock); 656 mutex_lock(&ci->lock);
 513 printk(KERN_INFO "write_data %d\n", ecount); 657 printk(KERN_INFO "write_data %d\n", ecount);
514 write_reg(ci, 0x0d, ecount>>8); 658 write_reg(ci, 0x0d, ecount>>8);
515 write_reg(ci, 0x0e, ecount&0xff); 659 write_reg(ci, 0x0e, ecount&0xff);
516 write_block(ci, 0x11, ebuf, ecount); 660 write_block(ci, 0x11, ebuf, ecount);
@@ -535,15 +679,15 @@ static struct dvb_ca_en50221 en_templ = {
535 679
536}; 680};
537 681
538struct dvb_ca_en50221 *cxd2099_attach(u8 adr, void *priv, 682struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
683 void *priv,
539 struct i2c_adapter *i2c) 684 struct i2c_adapter *i2c)
540{ 685{
541 struct cxd *ci = 0; 686 struct cxd *ci = 0;
542 u32 bitrate = 62000000;
543 u8 val; 687 u8 val;
544 688
545 if (i2c_read_reg(i2c, adr, 0, &val) < 0) { 689 if (i2c_read_reg(i2c, cfg->adr, 0, &val) < 0) {
546 printk(KERN_ERR "No CXD2099 detected at %02x\n", adr); 690 printk(KERN_INFO "No CXD2099 detected at %02x\n", cfg->adr);
547 return 0; 691 return 0;
548 } 692 }
549 693
@@ -553,21 +697,20 @@ struct dvb_ca_en50221 *cxd2099_attach(u8 adr, void *priv,
553 memset(ci, 0, sizeof(*ci)); 697 memset(ci, 0, sizeof(*ci));
554 698
555 mutex_init(&ci->lock); 699 mutex_init(&ci->lock);
700 memcpy(&ci->cfg, cfg, sizeof(struct cxd2099_cfg));
556 ci->i2c = i2c; 701 ci->i2c = i2c;
557 ci->adr = adr;
558 ci->lastaddress = 0xff; 702 ci->lastaddress = 0xff;
559 ci->clk_reg_b = 0x4a; 703 ci->clk_reg_b = 0x4a;
560 ci->clk_reg_f = 0x1b; 704 ci->clk_reg_f = 0x1b;
561 ci->bitrate = bitrate;
562 705
563 memcpy(&ci->en, &en_templ, sizeof(en_templ)); 706 memcpy(&ci->en, &en_templ, sizeof(en_templ));
564 ci->en.data = ci; 707 ci->en.data = ci;
565 init(ci); 708 init(ci);
566 printk(KERN_INFO "Attached CXD2099AR at %02x\n", ci->adr); 709 printk(KERN_INFO "Attached CXD2099AR at %02x\n", ci->cfg.adr);
567 return &ci->en; 710 return &ci->en;
568} 711}
569EXPORT_SYMBOL(cxd2099_attach); 712EXPORT_SYMBOL(cxd2099_attach);
570 713
571MODULE_DESCRIPTION("cxd2099"); 714MODULE_DESCRIPTION("cxd2099");
572MODULE_AUTHOR("Ralph Metzler <rjkm@metzlerbros.de>"); 715MODULE_AUTHOR("Ralph Metzler");
573MODULE_LICENSE("GPL"); 716MODULE_LICENSE("GPL");
diff --git a/drivers/staging/cxd2099/cxd2099.h b/drivers/staging/cxd2099/cxd2099.h
index bed54ff3e30b..19c588a59588 100644
--- a/drivers/staging/cxd2099/cxd2099.h
+++ b/drivers/staging/cxd2099/cxd2099.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * cxd2099.h: Driver for the CXD2099AR Common Interface Controller 2 * cxd2099.h: Driver for the CXD2099AR Common Interface Controller
3 * 3 *
4 * Copyright (C) 2010 DigitalDevices UG 4 * Copyright (C) 2010-2011 Digital Devices GmbH
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
@@ -27,11 +27,21 @@
27 27
28#include <dvb_ca_en50221.h> 28#include <dvb_ca_en50221.h>
29 29
30struct cxd2099_cfg {
31 u32 bitrate;
32 u8 adr;
33 u8 polarity:1;
34 u8 clock_mode:1;
35};
36
30#if defined(CONFIG_DVB_CXD2099) || \ 37#if defined(CONFIG_DVB_CXD2099) || \
31 (defined(CONFIG_DVB_CXD2099_MODULE) && defined(MODULE)) 38 (defined(CONFIG_DVB_CXD2099_MODULE) && defined(MODULE))
32struct dvb_ca_en50221 *cxd2099_attach(u8 adr, void *priv, struct i2c_adapter *i2c); 39struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
40 void *priv, struct i2c_adapter *i2c);
33#else 41#else
34static inline struct dvb_ca_en50221 *cxd2099_attach(u8 adr, void *priv, struct i2c_adapter *i2c) 42
43static inline struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
44 void *priv, struct i2c_adapter *i2c)
35{ 45{
36 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 46 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
37 return NULL; 47 return NULL;
diff --git a/drivers/staging/gma500/mrst_hdmi.c b/drivers/staging/gma500/mrst_hdmi.c
index d6a517971ba8..e66607eb3d3e 100644
--- a/drivers/staging/gma500/mrst_hdmi.c
+++ b/drivers/staging/gma500/mrst_hdmi.c
@@ -129,7 +129,7 @@ static void wait_for_vblank(struct drm_device *dev)
129{ 129{
130 /* FIXME: Can we do this as a sleep ? */ 130 /* FIXME: Can we do this as a sleep ? */
131 /* Wait for 20ms, i.e. one cycle at 50hz. */ 131 /* Wait for 20ms, i.e. one cycle at 50hz. */
132 udelay(20000); 132 mdelay(20);
133} 133}
134 134
135static void scu_busy_loop(void *scu_base) 135static void scu_busy_loop(void *scu_base)
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 0f22f0f47446..1a7c19ae766f 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -42,7 +42,7 @@
42#include <net/xfrm.h> 42#include <net/xfrm.h>
43#endif /* CONFIG_XFRM */ 43#endif /* CONFIG_XFRM */
44 44
45#include <asm/atomic.h> 45#include <linux/atomic.h>
46 46
47#include <asm/octeon/octeon.h> 47#include <asm/octeon/octeon.h>
48 48
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 6227571149f5..b445cd63f901 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -38,7 +38,7 @@
38#include <net/xfrm.h> 38#include <net/xfrm.h>
39#endif /* CONFIG_XFRM */ 39#endif /* CONFIG_XFRM */
40 40
41#include <asm/atomic.h> 41#include <linux/atomic.h>
42 42
43#include <asm/octeon/octeon.h> 43#include <asm/octeon/octeon.h>
44 44
diff --git a/drivers/staging/solo6x10/solo6x10.h b/drivers/staging/solo6x10/solo6x10.h
index fd59b093dd4d..17c06bd6cc91 100644
--- a/drivers/staging/solo6x10/solo6x10.h
+++ b/drivers/staging/solo6x10/solo6x10.h
@@ -29,7 +29,7 @@
29#include <linux/wait.h> 29#include <linux/wait.h>
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/atomic.h> 32#include <linux/atomic.h>
33#include <linux/videodev2.h> 33#include <linux/videodev2.h>
34#include <media/v4l2-dev.h> 34#include <media/v4l2-dev.h>
35#include <media/videobuf-core.h> 35#include <media/videobuf-core.h>
diff --git a/drivers/staging/tidspbridge/include/dspbridge/host_os.h b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
index 1a38896f4331..a2f31c69d12e 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/host_os.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
@@ -18,7 +18,7 @@
18#define _HOST_OS_H_ 18#define _HOST_OS_H_
19 19
20#include <asm/system.h> 20#include <asm/system.h>
21#include <asm/atomic.h> 21#include <linux/atomic.h>
22#include <linux/semaphore.h> 22#include <linux/semaphore.h>
23#include <linux/uaccess.h> 23#include <linux/uaccess.h>
24#include <linux/irq.h> 24#include <linux/irq.h>
diff --git a/drivers/staging/tm6000/tm6000-alsa.c b/drivers/staging/tm6000/tm6000-alsa.c
index ddfd7c33361b..bd5fa89af07c 100644
--- a/drivers/staging/tm6000/tm6000-alsa.c
+++ b/drivers/staging/tm6000/tm6000-alsa.c
@@ -84,7 +84,6 @@ static int _tm6000_start_audio_dma(struct snd_tm6000_card *chip)
84 84
85 tm6000_set_audio_bitrate(core, 48000); 85 tm6000_set_audio_bitrate(core, 48000);
86 86
87
88 return 0; 87 return 0;
89} 88}
90 89
@@ -123,6 +122,7 @@ static int dsp_buffer_alloc(struct snd_pcm_substream *substream, int size)
123 if (substream->runtime->dma_area) { 122 if (substream->runtime->dma_area) {
124 if (substream->runtime->dma_bytes > size) 123 if (substream->runtime->dma_bytes > size)
125 return 0; 124 return 0;
125
126 dsp_buffer_free(substream); 126 dsp_buffer_free(substream);
127 } 127 }
128 128
@@ -152,9 +152,9 @@ static struct snd_pcm_hardware snd_tm6000_digital_hw = {
152 SNDRV_PCM_INFO_MMAP_VALID, 152 SNDRV_PCM_INFO_MMAP_VALID,
153 .formats = SNDRV_PCM_FMTBIT_S16_LE, 153 .formats = SNDRV_PCM_FMTBIT_S16_LE,
154 154
155 .rates = SNDRV_PCM_RATE_CONTINUOUS, 155 .rates = SNDRV_PCM_RATE_CONTINUOUS,
156 .rate_min = 48000, 156 .rate_min = 48000,
157 .rate_max = 48000, 157 .rate_max = 48000,
158 .channels_min = 2, 158 .channels_min = 2,
159 .channels_max = 2, 159 .channels_max = 2,
160 .period_bytes_min = 64, 160 .period_bytes_min = 64,
@@ -254,9 +254,7 @@ static int tm6000_fillbuf(struct tm6000_core *core, char *buf, int size)
254 memcpy(runtime->dma_area + buf_pos * stride, buf, 254 memcpy(runtime->dma_area + buf_pos * stride, buf,
255 length * stride); 255 length * stride);
256 256
257#ifndef NO_PCM_LOCK
258 snd_pcm_stream_lock(substream); 257 snd_pcm_stream_lock(substream);
259#endif
260 258
261 chip->buf_pos += length; 259 chip->buf_pos += length;
262 if (chip->buf_pos >= runtime->buffer_size) 260 if (chip->buf_pos >= runtime->buffer_size)
@@ -268,9 +266,7 @@ static int tm6000_fillbuf(struct tm6000_core *core, char *buf, int size)
268 period_elapsed = 1; 266 period_elapsed = 1;
269 } 267 }
270 268
271#ifndef NO_PCM_LOCK
272 snd_pcm_stream_unlock(substream); 269 snd_pcm_stream_unlock(substream);
273#endif
274 270
275 if (period_elapsed) 271 if (period_elapsed)
276 snd_pcm_period_elapsed(substream); 272 snd_pcm_period_elapsed(substream);
diff --git a/drivers/staging/winbond/mds_s.h b/drivers/staging/winbond/mds_s.h
index eeedf0186365..07d835b3b706 100644
--- a/drivers/staging/winbond/mds_s.h
+++ b/drivers/staging/winbond/mds_s.h
@@ -3,7 +3,7 @@
3 3
4#include <linux/timer.h> 4#include <linux/timer.h>
5#include <linux/types.h> 5#include <linux/types.h>
6#include <asm/atomic.h> 6#include <linux/atomic.h>
7 7
8#include "localpara.h" 8#include "localpara.h"
9#include "mac_structures.h" 9#include "mac_structures.h"
diff --git a/drivers/staging/winbond/wb35reg_s.h b/drivers/staging/winbond/wb35reg_s.h
index eb274ffdd1ba..dc79faa4029f 100644
--- a/drivers/staging/winbond/wb35reg_s.h
+++ b/drivers/staging/winbond/wb35reg_s.h
@@ -3,7 +3,7 @@
3 3
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5#include <linux/types.h> 5#include <linux/types.h>
6#include <asm/atomic.h> 6#include <linux/atomic.h>
7 7
8struct hw_data; 8struct hw_data;
9 9
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 5cb0f0ef6af0..b28794b72125 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -31,5 +31,6 @@ config TCM_PSCSI
31 31
32source "drivers/target/loopback/Kconfig" 32source "drivers/target/loopback/Kconfig"
33source "drivers/target/tcm_fc/Kconfig" 33source "drivers/target/tcm_fc/Kconfig"
34source "drivers/target/iscsi/Kconfig"
34 35
35endif 36endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 21df808a992c..1060c7b7f803 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -24,5 +24,5 @@ obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
24 24
25# Fabric modules 25# Fabric modules
26obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ 26obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
27
28obj-$(CONFIG_TCM_FC) += tcm_fc/ 27obj-$(CONFIG_TCM_FC) += tcm_fc/
28obj-$(CONFIG_ISCSI_TARGET) += iscsi/
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
new file mode 100644
index 000000000000..564ff4e0dbc4
--- /dev/null
+++ b/drivers/target/iscsi/Kconfig
@@ -0,0 +1,8 @@
1config ISCSI_TARGET
2 tristate "Linux-iSCSI.org iSCSI Target Mode Stack"
3 select CRYPTO
4 select CRYPTO_CRC32C
5 select CRYPTO_CRC32C_INTEL if X86
6 help
7 Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI
8 Target Mode Stack.
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
new file mode 100644
index 000000000000..5b9a2cf7f0a9
--- /dev/null
+++ b/drivers/target/iscsi/Makefile
@@ -0,0 +1,20 @@
1iscsi_target_mod-y += iscsi_target_parameters.o \
2 iscsi_target_seq_pdu_list.o \
3 iscsi_target_tq.o \
4 iscsi_target_auth.o \
5 iscsi_target_datain_values.o \
6 iscsi_target_device.o \
7 iscsi_target_erl0.o \
8 iscsi_target_erl1.o \
9 iscsi_target_erl2.o \
10 iscsi_target_login.o \
11 iscsi_target_nego.o \
12 iscsi_target_nodeattrib.o \
13 iscsi_target_tmr.o \
14 iscsi_target_tpg.o \
15 iscsi_target_util.o \
16 iscsi_target.o \
17 iscsi_target_configfs.o \
18 iscsi_target_stat.o
19
20obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
new file mode 100644
index 000000000000..14c81c4265bd
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -0,0 +1,4559 @@
1/*******************************************************************************
2 * This file contains main functions related to the iSCSI Target Core Driver.
3 *
4 * \u00a9 Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/string.h>
22#include <linux/kthread.h>
23#include <linux/crypto.h>
24#include <linux/completion.h>
25#include <asm/unaligned.h>
26#include <scsi/scsi_device.h>
27#include <scsi/iscsi_proto.h>
28#include <target/target_core_base.h>
29#include <target/target_core_tmr.h>
30#include <target/target_core_transport.h>
31
32#include "iscsi_target_core.h"
33#include "iscsi_target_parameters.h"
34#include "iscsi_target_seq_pdu_list.h"
35#include "iscsi_target_tq.h"
36#include "iscsi_target_configfs.h"
37#include "iscsi_target_datain_values.h"
38#include "iscsi_target_erl0.h"
39#include "iscsi_target_erl1.h"
40#include "iscsi_target_erl2.h"
41#include "iscsi_target_login.h"
42#include "iscsi_target_tmr.h"
43#include "iscsi_target_tpg.h"
44#include "iscsi_target_util.h"
45#include "iscsi_target.h"
46#include "iscsi_target_device.h"
47#include "iscsi_target_stat.h"
48
49static LIST_HEAD(g_tiqn_list);
50static LIST_HEAD(g_np_list);
51static DEFINE_SPINLOCK(tiqn_lock);
52static DEFINE_SPINLOCK(np_lock);
53
54static struct idr tiqn_idr;
55struct idr sess_idr;
56struct mutex auth_id_lock;
57spinlock_t sess_idr_lock;
58
59struct iscsit_global *iscsit_global;
60
61struct kmem_cache *lio_cmd_cache;
62struct kmem_cache *lio_qr_cache;
63struct kmem_cache *lio_dr_cache;
64struct kmem_cache *lio_ooo_cache;
65struct kmem_cache *lio_r2t_cache;
66
67static int iscsit_handle_immediate_data(struct iscsi_cmd *,
68 unsigned char *buf, u32);
69static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
70
71struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
72{
73 struct iscsi_tiqn *tiqn = NULL;
74
75 spin_lock(&tiqn_lock);
76 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
77 if (!strcmp(tiqn->tiqn, buf)) {
78
79 spin_lock(&tiqn->tiqn_state_lock);
80 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
81 tiqn->tiqn_access_count++;
82 spin_unlock(&tiqn->tiqn_state_lock);
83 spin_unlock(&tiqn_lock);
84 return tiqn;
85 }
86 spin_unlock(&tiqn->tiqn_state_lock);
87 }
88 }
89 spin_unlock(&tiqn_lock);
90
91 return NULL;
92}
93
94static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
95{
96 spin_lock(&tiqn->tiqn_state_lock);
97 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
98 tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
99 spin_unlock(&tiqn->tiqn_state_lock);
100 return 0;
101 }
102 spin_unlock(&tiqn->tiqn_state_lock);
103
104 return -1;
105}
106
107void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
108{
109 spin_lock(&tiqn->tiqn_state_lock);
110 tiqn->tiqn_access_count--;
111 spin_unlock(&tiqn->tiqn_state_lock);
112}
113
114/*
115 * Note that IQN formatting is expected to be done in userspace, and
 116 * no explicit IQN format checks are done here.
117 */
118struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
119{
120 struct iscsi_tiqn *tiqn = NULL;
121 int ret;
122
123 if (strlen(buf) > ISCSI_IQN_LEN) {
124 pr_err("Target IQN exceeds %d bytes\n",
125 ISCSI_IQN_LEN);
126 return ERR_PTR(-EINVAL);
127 }
128
129 tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL);
130 if (!tiqn) {
131 pr_err("Unable to allocate struct iscsi_tiqn\n");
132 return ERR_PTR(-ENOMEM);
133 }
134
135 sprintf(tiqn->tiqn, "%s", buf);
136 INIT_LIST_HEAD(&tiqn->tiqn_list);
137 INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
138 spin_lock_init(&tiqn->tiqn_state_lock);
139 spin_lock_init(&tiqn->tiqn_tpg_lock);
140 spin_lock_init(&tiqn->sess_err_stats.lock);
141 spin_lock_init(&tiqn->login_stats.lock);
142 spin_lock_init(&tiqn->logout_stats.lock);
143
144 if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
145 pr_err("idr_pre_get() for tiqn_idr failed\n");
146 kfree(tiqn);
147 return ERR_PTR(-ENOMEM);
148 }
149 tiqn->tiqn_state = TIQN_STATE_ACTIVE;
150
151 spin_lock(&tiqn_lock);
152 ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index);
153 if (ret < 0) {
154 pr_err("idr_get_new() failed for tiqn->tiqn_index\n");
155 spin_unlock(&tiqn_lock);
156 kfree(tiqn);
157 return ERR_PTR(ret);
158 }
159 list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
160 spin_unlock(&tiqn_lock);
161
162 pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
163
164 return tiqn;
165
166}
167
168static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
169{
170 /*
171 * Wait for accesses to said struct iscsi_tiqn to end.
172 */
173 spin_lock(&tiqn->tiqn_state_lock);
174 while (tiqn->tiqn_access_count != 0) {
175 spin_unlock(&tiqn->tiqn_state_lock);
176 msleep(10);
177 spin_lock(&tiqn->tiqn_state_lock);
178 }
179 spin_unlock(&tiqn->tiqn_state_lock);
180}
181
182void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
183{
184 /*
185 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
186 * while holding tiqn->tiqn_state_lock. This means that all subsequent
187 * attempts to access this struct iscsi_tiqn will fail from both transport
188 * fabric and control code paths.
189 */
190 if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
191 pr_err("iscsit_set_tiqn_shutdown() failed\n");
192 return;
193 }
194
195 iscsit_wait_for_tiqn(tiqn);
196
197 spin_lock(&tiqn_lock);
198 list_del(&tiqn->tiqn_list);
199 idr_remove(&tiqn_idr, tiqn->tiqn_index);
200 spin_unlock(&tiqn_lock);
201
202 pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
203 tiqn->tiqn);
204 kfree(tiqn);
205}
206
207int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
208{
209 int ret;
210 /*
211 * Determine if the network portal is accepting storage traffic.
212 */
213 spin_lock_bh(&np->np_thread_lock);
214 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
215 spin_unlock_bh(&np->np_thread_lock);
216 return -1;
217 }
218 if (np->np_login_tpg) {
219 pr_err("np->np_login_tpg() is not NULL!\n");
220 spin_unlock_bh(&np->np_thread_lock);
221 return -1;
222 }
223 spin_unlock_bh(&np->np_thread_lock);
224 /*
225 * Determine if the portal group is accepting storage traffic.
226 */
227 spin_lock_bh(&tpg->tpg_state_lock);
228 if (tpg->tpg_state != TPG_STATE_ACTIVE) {
229 spin_unlock_bh(&tpg->tpg_state_lock);
230 return -1;
231 }
232 spin_unlock_bh(&tpg->tpg_state_lock);
233
234 /*
235 * Here we serialize access across the TIQN+TPG Tuple.
236 */
237 ret = mutex_lock_interruptible(&tpg->np_login_lock);
238 if ((ret != 0) || signal_pending(current))
239 return -1;
240
241 spin_lock_bh(&np->np_thread_lock);
242 np->np_login_tpg = tpg;
243 spin_unlock_bh(&np->np_thread_lock);
244
245 return 0;
246}
247
248int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
249{
250 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
251
252 spin_lock_bh(&np->np_thread_lock);
253 np->np_login_tpg = NULL;
254 spin_unlock_bh(&np->np_thread_lock);
255
256 mutex_unlock(&tpg->np_login_lock);
257
258 if (tiqn)
259 iscsit_put_tiqn_for_login(tiqn);
260
261 return 0;
262}
263
264static struct iscsi_np *iscsit_get_np(
265 struct __kernel_sockaddr_storage *sockaddr,
266 int network_transport)
267{
268 struct sockaddr_in *sock_in, *sock_in_e;
269 struct sockaddr_in6 *sock_in6, *sock_in6_e;
270 struct iscsi_np *np;
271 int ip_match = 0;
272 u16 port;
273
274 spin_lock_bh(&np_lock);
275 list_for_each_entry(np, &g_np_list, np_list) {
276 spin_lock(&np->np_thread_lock);
277 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
278 spin_unlock(&np->np_thread_lock);
279 continue;
280 }
281
282 if (sockaddr->ss_family == AF_INET6) {
283 sock_in6 = (struct sockaddr_in6 *)sockaddr;
284 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
285
286 if (!memcmp((void *)&sock_in6->sin6_addr.in6_u,
287 (void *)&sock_in6_e->sin6_addr.in6_u,
288 sizeof(struct in6_addr)))
289 ip_match = 1;
290
291 port = ntohs(sock_in6->sin6_port);
292 } else {
293 sock_in = (struct sockaddr_in *)sockaddr;
294 sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
295
296 if (sock_in->sin_addr.s_addr ==
297 sock_in_e->sin_addr.s_addr)
298 ip_match = 1;
299
300 port = ntohs(sock_in->sin_port);
301 }
302
303 if ((ip_match == 1) && (np->np_port == port) &&
304 (np->np_network_transport == network_transport)) {
305 /*
306 * Increment the np_exports reference count now to
307 * prevent iscsit_del_np() below from being called
308 * while iscsi_tpg_add_network_portal() is called.
309 */
310 np->np_exports++;
311 spin_unlock(&np->np_thread_lock);
312 spin_unlock_bh(&np_lock);
313 return np;
314 }
315 spin_unlock(&np->np_thread_lock);
316 }
317 spin_unlock_bh(&np_lock);
318
319 return NULL;
320}
321
322struct iscsi_np *iscsit_add_np(
323 struct __kernel_sockaddr_storage *sockaddr,
324 char *ip_str,
325 int network_transport)
326{
327 struct sockaddr_in *sock_in;
328 struct sockaddr_in6 *sock_in6;
329 struct iscsi_np *np;
330 int ret;
331 /*
332 * Locate the existing struct iscsi_np if already active..
333 */
334 np = iscsit_get_np(sockaddr, network_transport);
335 if (np)
336 return np;
337
338 np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
339 if (!np) {
340 pr_err("Unable to allocate memory for struct iscsi_np\n");
341 return ERR_PTR(-ENOMEM);
342 }
343
344 np->np_flags |= NPF_IP_NETWORK;
345 if (sockaddr->ss_family == AF_INET6) {
346 sock_in6 = (struct sockaddr_in6 *)sockaddr;
347 snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
348 np->np_port = ntohs(sock_in6->sin6_port);
349 } else {
350 sock_in = (struct sockaddr_in *)sockaddr;
351 sprintf(np->np_ip, "%s", ip_str);
352 np->np_port = ntohs(sock_in->sin_port);
353 }
354
355 np->np_network_transport = network_transport;
356 spin_lock_init(&np->np_thread_lock);
357 init_completion(&np->np_restart_comp);
358 INIT_LIST_HEAD(&np->np_list);
359
360 ret = iscsi_target_setup_login_socket(np, sockaddr);
361 if (ret != 0) {
362 kfree(np);
363 return ERR_PTR(ret);
364 }
365
366 np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
367 if (IS_ERR(np->np_thread)) {
368 pr_err("Unable to create kthread: iscsi_np\n");
369 ret = PTR_ERR(np->np_thread);
370 kfree(np);
371 return ERR_PTR(ret);
372 }
373 /*
374 * Increment the np_exports reference count now to prevent
375 * iscsit_del_np() below from being run while a new call to
376 * iscsi_tpg_add_network_portal() for a matching iscsi_np is
377 * active. We don't need to hold np->np_thread_lock at this
378 * point because iscsi_np has not been added to g_np_list yet.
379 */
380 np->np_exports = 1;
381
382 spin_lock_bh(&np_lock);
383 list_add_tail(&np->np_list, &g_np_list);
384 spin_unlock_bh(&np_lock);
385
386 pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
387 np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
388 "TCP" : "SCTP");
389
390 return np;
391}
392
393int iscsit_reset_np_thread(
394 struct iscsi_np *np,
395 struct iscsi_tpg_np *tpg_np,
396 struct iscsi_portal_group *tpg)
397{
398 spin_lock_bh(&np->np_thread_lock);
399 if (tpg && tpg_np) {
400 /*
401 * The reset operation need only be performed when the
402 * passed struct iscsi_portal_group has a login in progress
403 * to one of the network portals.
404 */
405 if (tpg_np->tpg_np->np_login_tpg != tpg) {
406 spin_unlock_bh(&np->np_thread_lock);
407 return 0;
408 }
409 }
410 if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
411 spin_unlock_bh(&np->np_thread_lock);
412 return 0;
413 }
414 np->np_thread_state = ISCSI_NP_THREAD_RESET;
415
416 if (np->np_thread) {
417 spin_unlock_bh(&np->np_thread_lock);
418 send_sig(SIGINT, np->np_thread, 1);
419 wait_for_completion(&np->np_restart_comp);
420 spin_lock_bh(&np->np_thread_lock);
421 }
422 spin_unlock_bh(&np->np_thread_lock);
423
424 return 0;
425}
426
427int iscsit_del_np_comm(struct iscsi_np *np)
428{
429 if (!np->np_socket)
430 return 0;
431
432 /*
433 * Some network transports allocate their own struct sock->file,
434 * see if we need to free any additional allocated resources.
435 */
436 if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
437 kfree(np->np_socket->file);
438 np->np_socket->file = NULL;
439 }
440
441 sock_release(np->np_socket);
442 return 0;
443}
444
445int iscsit_del_np(struct iscsi_np *np)
446{
447 spin_lock_bh(&np->np_thread_lock);
448 np->np_exports--;
449 if (np->np_exports) {
450 spin_unlock_bh(&np->np_thread_lock);
451 return 0;
452 }
453 np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
454 spin_unlock_bh(&np->np_thread_lock);
455
456 if (np->np_thread) {
457 /*
458 * We need to send the signal to wakeup Linux/Net
459 * which may be sleeping in sock_accept()..
460 */
461 send_sig(SIGINT, np->np_thread, 1);
462 kthread_stop(np->np_thread);
463 }
464 iscsit_del_np_comm(np);
465
466 spin_lock_bh(&np_lock);
467 list_del(&np->np_list);
468 spin_unlock_bh(&np_lock);
469
470 pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
471 np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
472 "TCP" : "SCTP");
473
474 kfree(np);
475 return 0;
476}
477
478static int __init iscsi_target_init_module(void)
479{
480 int ret = 0;
481
482 pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
483
484 iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL);
485 if (!iscsit_global) {
486 pr_err("Unable to allocate memory for iscsit_global\n");
487 return -1;
488 }
489 mutex_init(&auth_id_lock);
490 spin_lock_init(&sess_idr_lock);
491 idr_init(&tiqn_idr);
492 idr_init(&sess_idr);
493
494 ret = iscsi_target_register_configfs();
495 if (ret < 0)
496 goto out;
497
498 ret = iscsi_thread_set_init();
499 if (ret < 0)
500 goto configfs_out;
501
502 if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
503 TARGET_THREAD_SET_COUNT) {
504 pr_err("iscsi_allocate_thread_sets() returned"
505 " unexpected value!\n");
506 goto ts_out1;
507 }
508
509 lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
510 sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd),
511 0, NULL);
512 if (!lio_cmd_cache) {
513 pr_err("Unable to kmem_cache_create() for"
514 " lio_cmd_cache\n");
515 goto ts_out2;
516 }
517
518 lio_qr_cache = kmem_cache_create("lio_qr_cache",
519 sizeof(struct iscsi_queue_req),
520 __alignof__(struct iscsi_queue_req), 0, NULL);
521 if (!lio_qr_cache) {
 522 pr_err("Unable to kmem_cache_create() for"
523 " lio_qr_cache\n");
524 goto cmd_out;
525 }
526
527 lio_dr_cache = kmem_cache_create("lio_dr_cache",
528 sizeof(struct iscsi_datain_req),
529 __alignof__(struct iscsi_datain_req), 0, NULL);
530 if (!lio_dr_cache) {
531 pr_err("Unable to kmem_cache_create() for"
532 " lio_dr_cache\n");
533 goto qr_out;
534 }
535
536 lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
537 sizeof(struct iscsi_ooo_cmdsn),
538 __alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
539 if (!lio_ooo_cache) {
540 pr_err("Unable to kmem_cache_create() for"
541 " lio_ooo_cache\n");
542 goto dr_out;
543 }
544
545 lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
546 sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
547 0, NULL);
548 if (!lio_r2t_cache) {
549 pr_err("Unable to kmem_cache_create() for"
550 " lio_r2t_cache\n");
551 goto ooo_out;
552 }
553
554 if (iscsit_load_discovery_tpg() < 0)
555 goto r2t_out;
556
557 return ret;
558r2t_out:
559 kmem_cache_destroy(lio_r2t_cache);
560ooo_out:
561 kmem_cache_destroy(lio_ooo_cache);
562dr_out:
563 kmem_cache_destroy(lio_dr_cache);
564qr_out:
565 kmem_cache_destroy(lio_qr_cache);
566cmd_out:
567 kmem_cache_destroy(lio_cmd_cache);
568ts_out2:
569 iscsi_deallocate_thread_sets();
570ts_out1:
571 iscsi_thread_set_free();
572configfs_out:
573 iscsi_target_deregister_configfs();
574out:
575 kfree(iscsit_global);
576 return -ENOMEM;
577}
578
579static void __exit iscsi_target_cleanup_module(void)
580{
581 iscsi_deallocate_thread_sets();
582 iscsi_thread_set_free();
583 iscsit_release_discovery_tpg();
584 kmem_cache_destroy(lio_cmd_cache);
585 kmem_cache_destroy(lio_qr_cache);
586 kmem_cache_destroy(lio_dr_cache);
587 kmem_cache_destroy(lio_ooo_cache);
588 kmem_cache_destroy(lio_r2t_cache);
589
590 iscsi_target_deregister_configfs();
591
592 kfree(iscsit_global);
593}
594
595int iscsit_add_reject(
596 u8 reason,
597 int fail_conn,
598 unsigned char *buf,
599 struct iscsi_conn *conn)
600{
601 struct iscsi_cmd *cmd;
602 struct iscsi_reject *hdr;
603 int ret;
604
605 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
606 if (!cmd)
607 return -1;
608
609 cmd->iscsi_opcode = ISCSI_OP_REJECT;
610 if (fail_conn)
611 cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
612
613 hdr = (struct iscsi_reject *) cmd->pdu;
614 hdr->reason = reason;
615
616 cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
617 if (!cmd->buf_ptr) {
618 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
619 iscsit_release_cmd(cmd);
620 return -1;
621 }
622 memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
623
624 spin_lock_bh(&conn->cmd_lock);
625 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
626 spin_unlock_bh(&conn->cmd_lock);
627
628 cmd->i_state = ISTATE_SEND_REJECT;
629 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
630
631 ret = wait_for_completion_interruptible(&cmd->reject_comp);
632 if (ret != 0)
633 return -1;
634
635 return (!fail_conn) ? 0 : -1;
636}
637
638int iscsit_add_reject_from_cmd(
639 u8 reason,
640 int fail_conn,
641 int add_to_conn,
642 unsigned char *buf,
643 struct iscsi_cmd *cmd)
644{
645 struct iscsi_conn *conn;
646 struct iscsi_reject *hdr;
647 int ret;
648
649 if (!cmd->conn) {
650 pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
651 cmd->init_task_tag);
652 return -1;
653 }
654 conn = cmd->conn;
655
656 cmd->iscsi_opcode = ISCSI_OP_REJECT;
657 if (fail_conn)
658 cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
659
660 hdr = (struct iscsi_reject *) cmd->pdu;
661 hdr->reason = reason;
662
663 cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
664 if (!cmd->buf_ptr) {
665 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
666 iscsit_release_cmd(cmd);
667 return -1;
668 }
669 memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
670
671 if (add_to_conn) {
672 spin_lock_bh(&conn->cmd_lock);
673 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
674 spin_unlock_bh(&conn->cmd_lock);
675 }
676
677 cmd->i_state = ISTATE_SEND_REJECT;
678 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
679
680 ret = wait_for_completion_interruptible(&cmd->reject_comp);
681 if (ret != 0)
682 return -1;
683
684 return (!fail_conn) ? 0 : -1;
685}
686
687/*
688 * Map some portion of the allocated scatterlist to an iovec, suitable for
689 * kernel sockets to copy data in/out. This handles both pages and slab-allocated
690 * buffers, since we have been tricky and mapped t_mem_sg to the buffer in
691 * either case (see iscsit_alloc_buffs)
692 */
693static int iscsit_map_iovec(
694 struct iscsi_cmd *cmd,
695 struct kvec *iov,
696 u32 data_offset,
697 u32 data_length)
698{
699 u32 i = 0;
700 struct scatterlist *sg;
701 unsigned int page_off;
702
703 /*
704 * We have a private mapping of the allocated pages in t_mem_sg.
705 * At this point, we also know each contains a page.
706 */
707 sg = &cmd->t_mem_sg[data_offset / PAGE_SIZE];
708 page_off = (data_offset % PAGE_SIZE);
709
710 cmd->first_data_sg = sg;
711 cmd->first_data_sg_off = page_off;
712
713 while (data_length) {
714 u32 cur_len = min_t(u32, data_length, sg->length - page_off);
715
716 iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
717 iov[i].iov_len = cur_len;
718
719 data_length -= cur_len;
720 page_off = 0;
721 sg = sg_next(sg);
722 i++;
723 }
724
725 cmd->kmapped_nents = i;
726
727 return i;
728}
729
730static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
731{
732 u32 i;
733 struct scatterlist *sg;
734
735 sg = cmd->first_data_sg;
736
737 for (i = 0; i < cmd->kmapped_nents; i++)
738 kunmap(sg_page(&sg[i]));
739}
740
741static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
742{
743 struct iscsi_cmd *cmd;
744
745 conn->exp_statsn = exp_statsn;
746
747 spin_lock_bh(&conn->cmd_lock);
748 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
749 spin_lock(&cmd->istate_lock);
750 if ((cmd->i_state == ISTATE_SENT_STATUS) &&
751 (cmd->stat_sn < exp_statsn)) {
752 cmd->i_state = ISTATE_REMOVE;
753 spin_unlock(&cmd->istate_lock);
754 iscsit_add_cmd_to_immediate_queue(cmd, conn,
755 cmd->i_state);
756 continue;
757 }
758 spin_unlock(&cmd->istate_lock);
759 }
760 spin_unlock_bh(&conn->cmd_lock);
761}
762
763static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
764{
765 u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 :
766 cmd->se_cmd.t_data_nents;
767
768 iov_count += TRANSPORT_IOV_DATA_BUFFER;
769
770 cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);
771 if (!cmd->iov_data) {
772 pr_err("Unable to allocate cmd->iov_data\n");
773 return -ENOMEM;
774 }
775
776 cmd->orig_iov_data_count = iov_count;
777 return 0;
778}
779
780static int iscsit_alloc_buffs(struct iscsi_cmd *cmd)
781{
782 struct scatterlist *sgl;
783 u32 length = cmd->se_cmd.data_length;
784 int nents = DIV_ROUND_UP(length, PAGE_SIZE);
785 int i = 0, ret;
786 /*
787 * If no SCSI payload is present, allocate the default iovecs used for
788 * iSCSI PDU Header
789 */
790 if (!length)
791 return iscsit_allocate_iovecs(cmd);
792
793 sgl = kzalloc(sizeof(*sgl) * nents, GFP_KERNEL);
794 if (!sgl)
795 return -ENOMEM;
796
797 sg_init_table(sgl, nents);
798
799 while (length) {
800 int buf_size = min_t(int, length, PAGE_SIZE);
801 struct page *page;
802
803 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
804 if (!page)
805 goto page_alloc_failed;
806
807 sg_set_page(&sgl[i], page, buf_size, 0);
808
809 length -= buf_size;
810 i++;
811 }
812
813 cmd->t_mem_sg = sgl;
814 cmd->t_mem_sg_nents = nents;
815
816 /* BIDI ops not supported */
817
818 /* Tell the core about our preallocated memory */
819 transport_generic_map_mem_to_cmd(&cmd->se_cmd, sgl, nents, NULL, 0);
820 /*
821 * Allocate iovecs for SCSI payload after transport_generic_map_mem_to_cmd
822 * so that cmd->se_cmd.t_tasks_se_num has been set.
823 */
824 ret = iscsit_allocate_iovecs(cmd);
825 if (ret < 0)
826 goto page_alloc_failed;
827
828 return 0;
829
830page_alloc_failed:
831 while (i >= 0) {
832 __free_page(sg_page(&sgl[i]));
833 i--;
834 }
835 kfree(cmd->t_mem_sg);
836 cmd->t_mem_sg = NULL;
837 return -ENOMEM;
838}
839
840static int iscsit_handle_scsi_cmd(
841 struct iscsi_conn *conn,
842 unsigned char *buf)
843{
844 int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
845 int dump_immediate_data = 0, send_check_condition = 0, payload_length;
846 struct iscsi_cmd *cmd = NULL;
847 struct iscsi_scsi_req *hdr;
848
849 spin_lock_bh(&conn->sess->session_stats_lock);
850 conn->sess->cmd_pdus++;
851 if (conn->sess->se_sess->se_node_acl) {
852 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
853 conn->sess->se_sess->se_node_acl->num_cmds++;
854 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
855 }
856 spin_unlock_bh(&conn->sess->session_stats_lock);
857
858 hdr = (struct iscsi_scsi_req *) buf;
859 payload_length = ntoh24(hdr->dlength);
860 hdr->itt = be32_to_cpu(hdr->itt);
861 hdr->data_length = be32_to_cpu(hdr->data_length);
862 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
863 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
864
865 /* FIXME; Add checks for AdditionalHeaderSegment */
866
867 if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
868 !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
869 pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
870 " not set. Bad iSCSI Initiator.\n");
871 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
872 buf, conn);
873 }
874
875 if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
876 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
877 /*
878 * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
879 * that adds support for RESERVE/RELEASE. There is a bug
 880 * added with this new functionality that sets R/W bits when
881 * neither CDB carries any READ or WRITE datapayloads.
882 */
883 if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
884 hdr->flags &= ~ISCSI_FLAG_CMD_READ;
885 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
886 goto done;
887 }
888
889 pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
890 " set when Expected Data Transfer Length is 0 for"
891 " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
892 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
893 buf, conn);
894 }
895done:
896
897 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
898 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
899 pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
900 " MUST be set if Expected Data Transfer Length is not 0."
901 " Bad iSCSI Initiator\n");
902 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
903 buf, conn);
904 }
905
906 if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
907 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
908 pr_err("Bidirectional operations not supported!\n");
909 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
910 buf, conn);
911 }
912
913 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
914 pr_err("Illegally set Immediate Bit in iSCSI Initiator"
915 " Scsi Command PDU.\n");
916 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
917 buf, conn);
918 }
919
920 if (payload_length && !conn->sess->sess_ops->ImmediateData) {
921 pr_err("ImmediateData=No but DataSegmentLength=%u,"
922 " protocol error.\n", payload_length);
923 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
924 buf, conn);
925 }
926
927 if ((hdr->data_length == payload_length) &&
928 (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
929 pr_err("Expected Data Transfer Length and Length of"
930 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
931 " bit is not set protocol error\n");
932 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
933 buf, conn);
934 }
935
936 if (payload_length > hdr->data_length) {
937 pr_err("DataSegmentLength: %u is greater than"
938 " EDTL: %u, protocol error.\n", payload_length,
939 hdr->data_length);
940 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
941 buf, conn);
942 }
943
944 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
945 pr_err("DataSegmentLength: %u is greater than"
946 " MaxRecvDataSegmentLength: %u, protocol error.\n",
947 payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
948 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
949 buf, conn);
950 }
951
952 if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
953 pr_err("DataSegmentLength: %u is greater than"
954 " FirstBurstLength: %u, protocol error.\n",
955 payload_length, conn->sess->sess_ops->FirstBurstLength);
956 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
957 buf, conn);
958 }
959
960 data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
961 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
962 DMA_NONE;
963
964 cmd = iscsit_allocate_se_cmd(conn, hdr->data_length, data_direction,
965 (hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK));
966 if (!cmd)
967 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
968 buf, conn);
969
970 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
971 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
972 hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
973
974 cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
975 cmd->i_state = ISTATE_NEW_CMD;
976 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
977 cmd->immediate_data = (payload_length) ? 1 : 0;
978 cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
979 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
980 if (cmd->unsolicited_data)
981 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
982
983 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
984 if (hdr->flags & ISCSI_FLAG_CMD_READ) {
985 spin_lock_bh(&conn->sess->ttt_lock);
986 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
987 if (cmd->targ_xfer_tag == 0xFFFFFFFF)
988 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
989 spin_unlock_bh(&conn->sess->ttt_lock);
990 } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
991 cmd->targ_xfer_tag = 0xFFFFFFFF;
992 cmd->cmd_sn = hdr->cmdsn;
993 cmd->exp_stat_sn = hdr->exp_statsn;
994 cmd->first_burst_len = payload_length;
995
996 if (cmd->data_direction == DMA_FROM_DEVICE) {
997 struct iscsi_datain_req *dr;
998
999 dr = iscsit_allocate_datain_req();
1000 if (!dr)
1001 return iscsit_add_reject_from_cmd(
1002 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1003 1, 1, buf, cmd);
1004
1005 iscsit_attach_datain_req(cmd, dr);
1006 }
1007
1008 /*
1009 * The CDB is going to an se_device_t.
1010 */
1011 ret = iscsit_get_lun_for_cmd(cmd, hdr->cdb,
1012 get_unaligned_le64(&hdr->lun));
1013 if (ret < 0) {
1014 if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
1015 pr_debug("Responding to non-acl'ed,"
1016 " non-existent or non-exported iSCSI LUN:"
1017 " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
1018 }
1019 if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
1020 return iscsit_add_reject_from_cmd(
1021 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1022 1, 1, buf, cmd);
1023
1024 send_check_condition = 1;
1025 goto attach_cmd;
1026 }
1027 /*
1028 * The Initiator Node has access to the LUN (the addressing method
1029 * is handled inside of iscsit_get_lun_for_cmd()). Now it's time to
1030 * allocate 1->N transport tasks (depending on sector count and
1031 * maximum request size the physical HBA(s) can handle.
1032 */
1033 transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb);
1034 if (transport_ret == -ENOMEM) {
1035 return iscsit_add_reject_from_cmd(
1036 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1037 1, 1, buf, cmd);
1038 } else if (transport_ret == -EINVAL) {
1039 /*
1040 * Unsupported SAM Opcode. CHECK_CONDITION will be sent
1041 * in iscsit_execute_cmd() during the CmdSN OOO Execution
 1042 * Mechanism.
1043 */
1044 send_check_condition = 1;
1045 } else {
1046 if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
1047 return iscsit_add_reject_from_cmd(
1048 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1049 1, 1, buf, cmd);
1050 }
1051
1052attach_cmd:
1053 spin_lock_bh(&conn->cmd_lock);
1054 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
1055 spin_unlock_bh(&conn->cmd_lock);
1056 /*
1057 * Check if we need to delay processing because of ALUA
1058 * Active/NonOptimized primary access state..
1059 */
1060 core_alua_check_nonop_delay(&cmd->se_cmd);
1061 /*
1062 * Allocate and setup SGL used with transport_generic_map_mem_to_cmd().
1063 * also call iscsit_allocate_iovecs()
1064 */
1065 ret = iscsit_alloc_buffs(cmd);
1066 if (ret < 0)
1067 return iscsit_add_reject_from_cmd(
1068 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1069 1, 1, buf, cmd);
1070 /*
1071 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
1072 * the Immediate Bit is not set, and no Immediate
1073 * Data is attached.
1074 *
1075 * A PDU/CmdSN carrying Immediate Data can only
1076 * be processed after the DataCRC has passed.
1077 * If the DataCRC fails, the CmdSN MUST NOT
1078 * be acknowledged. (See below)
1079 */
1080 if (!cmd->immediate_data) {
1081 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1082 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1083 return iscsit_add_reject_from_cmd(
1084 ISCSI_REASON_PROTOCOL_ERROR,
1085 1, 0, buf, cmd);
1086 }
1087
1088 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
1089
1090 /*
1091 * If no Immediate Data is attached, it's OK to return now.
1092 */
1093 if (!cmd->immediate_data) {
1094 if (send_check_condition)
1095 return 0;
1096
1097 if (cmd->unsolicited_data) {
1098 iscsit_set_dataout_sequence_values(cmd);
1099
1100 spin_lock_bh(&cmd->dataout_timeout_lock);
1101 iscsit_start_dataout_timer(cmd, cmd->conn);
1102 spin_unlock_bh(&cmd->dataout_timeout_lock);
1103 }
1104
1105 return 0;
1106 }
1107
1108 /*
1109 * Early CHECK_CONDITIONs never make it to the transport processing
1110 * thread. They are processed in CmdSN order by
1111 * iscsit_check_received_cmdsn() below.
1112 */
1113 if (send_check_condition) {
1114 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1115 dump_immediate_data = 1;
1116 goto after_immediate_data;
1117 }
1118 /*
1119 * Call directly into transport_generic_new_cmd() to perform
1120 * the backend memory allocation.
1121 */
1122 ret = transport_generic_new_cmd(&cmd->se_cmd);
1123 if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
1124 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1125 dump_immediate_data = 1;
1126 goto after_immediate_data;
1127 }
1128
1129 immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length);
1130after_immediate_data:
1131 if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
1132 /*
1133 * A PDU/CmdSN carrying Immediate Data passed
1134 * DataCRC, check against ExpCmdSN/MaxCmdSN if
1135 * Immediate Bit is not set.
1136 */
1137 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1138 /*
1139 * Special case for Unsupported SAM WRITE Opcodes
1140 * and ImmediateData=Yes.
1141 */
1142 if (dump_immediate_data) {
1143 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
1144 return -1;
1145 } else if (cmd->unsolicited_data) {
1146 iscsit_set_dataout_sequence_values(cmd);
1147
1148 spin_lock_bh(&cmd->dataout_timeout_lock);
1149 iscsit_start_dataout_timer(cmd, cmd->conn);
1150 spin_unlock_bh(&cmd->dataout_timeout_lock);
1151 }
1152
1153 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1154 return iscsit_add_reject_from_cmd(
1155 ISCSI_REASON_PROTOCOL_ERROR,
1156 1, 0, buf, cmd);
1157
1158 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
1159 /*
1160 * Immediate Data failed DataCRC and ERL>=1,
1161 * silently drop this PDU and let the initiator
1162 * plug the CmdSN gap.
1163 *
1164 * FIXME: Send Unsolicited NOPIN with reserved
1165 * TTT here to help the initiator figure out
1166 * the missing CmdSN, although they should be
1167 * intelligent enough to determine the missing
1168 * CmdSN and issue a retry to plug the sequence.
1169 */
1170 cmd->i_state = ISTATE_REMOVE;
1171 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
1172 } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
1173 return -1;
1174
1175 return 0;
1176}
1177
1178static u32 iscsit_do_crypto_hash_sg(
1179 struct hash_desc *hash,
1180 struct iscsi_cmd *cmd,
1181 u32 data_offset,
1182 u32 data_length,
1183 u32 padding,
1184 u8 *pad_bytes)
1185{
1186 u32 data_crc;
1187 u32 i;
1188 struct scatterlist *sg;
1189 unsigned int page_off;
1190
1191 crypto_hash_init(hash);
1192
1193 sg = cmd->first_data_sg;
1194 page_off = cmd->first_data_sg_off;
1195
1196 i = 0;
1197 while (data_length) {
1198 u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off));
1199
1200 crypto_hash_update(hash, &sg[i], cur_len);
1201
1202 data_length -= cur_len;
1203 page_off = 0;
1204 i++;
1205 }
1206
1207 if (padding) {
1208 struct scatterlist pad_sg;
1209
1210 sg_init_one(&pad_sg, pad_bytes, padding);
1211 crypto_hash_update(hash, &pad_sg, padding);
1212 }
1213 crypto_hash_final(hash, (u8 *) &data_crc);
1214
1215 return data_crc;
1216}
1217
1218static void iscsit_do_crypto_hash_buf(
1219 struct hash_desc *hash,
1220 unsigned char *buf,
1221 u32 payload_length,
1222 u32 padding,
1223 u8 *pad_bytes,
1224 u8 *data_crc)
1225{
1226 struct scatterlist sg;
1227
1228 crypto_hash_init(hash);
1229
1230 sg_init_one(&sg, (u8 *)buf, payload_length);
1231 crypto_hash_update(hash, &sg, payload_length);
1232
1233 if (padding) {
1234 sg_init_one(&sg, pad_bytes, padding);
1235 crypto_hash_update(hash, &sg, padding);
1236 }
1237 crypto_hash_final(hash, data_crc);
1238}
1239
/*
 * Handle an incoming DataOUT PDU for an outstanding WRITE command.
 *
 * Validates the PDU (non-zero length, within MaxRecvDataSegmentLength,
 * known ITT, WRITE direction, offset within EDTL), performs pre-receive
 * DataSN/sequence/recovery checks, receives the payload together with
 * pad bytes and an optional CRC32C data digest, then runs the
 * post-receive checks that decide whether to build further R2Ts or hand
 * the completed write to the SE transport.
 *
 * Returns 0 on success or silently-recovered conditions, -1 on
 * unrecoverable errors, otherwise the result of queueing a Reject.
 */
static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
{
	int iov_ret, ooo_cmdsn = 0, ret;
	u8 data_crc_failed = 0;
	u32 checksum, iov_count = 0, padding = 0, rx_got = 0;
	u32 rx_size = 0, payload_length;
	struct iscsi_cmd *cmd = NULL;
	struct se_cmd *se_cmd;
	struct iscsi_data *hdr;
	struct kvec *iov;
	unsigned long flags;

	/* Convert the wire (big-endian) header fields to host order. */
	hdr = (struct iscsi_data *) buf;
	payload_length = ntoh24(hdr->dlength);
	hdr->itt = be32_to_cpu(hdr->itt);
	hdr->ttt = be32_to_cpu(hdr->ttt);
	hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
	hdr->datasn = be32_to_cpu(hdr->datasn);
	hdr->offset = be32_to_cpu(hdr->offset);

	if (!payload_length) {
		pr_err("DataOUT payload is ZERO, protocol error.\n");
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buf, conn);
	}

	/* iSCSI write */
	/* Account the received bytes in session and node ACL statistics. */
	spin_lock_bh(&conn->sess->session_stats_lock);
	conn->sess->rx_data_octets += payload_length;
	if (conn->sess->se_sess->se_node_acl) {
		spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
		conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
		spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
	}
	spin_unlock_bh(&conn->sess->session_stats_lock);

	if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
		pr_err("DataSegmentLength: %u is greater than"
			" MaxRecvDataSegmentLength: %u\n", payload_length,
			conn->conn_ops->MaxRecvDataSegmentLength);
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buf, conn);
	}

	/* Unknown ITTs cause the payload to be dumped, not rejected. */
	cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
			payload_length);
	if (!cmd)
		return 0;

	pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
		hdr->itt, hdr->ttt, hdr->datasn, hdr->offset,
		payload_length, conn->cid);

	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
		pr_err("Command ITT: 0x%08x received DataOUT after"
			" last DataOUT received, dumping payload\n",
			cmd->init_task_tag);
		return iscsit_dump_data_payload(conn, payload_length, 1);
	}

	if (cmd->data_direction != DMA_TO_DEVICE) {
		pr_err("Command ITT: 0x%08x received DataOUT for a"
			" NON-WRITE command.\n", cmd->init_task_tag);
		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
				1, 0, buf, cmd);
	}
	se_cmd = &cmd->se_cmd;
	iscsit_mod_dataout_timer(cmd);

	/* Reject writes that would run past the command's expected length. */
	if ((hdr->offset + payload_length) > cmd->data_length) {
		pr_err("DataOut Offset: %u, Length %u greater than"
			" iSCSI Command EDTL %u, protocol error.\n",
			hdr->offset, payload_length, cmd->data_length);
		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
				1, 0, buf, cmd);
	}

	if (cmd->unsolicited_data) {
		int dump_unsolicited_data = 0;

		if (conn->sess->sess_ops->InitialR2T) {
			pr_err("Received unexpected unsolicited data"
				" while InitialR2T=Yes, protocol error.\n");
			transport_send_check_condition_and_sense(&cmd->se_cmd,
					TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
			return -1;
		}
		/*
		 * Special case for dealing with Unsolicited DataOUT
		 * and Unsupported SAM WRITE Opcodes and SE resource allocation
		 * failures;
		 */

		/* Something's amiss if we're not in WRITE_PENDING state... */
		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
		WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

		/*
		 * NOTE(review): t_state_lock is dropped and immediately
		 * re-taken here, so t_state could in principle change
		 * between the WARN_ON and the flags test below — confirm
		 * this is intentional (WARN is diagnostic only).
		 */
		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
		if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
		    (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
			dump_unsolicited_data = 1;
		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

		if (dump_unsolicited_data) {
			/*
			 * Check if a delayed TASK_ABORTED status needs to
			 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
			 * received with the unsolicitied data out.
			 */
			if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
				iscsit_stop_dataout_timer(cmd);

			transport_check_aborted_status(se_cmd,
					(hdr->flags & ISCSI_FLAG_CMD_FINAL));
			return iscsit_dump_data_payload(conn, payload_length, 1);
		}
	} else {
		/*
		 * For the normal solicited data path:
		 *
		 * Check for a delayed TASK_ABORTED status and dump any
		 * incoming data out payload if one exists. Also, when the
		 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
		 * data out sequence, we decrement outstanding_r2ts. Once
		 * outstanding_r2ts reaches zero, go ahead and send the delayed
		 * TASK_ABORTED status.
		 */
		if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
			if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
				if (--cmd->outstanding_r2ts < 1) {
					iscsit_stop_dataout_timer(cmd);
					transport_check_aborted_status(
							se_cmd, 1);
				}

			return iscsit_dump_data_payload(conn, payload_length, 1);
		}
	}
	/*
	 * Preform DataSN, DataSequenceInOrder, DataPDUInOrder, and
	 * within-command recovery checks before receiving the payload.
	 */
	ret = iscsit_check_pre_dataout(cmd, buf);
	if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)
		return 0;
	else if (ret == DATAOUT_CANNOT_RECOVER)
		return -1;

	/* Map the command's data SGLs into iovecs at the PDU's offset. */
	rx_size += payload_length;
	iov = &cmd->iov_data[0];

	iov_ret = iscsit_map_iovec(cmd, iov, hdr->offset, payload_length);
	if (iov_ret < 0)
		return -1;

	iov_count += iov_ret;

	/* Payloads are padded to a 4-byte boundary on the wire. */
	padding = ((-payload_length) & 3);
	if (padding != 0) {
		iov[iov_count].iov_base = cmd->pad_bytes;
		iov[iov_count++].iov_len = padding;
		rx_size += padding;
		pr_debug("Receiving %u padding bytes.\n", padding);
	}

	if (conn->conn_ops->DataDigest) {
		iov[iov_count].iov_base = &checksum;
		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
		rx_size += ISCSI_CRC_LEN;
	}

	rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);

	iscsit_unmap_iovec(cmd);

	if (rx_got != rx_size)
		return -1;

	/* Verify the received digest against a locally computed CRC32C. */
	if (conn->conn_ops->DataDigest) {
		u32 data_crc;

		data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
						hdr->offset, payload_length, padding,
						cmd->pad_bytes);

		if (checksum != data_crc) {
			pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
				" DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
				" does not match computed 0x%08x\n",
				hdr->itt, hdr->offset, payload_length,
				hdr->datasn, checksum, data_crc);
			data_crc_failed = 1;
		} else {
			pr_debug("Got CRC32C DataDigest 0x%08x for"
				" %u bytes of Data Out\n", checksum,
				payload_length);
		}
	}
	/*
	 * Increment post receive data and CRC values or perform
	 * within-command recovery.
	 */
	ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed);
	if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY))
		return 0;
	else if (ret == DATAOUT_SEND_R2T) {
		iscsit_set_dataout_sequence_values(cmd);
		iscsit_build_r2ts_for_cmd(cmd, conn, 0);
	} else if (ret == DATAOUT_SEND_TO_TRANSPORT) {
		/*
		 * Handle extra special case for out of order
		 * Unsolicited Data Out.
		 */
		spin_lock_bh(&cmd->istate_lock);
		ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);

		iscsit_stop_dataout_timer(cmd);
		/*
		 * Hand the write to the transport now only if its CmdSN
		 * arrived in order; OOO commands execute later in CmdSN
		 * order.
		 */
		return (!ooo_cmdsn) ? transport_generic_handle_data(
					&cmd->se_cmd) : 0;
	} else /* DATAOUT_CANNOT_RECOVER */
		return -1;

	return 0;
}
1469
/*
 * Handle an incoming NOP-Out PDU.
 *
 * Three cases are covered: a NOPOUT ping requesting a NOPIN echo (valid
 * ITT), a NOPOUT that carries data/ExpStatSN but wants no reply
 * (reserved ITT), and a response to a target-issued unsolicited NOPIN
 * (valid TTT).  Optional ping data is received — with padding and an
 * optional CRC32C data digest — and attached to the allocated command
 * so it can be echoed back in the NOPIN response.
 *
 * Returns 0 on success/silently-dropped PDUs, -1 on unrecoverable
 * errors, otherwise the result of queueing a Reject.
 */
static int iscsit_handle_nop_out(
	struct iscsi_conn *conn,
	unsigned char *buf)
{
	unsigned char *ping_data = NULL;
	int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size;
	u32 checksum, data_crc, padding = 0, payload_length;
	u64 lun;
	struct iscsi_cmd *cmd = NULL;
	struct kvec *iov = NULL;
	struct iscsi_nopout *hdr;

	/* Convert the wire (big-endian) header fields to host order. */
	hdr = (struct iscsi_nopout *) buf;
	payload_length = ntoh24(hdr->dlength);
	lun = get_unaligned_le64(&hdr->lun);
	hdr->itt = be32_to_cpu(hdr->itt);
	hdr->ttt = be32_to_cpu(hdr->ttt);
	hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
	hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);

	/* A reserved ITT is only legal on an immediate NOPOUT. */
	if ((hdr->itt == 0xFFFFFFFF) && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
		pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
			" not set, protocol error.\n");
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buf, conn);
	}

	if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
		pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
			" greater than MaxRecvDataSegmentLength: %u, protocol"
			" error.\n", payload_length,
			conn->conn_ops->MaxRecvDataSegmentLength);
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buf, conn);
	}

	pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%09x,"
		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
		(hdr->itt == 0xFFFFFFFF) ? "Response" : "Request",
		hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
		payload_length);
	/*
	 * This is not a response to a Unsolicited NopIN, which means
	 * it can either be a NOPOUT ping request (with a valid ITT),
	 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
	 * Either way, make sure we allocate an struct iscsi_cmd, as both
	 * can contain ping data.
	 */
	if (hdr->ttt == 0xFFFFFFFF) {
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			return iscsit_add_reject(
					ISCSI_REASON_BOOKMARK_NO_RESOURCES,
					1, buf, conn);

		cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
		cmd->i_state = ISTATE_SEND_NOPIN;
		cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
					1 : 0);
		conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
		cmd->targ_xfer_tag = 0xFFFFFFFF;
		cmd->cmd_sn = hdr->cmdsn;
		cmd->exp_stat_sn = hdr->exp_statsn;
		cmd->data_direction = DMA_NONE;
	}

	if (payload_length && (hdr->ttt == 0xFFFFFFFF)) {
		/* +1 below allows NUL-termination of the received data. */
		rx_size = payload_length;
		ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
		if (!ping_data) {
			pr_err("Unable to allocate memory for"
				" NOPOUT ping data.\n");
			ret = -1;
			goto out;
		}

		iov = &cmd->iov_misc[0];
		iov[niov].iov_base = ping_data;
		iov[niov++].iov_len = payload_length;

		/* Payloads are padded to a 4-byte boundary on the wire. */
		padding = ((-payload_length) & 3);
		if (padding != 0) {
			pr_debug("Receiving %u additional bytes"
				" for padding.\n", padding);
			iov[niov].iov_base = &cmd->pad_bytes;
			iov[niov++].iov_len = padding;
			rx_size += padding;
		}
		if (conn->conn_ops->DataDigest) {
			iov[niov].iov_base = &checksum;
			iov[niov++].iov_len = ISCSI_CRC_LEN;
			rx_size += ISCSI_CRC_LEN;
		}

		rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
		if (rx_got != rx_size) {
			ret = -1;
			goto out;
		}

		/* Verify the received digest against a local CRC32C. */
		if (conn->conn_ops->DataDigest) {
			iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
					ping_data, payload_length,
					padding, cmd->pad_bytes,
					(u8 *)&data_crc);

			if (checksum != data_crc) {
				pr_err("Ping data CRC32C DataDigest"
				" 0x%08x does not match computed 0x%08x\n",
					checksum, data_crc);
				if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
					pr_err("Unable to recover from"
					" NOPOUT Ping DataCRC failure while in"
					" ERL=0.\n");
					ret = -1;
					goto out;
				} else {
					/*
					 * Silently drop this PDU and let the
					 * initiator plug the CmdSN gap.
					 */
					pr_debug("Dropping NOPOUT"
					" Command CmdSN: 0x%08x due to"
					" DataCRC error.\n", hdr->cmdsn);
					ret = 0;
					goto out;
				}
			} else {
				pr_debug("Got CRC32C DataDigest"
				" 0x%08x for %u bytes of ping data.\n",
					checksum, payload_length);
			}
		}

		ping_data[payload_length] = '\0';
		/*
		 * Attach ping data to struct iscsi_cmd->buf_ptr.
		 */
		cmd->buf_ptr = (void *)ping_data;
		cmd->buf_ptr_size = payload_length;

		pr_debug("Got %u bytes of NOPOUT ping"
			" data.\n", payload_length);
		pr_debug("Ping Data: \"%s\"\n", ping_data);
	}

	if (hdr->itt != 0xFFFFFFFF) {
		if (!cmd) {
			pr_err("Checking CmdSN for NOPOUT,"
				" but cmd is NULL!\n");
			return -1;
		}
		/*
		 * Initiator is expecting a NopIN ping reply,
		 */
		spin_lock_bh(&conn->cmd_lock);
		list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
		spin_unlock_bh(&conn->cmd_lock);

		iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);

		/* Immediate NOPOUTs are answered without a CmdSN check. */
		if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
			iscsit_add_cmd_to_response_queue(cmd, conn,
					cmd->i_state);
			return 0;
		}

		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
			ret = 0;
			goto ping_out;
		}
		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
			return iscsit_add_reject_from_cmd(
					ISCSI_REASON_PROTOCOL_ERROR,
					1, 0, buf, cmd);

		return 0;
	}

	if (hdr->ttt != 0xFFFFFFFF) {
		/*
		 * This was a response to a unsolicited NOPIN ping.
		 */
		cmd = iscsit_find_cmd_from_ttt(conn, hdr->ttt);
		if (!cmd)
			return -1;

		iscsit_stop_nopin_response_timer(conn);

		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
		iscsit_start_nopin_timer(conn);
	} else {
		/*
		 * Initiator is not expecting a NOPIN is response.
		 * Just ignore for now.
		 *
		 * iSCSI v19-91 10.18
		 * "A NOP-OUT may also be used to confirm a changed
		 * ExpStatSN if another PDU will not be available
		 * for a long time."
		 */
		ret = 0;
		goto out;
	}

	return 0;
out:
	/*
	 * NOTE(review): once ping_data has been attached to cmd->buf_ptr
	 * above (reserved-ITT case), reaching this label frees it via
	 * kfree(ping_data) below; if iscsit_release_cmd() also frees
	 * cmd->buf_ptr, that is a double free — confirm buf_ptr ownership
	 * in iscsit_release_cmd().
	 */
	if (cmd)
		iscsit_release_cmd(cmd);
ping_out:
	kfree(ping_data);
	return ret;
}
1685
/*
 * Handle an incoming Task Management Function Request PDU.
 *
 * Decodes the requested TMR function, normalizes reserved tag fields,
 * performs per-function setup (LUN lookup for all functions except
 * ERL=2 TASK_REASSIGN, ExpDataSN validation for a successful
 * TASK_REASSIGN), then either hands the TMR to the SE transport or
 * queues a response for the tx thread directly.
 *
 * Returns 0 on success, otherwise the result of queueing a Reject or
 * of transport_generic_handle_tmr().
 */
static int iscsit_handle_task_mgt_cmd(
	struct iscsi_conn *conn,
	unsigned char *buf)
{
	struct iscsi_cmd *cmd;
	struct se_tmr_req *se_tmr;
	struct iscsi_tmr_req *tmr_req;
	struct iscsi_tm *hdr;
	u32 payload_length;
	int out_of_order_cmdsn = 0;
	int ret;
	u8 function;

	/* Convert the wire (big-endian) header fields to host order. */
	hdr = (struct iscsi_tm *) buf;
	payload_length = ntoh24(hdr->dlength);
	hdr->itt = be32_to_cpu(hdr->itt);
	hdr->rtt = be32_to_cpu(hdr->rtt);
	hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
	hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
	hdr->refcmdsn = be32_to_cpu(hdr->refcmdsn);
	hdr->exp_datasn = be32_to_cpu(hdr->exp_datasn);
	/* The TMR function code occupies the low flag bits. */
	hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
	function = hdr->flags;

	pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
		" 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
		" 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
		hdr->rtt, hdr->refcmdsn, conn->cid);

	/*
	 * RefTaskTag is only meaningful for ABORT_TASK and TASK_REASSIGN;
	 * quietly normalize (rather than reject) a non-reserved value.
	 */
	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
	    ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
	     (hdr->rtt != ISCSI_RESERVED_TAG))) {
		pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
		hdr->rtt = ISCSI_RESERVED_TAG;
	}

	if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
			!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
		pr_err("Task Management Request TASK_REASSIGN not"
			" issued as immediate command, bad iSCSI Initiator"
				"implementation\n");
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buf, conn);
	}
	/* RefCmdSN is likewise only meaningful for ABORT_TASK. */
	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
	    (hdr->refcmdsn != ISCSI_RESERVED_TAG))
		hdr->refcmdsn = ISCSI_RESERVED_TAG;

	cmd = iscsit_allocate_se_cmd_for_tmr(conn, function);
	if (!cmd)
		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
					1, buf, conn);

	cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
	cmd->i_state = ISTATE_SEND_TASKMGTRSP;
	cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
	cmd->init_task_tag = hdr->itt;
	cmd->targ_xfer_tag = 0xFFFFFFFF;
	cmd->cmd_sn = hdr->cmdsn;
	cmd->exp_stat_sn = hdr->exp_statsn;
	se_tmr = cmd->se_cmd.se_tmr_req;
	tmr_req = cmd->tmr_req;
	/*
	 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
	 */
	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
		ret = iscsit_get_lun_for_tmr(cmd,
				get_unaligned_le64(&hdr->lun));
		if (ret < 0) {
			cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
			goto attach;
		}
	}

	/* Per-function setup; failures set the response and skip ahead. */
	switch (function) {
	case ISCSI_TM_FUNC_ABORT_TASK:
		se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
		if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
			cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			goto attach;
		}
		break;
	case ISCSI_TM_FUNC_ABORT_TASK_SET:
	case ISCSI_TM_FUNC_CLEAR_ACA:
	case ISCSI_TM_FUNC_CLEAR_TASK_SET:
	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
		break;
	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
		if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
			cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
			goto attach;
		}
		break;
	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
		if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
			cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
			goto attach;
		}
		break;
	case ISCSI_TM_FUNC_TASK_REASSIGN:
		se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
		/*
		 * Perform sanity checks on the ExpDataSN only if the
		 * TASK_REASSIGN was successful.
		 */
		if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
			break;

		if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
			return iscsit_add_reject_from_cmd(
					ISCSI_REASON_BOOKMARK_INVALID, 1, 1,
					buf, cmd);
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x, protocol"
			" error.\n", function);
		cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
		goto attach;
	}

	if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
	    (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
		se_tmr->call_transport = 1;
attach:
	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	/* Non-immediate TMRs are subject to CmdSN ordering. */
	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
		int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
			out_of_order_cmdsn = 1;
		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
			return 0;
		} else { /* (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) */
			return iscsit_add_reject_from_cmd(
					ISCSI_REASON_PROTOCOL_ERROR,
					1, 0, buf, cmd);
		}
	}
	iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);

	/* OOO TMRs execute later in CmdSN order; nothing more to do now. */
	if (out_of_order_cmdsn)
		return 0;
	/*
	 * Found the referenced task, send to transport for processing.
	 */
	if (se_tmr->call_transport)
		return transport_generic_handle_tmr(&cmd->se_cmd);

	/*
	 * Could not find the referenced LUN, task, or Task Management
	 * command not authorized or supported. Change state and
	 * let the tx_thread send the response.
	 *
	 * For connection recovery, this is also the default action for
	 * TMR TASK_REASSIGN.
	 */
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
	return 0;
}
1851
1852/* #warning FIXME: Support Text Command parameters besides SendTargets */
1853static int iscsit_handle_text_cmd(
1854 struct iscsi_conn *conn,
1855 unsigned char *buf)
1856{
1857 char *text_ptr, *text_in;
1858 int cmdsn_ret, niov = 0, rx_got, rx_size;
1859 u32 checksum = 0, data_crc = 0, payload_length;
1860 u32 padding = 0, text_length = 0;
1861 struct iscsi_cmd *cmd;
1862 struct kvec iov[3];
1863 struct iscsi_text *hdr;
1864
1865 hdr = (struct iscsi_text *) buf;
1866 payload_length = ntoh24(hdr->dlength);
1867 hdr->itt = be32_to_cpu(hdr->itt);
1868 hdr->ttt = be32_to_cpu(hdr->ttt);
1869 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
1870 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
1871
1872 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
1873 pr_err("Unable to accept text parameter length: %u"
1874 "greater than MaxRecvDataSegmentLength %u.\n",
1875 payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
1876 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1877 buf, conn);
1878 }
1879
1880 pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
1881 " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
1882 hdr->exp_statsn, payload_length);
1883
1884 rx_size = text_length = payload_length;
1885 if (text_length) {
1886 text_in = kzalloc(text_length, GFP_KERNEL);
1887 if (!text_in) {
1888 pr_err("Unable to allocate memory for"
1889 " incoming text parameters\n");
1890 return -1;
1891 }
1892
1893 memset(iov, 0, 3 * sizeof(struct kvec));
1894 iov[niov].iov_base = text_in;
1895 iov[niov++].iov_len = text_length;
1896
1897 padding = ((-payload_length) & 3);
1898 if (padding != 0) {
1899 iov[niov].iov_base = cmd->pad_bytes;
1900 iov[niov++].iov_len = padding;
1901 rx_size += padding;
1902 pr_debug("Receiving %u additional bytes"
1903 " for padding.\n", padding);
1904 }
1905 if (conn->conn_ops->DataDigest) {
1906 iov[niov].iov_base = &checksum;
1907 iov[niov++].iov_len = ISCSI_CRC_LEN;
1908 rx_size += ISCSI_CRC_LEN;
1909 }
1910
1911 rx_got = rx_data(conn, &iov[0], niov, rx_size);
1912 if (rx_got != rx_size) {
1913 kfree(text_in);
1914 return -1;
1915 }
1916
1917 if (conn->conn_ops->DataDigest) {
1918 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
1919 text_in, text_length,
1920 padding, cmd->pad_bytes,
1921 (u8 *)&data_crc);
1922
1923 if (checksum != data_crc) {
1924 pr_err("Text data CRC32C DataDigest"
1925 " 0x%08x does not match computed"
1926 " 0x%08x\n", checksum, data_crc);
1927 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
1928 pr_err("Unable to recover from"
1929 " Text Data digest failure while in"
1930 " ERL=0.\n");
1931 kfree(text_in);
1932 return -1;
1933 } else {
1934 /*
1935 * Silently drop this PDU and let the
1936 * initiator plug the CmdSN gap.
1937 */
1938 pr_debug("Dropping Text"
1939 " Command CmdSN: 0x%08x due to"
1940 " DataCRC error.\n", hdr->cmdsn);
1941 kfree(text_in);
1942 return 0;
1943 }
1944 } else {
1945 pr_debug("Got CRC32C DataDigest"
1946 " 0x%08x for %u bytes of text data.\n",
1947 checksum, text_length);
1948 }
1949 }
1950 text_in[text_length - 1] = '\0';
1951 pr_debug("Successfully read %d bytes of text"
1952 " data.\n", text_length);
1953
1954 if (strncmp("SendTargets", text_in, 11) != 0) {
1955 pr_err("Received Text Data that is not"
1956 " SendTargets, cannot continue.\n");
1957 kfree(text_in);
1958 return -1;
1959 }
1960 text_ptr = strchr(text_in, '=');
1961 if (!text_ptr) {
1962 pr_err("No \"=\" separator found in Text Data,"
1963 " cannot continue.\n");
1964 kfree(text_in);
1965 return -1;
1966 }
1967 if (strncmp("=All", text_ptr, 4) != 0) {
1968 pr_err("Unable to locate All value for"
1969 " SendTargets key, cannot continue.\n");
1970 kfree(text_in);
1971 return -1;
1972 }
1973/*#warning Support SendTargets=(iSCSI Target Name/Nothing) values. */
1974 kfree(text_in);
1975 }
1976
1977 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1978 if (!cmd)
1979 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1980 1, buf, conn);
1981
1982 cmd->iscsi_opcode = ISCSI_OP_TEXT;
1983 cmd->i_state = ISTATE_SEND_TEXTRSP;
1984 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1985 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1986 cmd->targ_xfer_tag = 0xFFFFFFFF;
1987 cmd->cmd_sn = hdr->cmdsn;
1988 cmd->exp_stat_sn = hdr->exp_statsn;
1989 cmd->data_direction = DMA_NONE;
1990
1991 spin_lock_bh(&conn->cmd_lock);
1992 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
1993 spin_unlock_bh(&conn->cmd_lock);
1994
1995 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
1996
1997 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1998 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1999 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2000 return iscsit_add_reject_from_cmd(
2001 ISCSI_REASON_PROTOCOL_ERROR,
2002 1, 0, buf, cmd);
2003
2004 return 0;
2005 }
2006
2007 return iscsit_execute_cmd(cmd, 0);
2008}
2009
2010int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2011{
2012 struct iscsi_conn *conn_p;
2013 struct iscsi_session *sess = conn->sess;
2014
2015 pr_debug("Received logout request CLOSESESSION on CID: %hu"
2016 " for SID: %u.\n", conn->cid, conn->sess->sid);
2017
2018 atomic_set(&sess->session_logout, 1);
2019 atomic_set(&conn->conn_logout_remove, 1);
2020 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
2021
2022 iscsit_inc_conn_usage_count(conn);
2023 iscsit_inc_session_usage_count(sess);
2024
2025 spin_lock_bh(&sess->conn_lock);
2026 list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
2027 if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
2028 continue;
2029
2030 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2031 conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2032 }
2033 spin_unlock_bh(&sess->conn_lock);
2034
2035 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2036
2037 return 0;
2038}
2039
2040int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2041{
2042 struct iscsi_conn *l_conn;
2043 struct iscsi_session *sess = conn->sess;
2044
2045 pr_debug("Received logout request CLOSECONNECTION for CID:"
2046 " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2047
2048 /*
2049 * A Logout Request with a CLOSECONNECTION reason code for a CID
2050 * can arrive on a connection with a differing CID.
2051 */
2052 if (conn->cid == cmd->logout_cid) {
2053 spin_lock_bh(&conn->state_lock);
2054 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2055 conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2056
2057 atomic_set(&conn->conn_logout_remove, 1);
2058 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
2059 iscsit_inc_conn_usage_count(conn);
2060
2061 spin_unlock_bh(&conn->state_lock);
2062 } else {
2063 /*
2064 * Handle all different cid CLOSECONNECTION requests in
2065 * iscsit_logout_post_handler_diffcid() as to give enough
2066 * time for any non immediate command's CmdSN to be
2067 * acknowledged on the connection in question.
2068 *
2069 * Here we simply make sure the CID is still around.
2070 */
2071 l_conn = iscsit_get_conn_from_cid(sess,
2072 cmd->logout_cid);
2073 if (!l_conn) {
2074 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2075 iscsit_add_cmd_to_response_queue(cmd, conn,
2076 cmd->i_state);
2077 return 0;
2078 }
2079
2080 iscsit_dec_conn_usage_count(l_conn);
2081 }
2082
2083 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2084
2085 return 0;
2086}
2087
2088int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2089{
2090 struct iscsi_session *sess = conn->sess;
2091
2092 pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
2093 " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2094
2095 if (sess->sess_ops->ErrorRecoveryLevel != 2) {
2096 pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2097 " while ERL!=2.\n");
2098 cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
2099 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2100 return 0;
2101 }
2102
2103 if (conn->cid == cmd->logout_cid) {
2104 pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2105 " with CID: %hu on CID: %hu, implementation error.\n",
2106 cmd->logout_cid, conn->cid);
2107 cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
2108 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2109 return 0;
2110 }
2111
2112 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2113
2114 return 0;
2115}
2116
/*
 * Process a received iSCSI Logout Request PDU.
 *
 * Converts the wire-format header fields to host byte order, accounts
 * the logout in the per-TIQN SNMP statistics, allocates a logout
 * command and hands it off for execution -- immediately for immediate
 * commands, otherwise in CmdSN order via iscsit_sequence_cmd().
 *
 * Return: 1 when the calling RX thread must sleep until the Logout
 * Response has been transmitted (CLOSESESSION, or CLOSECONNECTION for
 * this connection's own CID), 0 otherwise, or a negative errno /
 * reject result on failure.
 */
static int iscsit_handle_logout_cmd(
	struct iscsi_conn *conn,
	unsigned char *buf)
{
	int cmdsn_ret, logout_remove = 0;
	u8 reason_code = 0;
	struct iscsi_cmd *cmd;
	struct iscsi_logout *hdr;
	struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);

	hdr = (struct iscsi_logout *) buf;
	/* The low seven flag bits carry the logout reason code. */
	reason_code = (hdr->flags & 0x7f);
	hdr->itt = be32_to_cpu(hdr->itt);
	hdr->cid = be16_to_cpu(hdr->cid);
	hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
	hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);

	/* Account normal (CLOSESESSION) vs. abnormal logouts for SNMP. */
	if (tiqn) {
		spin_lock(&tiqn->logout_stats.lock);
		if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
			tiqn->logout_stats.normal_logouts++;
		else
			tiqn->logout_stats.abnormal_logouts++;
		spin_unlock(&tiqn->logout_stats.lock);
	}

	pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
		" ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
		hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
		hdr->cid, conn->cid);

	if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
		pr_err("Received logout request on connection that"
			" is not in logged in state, ignoring request.\n");
		return 0;
	}

	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
	if (!cmd)
		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
					buf, conn);

	cmd->iscsi_opcode = ISCSI_OP_LOGOUT;
	cmd->i_state = ISTATE_SEND_LOGOUTRSP;
	cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
	conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
	/* Logout carries no TTT; use the reserved all-ones value. */
	cmd->targ_xfer_tag = 0xFFFFFFFF;
	cmd->cmd_sn = hdr->cmdsn;
	cmd->exp_stat_sn = hdr->exp_statsn;
	cmd->logout_cid = hdr->cid;
	cmd->logout_reason = reason_code;
	cmd->data_direction = DMA_NONE;

	/*
	 * We need to sleep in these cases (by returning 1) until the Logout
	 * Response gets sent in the tx thread.
	 */
	if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
	    ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
	     (hdr->cid == conn->cid)))
		logout_remove = 1;

	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	/* For RECOVERY logouts, ExpStatSN acknowledgement is deferred. */
	if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
		iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);

	/*
	 * Immediate commands are executed, well, immediately.
	 * Non-Immediate Logout Commands are executed in CmdSN order.
	 */
	if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
		int ret = iscsit_execute_cmd(cmd, 0);

		if (ret < 0)
			return ret;
	} else {
		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
			/* Stale CmdSN: command dropped, no need to sleep. */
			logout_remove = 0;
		} else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
			return iscsit_add_reject_from_cmd(
				ISCSI_REASON_PROTOCOL_ERROR,
				1, 0, buf, cmd);
		}
	}

	return logout_remove;
}
2208
2209static int iscsit_handle_snack(
2210 struct iscsi_conn *conn,
2211 unsigned char *buf)
2212{
2213 u32 unpacked_lun;
2214 u64 lun;
2215 struct iscsi_snack *hdr;
2216
2217 hdr = (struct iscsi_snack *) buf;
2218 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
2219 lun = get_unaligned_le64(&hdr->lun);
2220 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
2221 hdr->itt = be32_to_cpu(hdr->itt);
2222 hdr->ttt = be32_to_cpu(hdr->ttt);
2223 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
2224 hdr->begrun = be32_to_cpu(hdr->begrun);
2225 hdr->runlength = be32_to_cpu(hdr->runlength);
2226
2227 pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
2228 " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
2229 " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
2230 hdr->begrun, hdr->runlength, conn->cid);
2231
2232 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2233 pr_err("Initiator sent SNACK request while in"
2234 " ErrorRecoveryLevel=0.\n");
2235 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
2236 buf, conn);
2237 }
2238 /*
2239 * SNACK_DATA and SNACK_R2T are both 0, so check which function to
2240 * call from inside iscsi_send_recovery_datain_or_r2t().
2241 */
2242 switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
2243 case 0:
2244 return iscsit_handle_recovery_datain_or_r2t(conn, buf,
2245 hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength);
2246 return 0;
2247 case ISCSI_FLAG_SNACK_TYPE_STATUS:
2248 return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,
2249 hdr->begrun, hdr->runlength);
2250 case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
2251 return iscsit_handle_data_ack(conn, hdr->ttt, hdr->begrun,
2252 hdr->runlength);
2253 case ISCSI_FLAG_SNACK_TYPE_RDATA:
2254 /* FIXME: Support R-Data SNACK */
2255 pr_err("R-Data SNACK Not Supported.\n");
2256 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
2257 buf, conn);
2258 default:
2259 pr_err("Unknown SNACK type 0x%02x, protocol"
2260 " error.\n", hdr->flags & 0x0f);
2261 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
2262 buf, conn);
2263 }
2264
2265 return 0;
2266}
2267
2268static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
2269{
2270 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2271 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2272 wait_for_completion_interruptible_timeout(
2273 &conn->rx_half_close_comp,
2274 ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
2275 }
2276}
2277
/*
 * Receive the unsolicited immediate data payload attached to a SCSI
 * Command PDU.
 *
 * Maps @length bytes of the command's data buffer (starting at
 * cmd->write_data_done) into cmd->iov_data, appends pad bytes and an
 * optional DataDigest slot, and pulls everything off the socket with a
 * single rx_data() call.  A DataDigest mismatch is handled according to
 * the negotiated ErrorRecoveryLevel.
 *
 * Return: IMMEDIATE_DATA_NORMAL_OPERATION on success,
 * IMMEDIATE_DATA_ERL1_CRC_FAILURE for a recoverable digest failure, or
 * IMMEDIATE_DATA_CANNOT_RECOVER on fatal errors.
 */
static int iscsit_handle_immediate_data(
	struct iscsi_cmd *cmd,
	unsigned char *buf,
	u32 length)
{
	int iov_ret, rx_got = 0, rx_size = 0;
	u32 checksum, iov_count = 0, padding = 0;
	struct iscsi_conn *conn = cmd->conn;
	struct kvec *iov;

	iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length);
	if (iov_ret < 0)
		return IMMEDIATE_DATA_CANNOT_RECOVER;

	rx_size = length;
	iov_count = iov_ret;
	iov = &cmd->iov_data[0];

	/* Data segments are padded to the next 4-byte boundary. */
	padding = ((-length) & 3);
	if (padding != 0) {
		iov[iov_count].iov_base = cmd->pad_bytes;
		iov[iov_count++].iov_len = padding;
		rx_size += padding;
	}

	/* Reserve space to receive the initiator's CRC32C DataDigest. */
	if (conn->conn_ops->DataDigest) {
		iov[iov_count].iov_base = &checksum;
		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
		rx_size += ISCSI_CRC_LEN;
	}

	rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);

	iscsit_unmap_iovec(cmd);

	/* Short read: the TCP connection is going away. */
	if (rx_got != rx_size) {
		iscsit_rx_thread_wait_for_tcp(conn);
		return IMMEDIATE_DATA_CANNOT_RECOVER;
	}

	if (conn->conn_ops->DataDigest) {
		u32 data_crc;

		/* Recompute CRC32C over the received data plus padding. */
		data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
						cmd->write_data_done, length, padding,
						cmd->pad_bytes);

		if (checksum != data_crc) {
			pr_err("ImmediateData CRC32C DataDigest 0x%08x"
				" does not match computed 0x%08x\n", checksum,
				data_crc);

			if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
				/* ERL=0: reject; connection cannot recover. */
				pr_err("Unable to recover from"
					" Immediate Data digest failure while"
					" in ERL=0.\n");
				iscsit_add_reject_from_cmd(
						ISCSI_REASON_DATA_DIGEST_ERROR,
						1, 0, buf, cmd);
				return IMMEDIATE_DATA_CANNOT_RECOVER;
			} else {
				/* ERL>=1: reject; recovery may retransmit. */
				iscsit_add_reject_from_cmd(
						ISCSI_REASON_DATA_DIGEST_ERROR,
						0, 0, buf, cmd);
				return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
			}
		} else {
			pr_debug("Got CRC32C DataDigest 0x%08x for"
				" %u bytes of Immediate Data\n", checksum,
				length);
		}
	}

	cmd->write_data_done += length;

	/* All WRITE data received: flag the final DataOUT state. */
	if (cmd->write_data_done == cmd->data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

	return IMMEDIATE_DATA_NORMAL_OPERATION;
}
2362
2363/*
2364 * Called with sess->conn_lock held.
2365 */
2366/* #warning iscsi_build_conn_drop_async_message() only sends out on connections
2367 with active network interface */
2368static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
2369{
2370 struct iscsi_cmd *cmd;
2371 struct iscsi_conn *conn_p;
2372
2373 /*
2374 * Only send a Asynchronous Message on connections whos network
2375 * interface is still functional.
2376 */
2377 list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
2378 if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
2379 iscsit_inc_conn_usage_count(conn_p);
2380 break;
2381 }
2382 }
2383
2384 if (!conn_p)
2385 return;
2386
2387 cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
2388 if (!cmd) {
2389 iscsit_dec_conn_usage_count(conn_p);
2390 return;
2391 }
2392
2393 cmd->logout_cid = conn->cid;
2394 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2395 cmd->i_state = ISTATE_SEND_ASYNCMSG;
2396
2397 spin_lock_bh(&conn_p->cmd_lock);
2398 list_add_tail(&cmd->i_list, &conn_p->conn_cmd_list);
2399 spin_unlock_bh(&conn_p->cmd_lock);
2400
2401 iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
2402 iscsit_dec_conn_usage_count(conn_p);
2403}
2404
/*
 * Fill cmd->pdu with an Asynchronous Message PDU announcing that the
 * connection identified by cmd->logout_cid is being dropped, and set up
 * cmd->iov_misc for transmission by the TX thread.
 *
 * Param1 carries the dropped CID; Param2/Param3 advertise the session's
 * DefaultTime2Wait / DefaultTime2Retain values.
 *
 * Return: always 0.
 */
static int iscsit_send_conn_drop_async_message(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_async *hdr;

	cmd->tx_size = ISCSI_HDR_LEN;
	cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;

	hdr = (struct iscsi_async *) cmd->pdu;
	hdr->opcode = ISCSI_OP_ASYNC_EVENT;
	hdr->flags = ISCSI_FLAG_CMD_FINAL;
	/* Async messages carry no task tags; use reserved all-ones. */
	cmd->init_task_tag = 0xFFFFFFFF;
	cmd->targ_xfer_tag = 0xFFFFFFFF;
	put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
	cmd->stat_sn = conn->stat_sn++;
	hdr->statsn = cpu_to_be32(cmd->stat_sn);
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
	hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
	hdr->param1 = cpu_to_be16(cmd->logout_cid);
	hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
	hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		/* Append the CRC32C HeaderDigest directly after the BHS. */
		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		cmd->tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32C HeaderDigest to"
			" Async Message 0x%08x\n", *header_digest);
	}

	cmd->iov_misc[0].iov_base = cmd->pdu;
	cmd->iov_misc[0].iov_len = cmd->tx_size;
	cmd->iov_misc_count = 1;

	pr_debug("Sending Connection Dropped Async Message StatSN:"
		" 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
		cmd->logout_cid, conn->cid);
	return 0;
}
2450
/*
 * Build one SCSI Data-In PDU for @cmd and stage header, payload
 * mapping, padding and digests into cmd->iov_data for transmission.
 *
 * The datain offset/length/flags for this PDU come from
 * iscsit_get_datain_values().  StatSN is only advanced and set when
 * this PDU carries phase-collapsed status (no separate Response PDU
 * will follow).
 *
 * @eodr: out-parameter; set to 1 or 2 when the datain request
 *        completes (2 when sense data still has to be sent).
 *
 * Return: 0 on success, -1 on failure.
 */
static int iscsit_send_data_in(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	int *eodr)
{
	int iov_ret = 0, set_statsn = 0;
	u32 iov_count = 0, tx_size = 0;
	struct iscsi_datain datain;
	struct iscsi_datain_req *dr;
	struct iscsi_data_rsp *hdr;
	struct kvec *iov;

	memset(&datain, 0, sizeof(struct iscsi_datain));
	dr = iscsit_get_datain_values(cmd, &datain);
	if (!dr) {
		pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
				cmd->init_task_tag);
		return -1;
	}

	/*
	 * Be paranoid and double check the logic for now.
	 */
	if ((datain.offset + datain.length) > cmd->data_length) {
		pr_err("Command ITT: 0x%08x, datain.offset: %u and"
			" datain.length: %u exceeds cmd->data_length: %u\n",
			cmd->init_task_tag, datain.offset, datain.length,
			cmd->data_length);
		return -1;
	}

	/* Account transmitted octets per session and per node ACL. */
	spin_lock_bh(&conn->sess->session_stats_lock);
	conn->sess->tx_data_octets += datain.length;
	if (conn->sess->se_sess->se_node_acl) {
		spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
		conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
		spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
	}
	spin_unlock_bh(&conn->sess->session_stats_lock);
	/*
	 * Special case for successfully execution w/ both DATAIN
	 * and Sense Data.
	 */
	if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
	    (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
		datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
	else {
		if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
		    (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
			iscsit_increment_maxcmdsn(cmd, conn->sess);
			cmd->stat_sn = conn->stat_sn++;
			set_statsn = 1;
		} else if (dr->dr_complete ==
				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
			set_statsn = 1;
	}

	hdr = (struct iscsi_data_rsp *) cmd->pdu;
	memset(hdr, 0, ISCSI_HDR_LEN);
	hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
	hdr->flags = datain.flags;
	/* Residuals are only reported on the status-bearing PDU. */
	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
		if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
			hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
			hdr->residual_count = cpu_to_be32(cmd->residual_count);
		} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
			hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
			hdr->residual_count = cpu_to_be32(cmd->residual_count);
		}
	}
	hton24(hdr->dlength, datain.length);
	/* LUN field is only meaningful when requesting a DataACK. */
	if (hdr->flags & ISCSI_FLAG_DATA_ACK)
		int_to_scsilun(cmd->se_cmd.orig_fe_lun,
				(struct scsi_lun *)&hdr->lun);
	else
		put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);

	hdr->itt = cpu_to_be32(cmd->init_task_tag);
	hdr->ttt = (hdr->flags & ISCSI_FLAG_DATA_ACK) ?
		   cpu_to_be32(cmd->targ_xfer_tag) :
		   0xFFFFFFFF;
	hdr->statsn = (set_statsn) ? cpu_to_be32(cmd->stat_sn) :
		   0xFFFFFFFF;
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
	hdr->datasn = cpu_to_be32(datain.data_sn);
	hdr->offset = cpu_to_be32(datain.offset);

	/* iov[0] is the BHS (+HeaderDigest); payload starts at iov[1]. */
	iov = &cmd->iov_data[0];
	iov[iov_count].iov_base = cmd->pdu;
	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
	tx_size += ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;

		pr_debug("Attaching CRC32 HeaderDigest"
			" for DataIN PDU 0x%08x\n", *header_digest);
	}

	iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length);
	if (iov_ret < 0)
		return -1;

	iov_count += iov_ret;
	tx_size += datain.length;

	/* Pad the data segment to the next 4-byte boundary. */
	cmd->padding = ((-datain.length) & 3);
	if (cmd->padding) {
		iov[iov_count].iov_base = cmd->pad_bytes;
		iov[iov_count++].iov_len = cmd->padding;
		tx_size += cmd->padding;

		pr_debug("Attaching %u padding bytes\n",
				cmd->padding);
	}
	if (conn->conn_ops->DataDigest) {
		cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd,
			 datain.offset, datain.length, cmd->padding, cmd->pad_bytes);

		iov[iov_count].iov_base = &cmd->data_crc;
		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;

		pr_debug("Attached CRC32C DataDigest %d bytes, crc"
			" 0x%08x\n", datain.length+cmd->padding, cmd->data_crc);
	}

	cmd->iov_data_count = iov_count;
	cmd->tx_size = tx_size;

	pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
		cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
		ntohl(hdr->offset), datain.length, conn->cid);

	if (dr->dr_complete) {
		*eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
				2 : 1;
		iscsit_free_datain_req(cmd, dr);
	}

	return 0;
}
2602
/*
 * Build a Logout Response PDU in cmd->pdu for the TX thread.
 *
 * Determines the final cmd->logout_response per reason code (for
 * RECOVERY this also forces reinstatement of a still-active connection
 * and discards acknowledged commands from the recovery entry), then
 * fills the response header and cmd->iov_misc.
 *
 * Return: 0 on success, -1 on an unknown logout reason.
 */
static int iscsit_send_logout_response(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	int niov = 0, tx_size;
	struct iscsi_conn *logout_conn = NULL;
	struct iscsi_conn_recovery *cr = NULL;
	struct iscsi_session *sess = conn->sess;
	struct kvec *iov;
	struct iscsi_logout_rsp *hdr;
	/*
	 * The actual shutting down of Sessions and/or Connections
	 * for CLOSESESSION and CLOSECONNECTION Logout Requests
	 * is done in scsi_logout_post_handler().
	 */
	switch (cmd->logout_reason) {
	case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
		pr_debug("iSCSI session logout successful, setting"
			" logout response to ISCSI_LOGOUT_SUCCESS.\n");
		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
		break;
	case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
		/* CID lookup already failed earlier; keep that response. */
		if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
			break;
		/*
		 * For CLOSECONNECTION logout requests carrying
		 * a matching logout CID -> local CID, the reference
		 * for the local CID will have been incremented in
		 * iscsi_logout_closeconnection().
		 *
		 * For CLOSECONNECTION logout requests carrying
		 * a different CID than the connection it arrived
		 * on, the connection responding to cmd->logout_cid
		 * is stopped in iscsit_logout_post_handler_diffcid().
		 */

		pr_debug("iSCSI CID: %hu logout on CID: %hu"
			" successful.\n", cmd->logout_cid, conn->cid);
		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
		break;
	case ISCSI_LOGOUT_REASON_RECOVERY:
		/* Earlier validation failures keep their response code. */
		if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
		    (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
			break;
		/*
		 * If the connection is still active from our point of view
		 * force connection recovery to occur.
		 */
		logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
				cmd->logout_cid);
		if ((logout_conn)) {
			iscsit_connection_reinstatement_rcfr(logout_conn);
			iscsit_dec_conn_usage_count(logout_conn);
		}

		cr = iscsit_get_inactive_connection_recovery_entry(
				conn->sess, cmd->logout_cid);
		if (!cr) {
			pr_err("Unable to locate CID: %hu for"
			" REMOVECONNFORRECOVERY Logout Request.\n",
				cmd->logout_cid);
			cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
			break;
		}

		/* Drop commands the initiator has already acknowledged. */
		iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);

		pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
			" for recovery for CID: %hu on CID: %hu successful.\n",
				cmd->logout_cid, conn->cid);
		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
		break;
	default:
		pr_err("Unknown cmd->logout_reason: 0x%02x\n",
				cmd->logout_reason);
		return -1;
	}

	tx_size = ISCSI_HDR_LEN;
	hdr = (struct iscsi_logout_rsp *)cmd->pdu;
	memset(hdr, 0, ISCSI_HDR_LEN);
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	hdr->response = cmd->logout_response;
	hdr->itt = cpu_to_be32(cmd->init_task_tag);
	cmd->stat_sn = conn->stat_sn++;
	hdr->statsn = cpu_to_be32(cmd->stat_sn);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);

	iov = &cmd->iov_misc[0];
	iov[niov].iov_base = cmd->pdu;
	iov[niov++].iov_len = ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32C HeaderDigest to"
			" Logout Response 0x%08x\n", *header_digest);
	}
	cmd->iov_misc_count = niov;
	cmd->tx_size = tx_size;

	pr_debug("Sending Logout Response ITT: 0x%08x StatSN:"
		" 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
		cmd->init_task_tag, cmd->stat_sn, hdr->response,
		cmd->logout_cid, conn->cid);

	return 0;
}
2721
2722/*
2723 * Unsolicited NOPIN, either requesting a response or not.
2724 */
2725static int iscsit_send_unsolicited_nopin(
2726 struct iscsi_cmd *cmd,
2727 struct iscsi_conn *conn,
2728 int want_response)
2729{
2730 int tx_size = ISCSI_HDR_LEN;
2731 struct iscsi_nopin *hdr;
2732
2733 hdr = (struct iscsi_nopin *) cmd->pdu;
2734 memset(hdr, 0, ISCSI_HDR_LEN);
2735 hdr->opcode = ISCSI_OP_NOOP_IN;
2736 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2737 hdr->itt = cpu_to_be32(cmd->init_task_tag);
2738 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2739 cmd->stat_sn = conn->stat_sn;
2740 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2741 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2742 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2743
2744 if (conn->conn_ops->HeaderDigest) {
2745 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2746
2747 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2748 (unsigned char *)hdr, ISCSI_HDR_LEN,
2749 0, NULL, (u8 *)header_digest);
2750
2751 tx_size += ISCSI_CRC_LEN;
2752 pr_debug("Attaching CRC32C HeaderDigest to"
2753 " NopIN 0x%08x\n", *header_digest);
2754 }
2755
2756 cmd->iov_misc[0].iov_base = cmd->pdu;
2757 cmd->iov_misc[0].iov_len = tx_size;
2758 cmd->iov_misc_count = 1;
2759 cmd->tx_size = tx_size;
2760
2761 pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
2762 " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
2763
2764 return 0;
2765}
2766
/*
 * Build a NopIN PDU responding to an initiator NopOUT, echoing back any
 * ping data the initiator supplied (cmd->buf_ptr / cmd->buf_ptr_size),
 * and stage everything in cmd->iov_misc for the TX thread.
 *
 * Unlike unsolicited NopINs, a NopIN response consumes a StatSN.
 *
 * Return: always 0.
 */
static int iscsit_send_nopin_response(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	int niov = 0, tx_size;
	u32 padding = 0;
	struct kvec *iov;
	struct iscsi_nopin *hdr;

	tx_size = ISCSI_HDR_LEN;
	hdr = (struct iscsi_nopin *) cmd->pdu;
	memset(hdr, 0, ISCSI_HDR_LEN);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	hton24(hdr->dlength, cmd->buf_ptr_size);
	/* NopIN carries no LUN; use the reserved all-ones value. */
	put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
	hdr->itt = cpu_to_be32(cmd->init_task_tag);
	hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
	cmd->stat_sn = conn->stat_sn++;
	hdr->statsn = cpu_to_be32(cmd->stat_sn);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);

	iov = &cmd->iov_misc[0];
	iov[niov].iov_base = cmd->pdu;
	iov[niov++].iov_len = ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32C HeaderDigest"
			" to NopIn 0x%08x\n", *header_digest);
	}

	/*
	 * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
	 * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
	 */
	if (cmd->buf_ptr_size) {
		iov[niov].iov_base = cmd->buf_ptr;
		iov[niov++].iov_len = cmd->buf_ptr_size;
		tx_size += cmd->buf_ptr_size;

		pr_debug("Echoing back %u bytes of ping"
			" data.\n", cmd->buf_ptr_size);

		/* Pad the ping data to the next 4-byte boundary. */
		padding = ((-cmd->buf_ptr_size) & 3);
		if (padding != 0) {
			iov[niov].iov_base = &cmd->pad_bytes;
			iov[niov++].iov_len = padding;
			tx_size += padding;
			pr_debug("Attaching %u additional"
				" padding bytes.\n", padding);
		}
		if (conn->conn_ops->DataDigest) {
			/* DataDigest covers the ping data plus padding. */
			iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				cmd->buf_ptr, cmd->buf_ptr_size,
				padding, (u8 *)&cmd->pad_bytes,
				(u8 *)&cmd->data_crc);

			iov[niov].iov_base = &cmd->data_crc;
			iov[niov++].iov_len = ISCSI_CRC_LEN;
			tx_size += ISCSI_CRC_LEN;
			pr_debug("Attached DataDigest for %u"
				" bytes of ping data, CRC 0x%08x\n",
				cmd->buf_ptr_size, cmd->data_crc);
		}
	}

	cmd->iov_misc_count = niov;
	cmd->tx_size = tx_size;

	pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:"
		" 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag,
		cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);

	return 0;
}
2853
/*
 * Build and stage a Ready To Transfer (R2T) PDU for the next R2T
 * descriptor queued on @cmd, allocating a fresh Target Transfer Tag
 * from the session counter.
 *
 * Return: 0 on success, -1 if no R2T descriptor is pending.
 */
int iscsit_send_r2t(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	int tx_size = 0;
	struct iscsi_r2t *r2t;
	struct iscsi_r2t_rsp *hdr;

	r2t = iscsit_get_r2t_from_list(cmd);
	if (!r2t)
		return -1;

	hdr = (struct iscsi_r2t_rsp *) cmd->pdu;
	memset(hdr, 0, ISCSI_HDR_LEN);
	hdr->opcode = ISCSI_OP_R2T;
	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	int_to_scsilun(cmd->se_cmd.orig_fe_lun,
			(struct scsi_lun *)&hdr->lun);
	hdr->itt = cpu_to_be32(cmd->init_task_tag);
	/*
	 * Allocate a session-wide TTT under ttt_lock; 0xFFFFFFFF is the
	 * reserved value, so skip over it when the counter wraps.
	 */
	spin_lock_bh(&conn->sess->ttt_lock);
	r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
	if (r2t->targ_xfer_tag == 0xFFFFFFFF)
		r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
	spin_unlock_bh(&conn->sess->ttt_lock);
	hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
	hdr->statsn = cpu_to_be32(conn->stat_sn);
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
	hdr->r2tsn = cpu_to_be32(r2t->r2t_sn);
	hdr->data_offset = cpu_to_be32(r2t->offset);
	hdr->data_length = cpu_to_be32(r2t->xfer_len);

	cmd->iov_misc[0].iov_base = cmd->pdu;
	cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
	tx_size += ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32 HeaderDigest for R2T"
			" PDU 0x%08x\n", *header_digest);
	}

	pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
		" 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
		(!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
		r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
		r2t->offset, r2t->xfer_len, conn->cid);

	cmd->iov_misc_count = 1;
	cmd->tx_size = tx_size;

	/* Mark the descriptor as on-the-wire under r2t_lock. */
	spin_lock_bh(&cmd->r2t_lock);
	r2t->sent_r2t = 1;
	spin_unlock_bh(&cmd->r2t_lock);

	return 0;
}
2918
/*
 * Queue as many R2T descriptors for @cmd as the negotiated
 * MaxOutstandingR2T allows, sizing each by MaxBurstLength (in-order) or
 * by the command's sequence list (out-of-order).
 *
 * type 0: Normal Operation.
 * type 1: Called from Storage Transport.
 * type 2: Called from iscsi_task_reassign_complete_write() for
 *         connection recovery.
 *
 * Return: 0 on success, -1 on failure to build/queue a descriptor.
 */
int iscsit_build_r2ts_for_cmd(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	int type)
{
	int first_r2t = 1;
	u32 offset = 0, xfer_len = 0;

	spin_lock_bh(&cmd->r2t_lock);
	/* Nothing to do once the final R2T has been issued. */
	if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
		spin_unlock_bh(&cmd->r2t_lock);
		return 0;
	}

	/* Skip over data already received (e.g. as immediate data). */
	if (conn->sess->sess_ops->DataSequenceInOrder && (type != 2))
		if (cmd->r2t_offset < cmd->write_data_done)
			cmd->r2t_offset = cmd->write_data_done;

	while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
		if (conn->sess->sess_ops->DataSequenceInOrder) {
			offset = cmd->r2t_offset;

			if (first_r2t && (type == 2)) {
				/*
				 * Recovery: the first R2T only covers what
				 * remains of the interrupted burst.
				 */
				xfer_len = ((offset +
					(conn->sess->sess_ops->MaxBurstLength -
					cmd->next_burst_len) >
					cmd->data_length) ?
					(cmd->data_length - offset) :
					(conn->sess->sess_ops->MaxBurstLength -
					cmd->next_burst_len));
			} else {
				/* Full burst, clamped to remaining data. */
				xfer_len = ((offset +
					conn->sess->sess_ops->MaxBurstLength) >
					cmd->data_length) ?
					(cmd->data_length - offset) :
					conn->sess->sess_ops->MaxBurstLength;
			}
			cmd->r2t_offset += xfer_len;

			if (cmd->r2t_offset == cmd->data_length)
				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
		} else {
			struct iscsi_seq *seq;

			/* Out-of-order: each R2T maps one sequence. */
			seq = iscsit_get_seq_holder_for_r2t(cmd);
			if (!seq) {
				spin_unlock_bh(&cmd->r2t_lock);
				return -1;
			}

			offset = seq->offset;
			xfer_len = seq->xfer_len;

			if (cmd->seq_send_order == cmd->seq_count)
				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
		}
		cmd->outstanding_r2ts++;
		first_r2t = 0;

		if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
			spin_unlock_bh(&cmd->r2t_lock);
			return -1;
		}

		if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
			break;
	}
	spin_unlock_bh(&cmd->r2t_lock);

	return 0;
}
2996
/*
 * Build a SCSI Response PDU for @cmd, attaching sense data (padded and
 * digested as negotiated) when present, and stage it in cmd->iov_misc.
 *
 * A StatSN is only newly consumed on the first (non-recovery) send;
 * retransmissions reuse the already-assigned cmd->stat_sn.
 *
 * Return: always 0.
 */
static int iscsit_send_status(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	u8 iov_count = 0, recovery;
	u32 padding = 0, tx_size = 0;
	struct iscsi_scsi_rsp *hdr;
	struct kvec *iov;

	recovery = (cmd->i_state != ISTATE_SEND_STATUS);
	if (!recovery)
		cmd->stat_sn = conn->stat_sn++;

	spin_lock_bh(&conn->sess->session_stats_lock);
	conn->sess->rsp_pdus++;
	spin_unlock_bh(&conn->sess->session_stats_lock);

	hdr = (struct iscsi_scsi_rsp *) cmd->pdu;
	memset(hdr, 0, ISCSI_HDR_LEN);
	hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
		hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
		hdr->residual_count = cpu_to_be32(cmd->residual_count);
	} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
		hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
		hdr->residual_count = cpu_to_be32(cmd->residual_count);
	}
	hdr->response = cmd->iscsi_response;
	hdr->cmd_status = cmd->se_cmd.scsi_status;
	hdr->itt = cpu_to_be32(cmd->init_task_tag);
	hdr->statsn = cpu_to_be32(cmd->stat_sn);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);

	iov = &cmd->iov_misc[0];
	iov[iov_count].iov_base = cmd->pdu;
	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
	tx_size += ISCSI_HDR_LEN;

	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	   ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		/* Sense data is padded to the next 4-byte boundary. */
		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, cmd->se_cmd.scsi_sense_length);
		iov[iov_count].iov_base = cmd->se_cmd.sense_buffer;
		iov[iov_count++].iov_len =
				(cmd->se_cmd.scsi_sense_length + padding);
		tx_size += cmd->se_cmd.scsi_sense_length;

		if (padding) {
			/* Zero the pad bytes in place after the sense. */
			memset(cmd->se_cmd.sense_buffer +
				cmd->se_cmd.scsi_sense_length, 0, padding);
			tx_size += padding;
			pr_debug("Adding %u bytes of padding to"
				" SENSE.\n", padding);
		}

		if (conn->conn_ops->DataDigest) {
			iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				cmd->se_cmd.sense_buffer,
				(cmd->se_cmd.scsi_sense_length + padding),
				0, NULL, (u8 *)&cmd->data_crc);

			iov[iov_count].iov_base = &cmd->data_crc;
			iov[iov_count++].iov_len = ISCSI_CRC_LEN;
			tx_size += ISCSI_CRC_LEN;

			pr_debug("Attaching CRC32 DataDigest for"
				" SENSE, %u bytes CRC 0x%08x\n",
				(cmd->se_cmd.scsi_sense_length + padding),
				cmd->data_crc);
		}

		pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
				" Response PDU\n",
				cmd->se_cmd.scsi_sense_length);
	}

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32 HeaderDigest for Response"
				" PDU 0x%08x\n", *header_digest);
	}

	cmd->iov_misc_count = iov_count;
	cmd->tx_size = tx_size;

	pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
		" Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
		(!recovery) ? "" : "Recovery ", cmd->init_task_tag,
		cmd->stat_sn, 0x00, cmd->se_cmd.scsi_status, conn->cid);

	return 0;
}
3104
3105static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
3106{
3107 switch (se_tmr->response) {
3108 case TMR_FUNCTION_COMPLETE:
3109 return ISCSI_TMF_RSP_COMPLETE;
3110 case TMR_TASK_DOES_NOT_EXIST:
3111 return ISCSI_TMF_RSP_NO_TASK;
3112 case TMR_LUN_DOES_NOT_EXIST:
3113 return ISCSI_TMF_RSP_NO_LUN;
3114 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3115 return ISCSI_TMF_RSP_NOT_SUPPORTED;
3116 case TMR_FUNCTION_AUTHORIZATION_FAILED:
3117 return ISCSI_TMF_RSP_AUTH_FAILED;
3118 case TMR_FUNCTION_REJECTED:
3119 default:
3120 return ISCSI_TMF_RSP_REJECTED;
3121 }
3122}
3123
/*
 * Build an iSCSI Task Management Function Response PDU for @cmd into
 * cmd->pdu and stage it in cmd->iov_misc[] for later transmission by
 * the TX thread (via iscsit_send_tx_data()).  Always returns 0.
 */
static int iscsit_send_task_mgt_rsp(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
	struct iscsi_tm_rsp *hdr;
	u32 tx_size = 0;

	/* Fill the basic header segment in the preallocated PDU buffer. */
	hdr = (struct iscsi_tm_rsp *) cmd->pdu;
	memset(hdr, 0, ISCSI_HDR_LEN);
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
	hdr->itt = cpu_to_be32(cmd->init_task_tag);
	/* TMF responses consume a StatSN from the connection counter. */
	cmd->stat_sn = conn->stat_sn++;
	hdr->statsn = cpu_to_be32(cmd->stat_sn);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);

	cmd->iov_misc[0].iov_base = cmd->pdu;
	cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
	tx_size += ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		/* CRC32C digest is stored directly after the header. */
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32 HeaderDigest for Task"
			" Mgmt Response PDU 0x%08x\n", *header_digest);
	}

	cmd->iov_misc_count = 1;
	cmd->tx_size = tx_size;

	pr_debug("Built Task Management Response ITT: 0x%08x,"
		" StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
		cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);

	return 0;
}
3170
3171static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3172{
3173 char *payload = NULL;
3174 struct iscsi_conn *conn = cmd->conn;
3175 struct iscsi_portal_group *tpg;
3176 struct iscsi_tiqn *tiqn;
3177 struct iscsi_tpg_np *tpg_np;
3178 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
3179 unsigned char buf[256];
3180
3181 buffer_len = (conn->conn_ops->MaxRecvDataSegmentLength > 32768) ?
3182 32768 : conn->conn_ops->MaxRecvDataSegmentLength;
3183
3184 memset(buf, 0, 256);
3185
3186 payload = kzalloc(buffer_len, GFP_KERNEL);
3187 if (!payload) {
3188 pr_err("Unable to allocate memory for sendtargets"
3189 " response.\n");
3190 return -ENOMEM;
3191 }
3192
3193 spin_lock(&tiqn_lock);
3194 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
3195 len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
3196 len += 1;
3197
3198 if ((len + payload_len) > buffer_len) {
3199 spin_unlock(&tiqn->tiqn_tpg_lock);
3200 end_of_buf = 1;
3201 goto eob;
3202 }
3203 memcpy((void *)payload + payload_len, buf, len);
3204 payload_len += len;
3205
3206 spin_lock(&tiqn->tiqn_tpg_lock);
3207 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
3208
3209 spin_lock(&tpg->tpg_state_lock);
3210 if ((tpg->tpg_state == TPG_STATE_FREE) ||
3211 (tpg->tpg_state == TPG_STATE_INACTIVE)) {
3212 spin_unlock(&tpg->tpg_state_lock);
3213 continue;
3214 }
3215 spin_unlock(&tpg->tpg_state_lock);
3216
3217 spin_lock(&tpg->tpg_np_lock);
3218 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
3219 tpg_np_list) {
3220 len = sprintf(buf, "TargetAddress="
3221 "%s%s%s:%hu,%hu",
3222 (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
3223 "[" : "", tpg_np->tpg_np->np_ip,
3224 (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
3225 "]" : "", tpg_np->tpg_np->np_port,
3226 tpg->tpgt);
3227 len += 1;
3228
3229 if ((len + payload_len) > buffer_len) {
3230 spin_unlock(&tpg->tpg_np_lock);
3231 spin_unlock(&tiqn->tiqn_tpg_lock);
3232 end_of_buf = 1;
3233 goto eob;
3234 }
3235 memcpy((void *)payload + payload_len, buf, len);
3236 payload_len += len;
3237 }
3238 spin_unlock(&tpg->tpg_np_lock);
3239 }
3240 spin_unlock(&tiqn->tiqn_tpg_lock);
3241eob:
3242 if (end_of_buf)
3243 break;
3244 }
3245 spin_unlock(&tiqn_lock);
3246
3247 cmd->buf_ptr = payload;
3248
3249 return payload_len;
3250}
3251
3252/*
3253 * FIXME: Add support for F_BIT and C_BIT when the length is longer than
3254 * MaxRecvDataSegmentLength.
3255 */
/*
 * Build a Text Response PDU carrying the SendTargets payload produced
 * by iscsit_build_sendtargets_response().  Header, payload (zero-padded
 * to a 4-byte boundary) and optional Header/Data digests are staged in
 * cmd->iov_misc[] for the TX thread.  Returns 0 on success, or the
 * negative error from payload allocation.
 */
static int iscsit_send_text_rsp(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_text_rsp *hdr;
	struct kvec *iov;
	u32 padding = 0, tx_size = 0;
	int text_length, iov_count = 0;

	text_length = iscsit_build_sendtargets_response(cmd);
	if (text_length < 0)
		return text_length;

	/* Pad the payload out to the next 4-byte boundary with zeroes. */
	padding = ((-text_length) & 3);
	if (padding != 0) {
		memset(cmd->buf_ptr + text_length, 0, padding);
		pr_debug("Attaching %u additional bytes for"
			" padding.\n", padding);
	}

	hdr = (struct iscsi_text_rsp *) cmd->pdu;
	memset(hdr, 0, ISCSI_HDR_LEN);
	hdr->opcode = ISCSI_OP_TEXT_RSP;
	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	hton24(hdr->dlength, text_length);
	hdr->itt = cpu_to_be32(cmd->init_task_tag);
	hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
	/* Text responses consume a StatSN. */
	cmd->stat_sn = conn->stat_sn++;
	hdr->statsn = cpu_to_be32(cmd->stat_sn);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);

	/* iov[0] = header, iov[1] = padded text payload. */
	iov = &cmd->iov_misc[0];

	iov[iov_count].iov_base = cmd->pdu;
	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
	iov[iov_count].iov_base = cmd->buf_ptr;
	iov[iov_count++].iov_len = text_length + padding;

	tx_size += (ISCSI_HDR_LEN + text_length + padding);

	if (conn->conn_ops->HeaderDigest) {
		/* CRC32C of the header, stored just past it in cmd->pdu. */
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32 HeaderDigest for"
			" Text Response PDU 0x%08x\n", *header_digest);
	}

	if (conn->conn_ops->DataDigest) {
		/* Data digest covers the payload plus any pad bytes. */
		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				cmd->buf_ptr, (text_length + padding),
				0, NULL, (u8 *)&cmd->data_crc);

		iov[iov_count].iov_base = &cmd->data_crc;
		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;

		pr_debug("Attaching DataDigest for %u bytes of text"
			" data, CRC 0x%08x\n", (text_length + padding),
				cmd->data_crc);
	}

	cmd->iov_misc_count = iov_count;
	cmd->tx_size = tx_size;

	pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
		" Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
			text_length, conn->cid);
	return 0;
}
3334
/*
 * Build a Reject PDU whose data segment is the offending 48-byte PDU
 * header (cmd->buf_ptr), staging header, payload and optional digests
 * in cmd->iov_misc[].  Always returns 0.
 *
 * NOTE(review): unlike the other PDU builders, hdr is not memset here;
 * hdr->reason (logged below) appears to be filled in earlier by the
 * reject queueing path — confirm against iscsit_add_reject() before
 * changing this.
 */
static int iscsit_send_reject(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	u32 iov_count = 0, tx_size = 0;
	struct iscsi_reject *hdr;
	struct kvec *iov;

	hdr = (struct iscsi_reject *) cmd->pdu;
	hdr->opcode = ISCSI_OP_REJECT;
	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	/* Data segment is the rejected header, so dlength is fixed. */
	hton24(hdr->dlength, ISCSI_HDR_LEN);
	cmd->stat_sn = conn->stat_sn++;
	hdr->statsn = cpu_to_be32(cmd->stat_sn);
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);

	/* iov[0] = Reject header, iov[1] = copy of the bad PDU header. */
	iov = &cmd->iov_misc[0];

	iov[iov_count].iov_base = cmd->pdu;
	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
	iov[iov_count].iov_base = cmd->buf_ptr;
	iov[iov_count++].iov_len = ISCSI_HDR_LEN;

	tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN);

	if (conn->conn_ops->HeaderDigest) {
		/* CRC32C of the header, stored just past it in cmd->pdu. */
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32 HeaderDigest for"
			" REJECT PDU 0x%08x\n", *header_digest);
	}

	if (conn->conn_ops->DataDigest) {
		/* Data digest covers the echoed bad header. */
		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)&cmd->data_crc);

		iov[iov_count].iov_base = &cmd->data_crc;
		iov[iov_count++].iov_len  = ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32 DataDigest for REJECT"
				" PDU 0x%08x\n", cmd->data_crc);
	}

	cmd->iov_misc_count = iov_count;
	cmd->tx_size = tx_size;

	pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
		" CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);

	return 0;
}
3394
3395static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
3396{
3397 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
3398 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
3399 wait_for_completion_interruptible_timeout(
3400 &conn->tx_half_close_comp,
3401 ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
3402 }
3403}
3404
3405#ifdef CONFIG_SMP
3406
/*
 * Seed conn->conn_cpumask with a single online CPU, chosen by reducing
 * the connection's thread-set id modulo the number of online CPUs, so
 * each connection's TX/RX kthread pair lands on a stable CPU.
 */
void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
{
	struct iscsi_thread_set *ts = conn->thread_set;
	int ord, cpu;
	/*
	 * thread_id is assigned from iscsit_global->ts_bitmap from
	 * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
	 *
	 * Here we use thread_id to determine which CPU that this
	 * iSCSI connection's iscsi_thread_set will be scheduled to
	 * execute upon.
	 */
	ord = ts->thread_id % cpumask_weight(cpu_online_mask);
#if 0
	pr_debug(">>>>>>>>>>>>>>>>>>>> Generated ord: %d from"
			" thread_id: %d\n", ord, ts->thread_id);
#endif
	/* Walk the online CPUs until the ord'th one is reached. */
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, conn->conn_cpumask);
			return;
		}
	}
	/*
	 * This should never be reached..
	 */
	dump_stack();
	cpumask_setall(conn->conn_cpumask);
}
3436
/*
 * Re-apply conn->conn_cpumask to the calling kthread @p if a cpumask
 * reset has been requested for it, keeping TX and RX threads of one
 * connection on the same CPU.  @mode selects which reset flag to test.
 */
static inline void iscsit_thread_check_cpumask(
	struct iscsi_conn *conn,
	struct task_struct *p,
	int mode)
{
	char buf[128];
	/*
	 * mode == 1 signals iscsi_target_tx_thread() usage.
	 * mode == 0 signals iscsi_target_rx_thread() usage.
	 */
	if (mode == 1) {
		if (!conn->conn_tx_reset_cpumask)
			return;
		conn->conn_tx_reset_cpumask = 0;
	} else {
		if (!conn->conn_rx_reset_cpumask)
			return;
		conn->conn_rx_reset_cpumask = 0;
	}
	/*
	 * Update the CPU mask for this single kthread so that
	 * both TX and RX kthreads are scheduled to run on the
	 * same CPU.
	 */
	memset(buf, 0, 128);
	cpumask_scnprintf(buf, 128, conn->conn_cpumask);
#if 0
	pr_debug(">>>>>>>>>>>>>> Calling set_cpus_allowed_ptr():"
			" %s for %s\n", buf, p->comm);
#endif
	set_cpus_allowed_ptr(p, conn->conn_cpumask);
}
3469
3470#else
3471#define iscsit_thread_get_cpumask(X) ({})
3472#define iscsit_thread_check_cpumask(X, Y, Z) ({})
3473#endif /* CONFIG_SMP */
3474
/*
 * Per-connection TX kthread main loop.
 *
 * Sleeps until woken, then drains two work queues in priority order:
 * the immediate queue (R2Ts, NopIns, command removal) and the response
 * queue (Data-In, status, logout/text/TMR/reject responses).  For each
 * queued item it builds the PDU, transmits it, then runs the per-state
 * post-send bookkeeping under cmd->istate_lock.  Any send or protocol
 * error jumps to transport_err, which triggers connection-exit handling
 * and restarts the loop on the next connection assigned to this thread
 * set.
 */
int iscsi_target_tx_thread(void *arg)
{
	u8 state;
	int eodr = 0;
	int ret = 0;
	int sent_status = 0;
	int use_misc = 0;
	int map_sg = 0;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_conn *conn;
	struct iscsi_queue_req *qr = NULL;
	struct se_cmd *se_cmd;
	struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
	/*
	 * Allow ourselves to be interrupted by SIGINT so that a
	 * connection recovery / failure event can be triggered externally.
	 */
	allow_signal(SIGINT);

restart:
	conn = iscsi_tx_thread_pre_handler(ts);
	if (!conn)
		goto out;

	/* Fresh connection: reset all per-iteration state. */
	eodr = map_sg = ret = sent_status = use_misc = 0;

	while (!kthread_should_stop()) {
		/*
		 * Ensure that both TX and RX per connection kthreads
		 * are scheduled to run on the same CPU.
		 */
		iscsit_thread_check_cpumask(conn, current, 1);

		/* Sleep until queue insertion wakes us or a signal arrives. */
		schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);

		if ((ts->status == ISCSI_THREAD_SET_RESET) ||
		     signal_pending(current))
			goto transport_err;

get_immediate:
		/* Immediate queue has priority over the response queue. */
		qr = iscsit_get_cmd_from_immediate_queue(conn);
		if (qr) {
			atomic_set(&conn->check_immediate_queue, 0);
			cmd = qr->cmd;
			state = qr->state;
			kmem_cache_free(lio_qr_cache, qr);

			spin_lock_bh(&cmd->istate_lock);
			switch (state) {
			case ISTATE_SEND_R2T:
				spin_unlock_bh(&cmd->istate_lock);
				ret = iscsit_send_r2t(cmd, conn);
				break;
			case ISTATE_REMOVE:
				spin_unlock_bh(&cmd->istate_lock);

				if (cmd->data_direction == DMA_TO_DEVICE)
					iscsit_stop_dataout_timer(cmd);

				spin_lock_bh(&conn->cmd_lock);
				list_del(&cmd->i_list);
				spin_unlock_bh(&conn->cmd_lock);
				/*
				 * Determine if a struct se_cmd is assoicated with
				 * this struct iscsi_cmd.
				 */
				if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
				    !(cmd->tmr_req))
					iscsit_release_cmd(cmd);
				else
					transport_generic_free_cmd(&cmd->se_cmd,
								1, 0);
				goto get_immediate;
			case ISTATE_SEND_NOPIN_WANT_RESPONSE:
				spin_unlock_bh(&cmd->istate_lock);
				iscsit_mod_nopin_response_timer(conn);
				ret = iscsit_send_unsolicited_nopin(cmd,
						conn, 1);
				break;
			case ISTATE_SEND_NOPIN_NO_RESPONSE:
				spin_unlock_bh(&cmd->istate_lock);
				ret = iscsit_send_unsolicited_nopin(cmd,
						conn, 0);
				break;
			default:
				pr_err("Unknown Opcode: 0x%02x ITT:"
				" 0x%08x, i_state: %d on CID: %hu\n",
				cmd->iscsi_opcode, cmd->init_task_tag, state,
				conn->cid);
				spin_unlock_bh(&cmd->istate_lock);
				goto transport_err;
			}
			if (ret < 0) {
				conn->tx_immediate_queue = 0;
				goto transport_err;
			}

			/* PDU built above; now push it over the socket. */
			if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
				conn->tx_immediate_queue = 0;
				iscsit_tx_thread_wait_for_tcp(conn);
				goto transport_err;
			}

			/* Post-send state transitions for the immediate item. */
			spin_lock_bh(&cmd->istate_lock);
			switch (state) {
			case ISTATE_SEND_R2T:
				spin_unlock_bh(&cmd->istate_lock);
				spin_lock_bh(&cmd->dataout_timeout_lock);
				iscsit_start_dataout_timer(cmd, conn);
				spin_unlock_bh(&cmd->dataout_timeout_lock);
				break;
			case ISTATE_SEND_NOPIN_WANT_RESPONSE:
				cmd->i_state = ISTATE_SENT_NOPIN_WANT_RESPONSE;
				spin_unlock_bh(&cmd->istate_lock);
				break;
			case ISTATE_SEND_NOPIN_NO_RESPONSE:
				cmd->i_state = ISTATE_SENT_STATUS;
				spin_unlock_bh(&cmd->istate_lock);
				break;
			default:
				pr_err("Unknown Opcode: 0x%02x ITT:"
					" 0x%08x, i_state: %d on CID: %hu\n",
					cmd->iscsi_opcode, cmd->init_task_tag,
					state, conn->cid);
				spin_unlock_bh(&cmd->istate_lock);
				goto transport_err;
			}
			goto get_immediate;
		} else
			conn->tx_immediate_queue = 0;

get_response:
		qr = iscsit_get_cmd_from_response_queue(conn);
		if (qr) {
			cmd = qr->cmd;
			state = qr->state;
			kmem_cache_free(lio_qr_cache, qr);

			spin_lock_bh(&cmd->istate_lock);
check_rsp_state:
			switch (state) {
			case ISTATE_SEND_DATAIN:
				spin_unlock_bh(&cmd->istate_lock);
				ret = iscsit_send_data_in(cmd, conn,
							  &eodr);
				/* Data-In may be sent via sendpage below. */
				map_sg = 1;
				break;
			case ISTATE_SEND_STATUS:
			case ISTATE_SEND_STATUS_RECOVERY:
				spin_unlock_bh(&cmd->istate_lock);
				use_misc = 1;
				ret = iscsit_send_status(cmd, conn);
				break;
			case ISTATE_SEND_LOGOUTRSP:
				spin_unlock_bh(&cmd->istate_lock);
				use_misc = 1;
				ret = iscsit_send_logout_response(cmd, conn);
				break;
			case ISTATE_SEND_ASYNCMSG:
				spin_unlock_bh(&cmd->istate_lock);
				use_misc = 1;
				ret = iscsit_send_conn_drop_async_message(
						cmd, conn);
				break;
			case ISTATE_SEND_NOPIN:
				spin_unlock_bh(&cmd->istate_lock);
				use_misc = 1;
				ret = iscsit_send_nopin_response(cmd, conn);
				break;
			case ISTATE_SEND_REJECT:
				spin_unlock_bh(&cmd->istate_lock);
				use_misc = 1;
				ret = iscsit_send_reject(cmd, conn);
				break;
			case ISTATE_SEND_TASKMGTRSP:
				spin_unlock_bh(&cmd->istate_lock);
				use_misc = 1;
				ret = iscsit_send_task_mgt_rsp(cmd, conn);
				if (ret != 0)
					break;
				ret = iscsit_tmr_post_handler(cmd, conn);
				if (ret != 0)
					iscsit_fall_back_to_erl0(conn->sess);
				break;
			case ISTATE_SEND_TEXTRSP:
				spin_unlock_bh(&cmd->istate_lock);
				use_misc = 1;
				ret = iscsit_send_text_rsp(cmd, conn);
				break;
			default:
				pr_err("Unknown Opcode: 0x%02x ITT:"
					" 0x%08x, i_state: %d on CID: %hu\n",
					cmd->iscsi_opcode, cmd->init_task_tag,
					state, conn->cid);
				spin_unlock_bh(&cmd->istate_lock);
				goto transport_err;
			}
			if (ret < 0) {
				conn->tx_response_queue = 0;
				goto transport_err;
			}

			se_cmd = &cmd->se_cmd;

			/* Zero-copy sendpage for Data-In unless IFMarkers on. */
			if (map_sg && !conn->conn_ops->IFMarker) {
				if (iscsit_fe_sendpage_sg(cmd, conn) < 0) {
					conn->tx_response_queue = 0;
					iscsit_tx_thread_wait_for_tcp(conn);
					iscsit_unmap_iovec(cmd);
					goto transport_err;
				}
			} else {
				if (iscsit_send_tx_data(cmd, conn, use_misc) < 0) {
					conn->tx_response_queue = 0;
					iscsit_tx_thread_wait_for_tcp(conn);
					iscsit_unmap_iovec(cmd);
					goto transport_err;
				}
			}
			map_sg = 0;
			iscsit_unmap_iovec(cmd);

			/* Post-send state transitions for the response item. */
			spin_lock_bh(&cmd->istate_lock);
			switch (state) {
			case ISTATE_SEND_DATAIN:
				if (!eodr)
					goto check_rsp_state;

				/*
				 * eodr == 1: final Data-In carried status;
				 * eodr == 2: follow up with a separate
				 * Status PDU via check_rsp_state.
				 */
				if (eodr == 1) {
					cmd->i_state = ISTATE_SENT_LAST_DATAIN;
					sent_status = 1;
					eodr = use_misc = 0;
				} else if (eodr == 2) {
					cmd->i_state = state =
							ISTATE_SEND_STATUS;
					sent_status = 0;
					eodr = use_misc = 0;
					goto check_rsp_state;
				}
				break;
			case ISTATE_SEND_STATUS:
				use_misc = 0;
				sent_status = 1;
				break;
			case ISTATE_SEND_ASYNCMSG:
			case ISTATE_SEND_NOPIN:
			case ISTATE_SEND_STATUS_RECOVERY:
			case ISTATE_SEND_TEXTRSP:
				use_misc = 0;
				sent_status = 1;
				break;
			case ISTATE_SEND_REJECT:
				use_misc = 0;
				if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
					cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
					spin_unlock_bh(&cmd->istate_lock);
					complete(&cmd->reject_comp);
					goto transport_err;
				}
				complete(&cmd->reject_comp);
				break;
			case ISTATE_SEND_TASKMGTRSP:
				use_misc = 0;
				sent_status = 1;
				break;
			case ISTATE_SEND_LOGOUTRSP:
				spin_unlock_bh(&cmd->istate_lock);
				if (!iscsit_logout_post_handler(cmd, conn))
					goto restart;
				spin_lock_bh(&cmd->istate_lock);
				use_misc = 0;
				sent_status = 1;
				break;
			default:
				pr_err("Unknown Opcode: 0x%02x ITT:"
					" 0x%08x, i_state: %d on CID: %hu\n",
					cmd->iscsi_opcode, cmd->init_task_tag,
					cmd->i_state, conn->cid);
				spin_unlock_bh(&cmd->istate_lock);
				goto transport_err;
			}

			if (sent_status) {
				cmd->i_state = ISTATE_SENT_STATUS;
				sent_status = 0;
			}
			spin_unlock_bh(&cmd->istate_lock);

			/* Immediate work preempts further response work. */
			if (atomic_read(&conn->check_immediate_queue))
				goto get_immediate;

			goto get_response;
		} else
			conn->tx_response_queue = 0;
	}

transport_err:
	iscsit_take_action_for_connection_exit(conn);
	goto restart;
out:
	return 0;
}
3777
3778int iscsi_target_rx_thread(void *arg)
3779{
3780 int ret;
3781 u8 buffer[ISCSI_HDR_LEN], opcode;
3782 u32 checksum = 0, digest = 0;
3783 struct iscsi_conn *conn = NULL;
3784 struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
3785 struct kvec iov;
3786 /*
3787 * Allow ourselves to be interrupted by SIGINT so that a
3788 * connection recovery / failure event can be triggered externally.
3789 */
3790 allow_signal(SIGINT);
3791
3792restart:
3793 conn = iscsi_rx_thread_pre_handler(ts);
3794 if (!conn)
3795 goto out;
3796
3797 while (!kthread_should_stop()) {
3798 /*
3799 * Ensure that both TX and RX per connection kthreads
3800 * are scheduled to run on the same CPU.
3801 */
3802 iscsit_thread_check_cpumask(conn, current, 0);
3803
3804 memset(buffer, 0, ISCSI_HDR_LEN);
3805 memset(&iov, 0, sizeof(struct kvec));
3806
3807 iov.iov_base = buffer;
3808 iov.iov_len = ISCSI_HDR_LEN;
3809
3810 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
3811 if (ret != ISCSI_HDR_LEN) {
3812 iscsit_rx_thread_wait_for_tcp(conn);
3813 goto transport_err;
3814 }
3815
3816 /*
3817 * Set conn->bad_hdr for use with REJECT PDUs.
3818 */
3819 memcpy(&conn->bad_hdr, &buffer, ISCSI_HDR_LEN);
3820
3821 if (conn->conn_ops->HeaderDigest) {
3822 iov.iov_base = &digest;
3823 iov.iov_len = ISCSI_CRC_LEN;
3824
3825 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
3826 if (ret != ISCSI_CRC_LEN) {
3827 iscsit_rx_thread_wait_for_tcp(conn);
3828 goto transport_err;
3829 }
3830
3831 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
3832 buffer, ISCSI_HDR_LEN,
3833 0, NULL, (u8 *)&checksum);
3834
3835 if (digest != checksum) {
3836 pr_err("HeaderDigest CRC32C failed,"
3837 " received 0x%08x, computed 0x%08x\n",
3838 digest, checksum);
3839 /*
3840 * Set the PDU to 0xff so it will intentionally
3841 * hit default in the switch below.
3842 */
3843 memset(buffer, 0xff, ISCSI_HDR_LEN);
3844 spin_lock_bh(&conn->sess->session_stats_lock);
3845 conn->sess->conn_digest_errors++;
3846 spin_unlock_bh(&conn->sess->session_stats_lock);
3847 } else {
3848 pr_debug("Got HeaderDigest CRC32C"
3849 " 0x%08x\n", checksum);
3850 }
3851 }
3852
3853 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
3854 goto transport_err;
3855
3856 opcode = buffer[0] & ISCSI_OPCODE_MASK;
3857
3858 if (conn->sess->sess_ops->SessionType &&
3859 ((!(opcode & ISCSI_OP_TEXT)) ||
3860 (!(opcode & ISCSI_OP_LOGOUT)))) {
3861 pr_err("Received illegal iSCSI Opcode: 0x%02x"
3862 " while in Discovery Session, rejecting.\n", opcode);
3863 iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
3864 buffer, conn);
3865 goto transport_err;
3866 }
3867
3868 switch (opcode) {
3869 case ISCSI_OP_SCSI_CMD:
3870 if (iscsit_handle_scsi_cmd(conn, buffer) < 0)
3871 goto transport_err;
3872 break;
3873 case ISCSI_OP_SCSI_DATA_OUT:
3874 if (iscsit_handle_data_out(conn, buffer) < 0)
3875 goto transport_err;
3876 break;
3877 case ISCSI_OP_NOOP_OUT:
3878 if (iscsit_handle_nop_out(conn, buffer) < 0)
3879 goto transport_err;
3880 break;
3881 case ISCSI_OP_SCSI_TMFUNC:
3882 if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0)
3883 goto transport_err;
3884 break;
3885 case ISCSI_OP_TEXT:
3886 if (iscsit_handle_text_cmd(conn, buffer) < 0)
3887 goto transport_err;
3888 break;
3889 case ISCSI_OP_LOGOUT:
3890 ret = iscsit_handle_logout_cmd(conn, buffer);
3891 if (ret > 0) {
3892 wait_for_completion_timeout(&conn->conn_logout_comp,
3893 SECONDS_FOR_LOGOUT_COMP * HZ);
3894 goto transport_err;
3895 } else if (ret < 0)
3896 goto transport_err;
3897 break;
3898 case ISCSI_OP_SNACK:
3899 if (iscsit_handle_snack(conn, buffer) < 0)
3900 goto transport_err;
3901 break;
3902 default:
3903 pr_err("Got unknown iSCSI OpCode: 0x%02x\n",
3904 opcode);
3905 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
3906 pr_err("Cannot recover from unknown"
3907 " opcode while ERL=0, closing iSCSI connection"
3908 ".\n");
3909 goto transport_err;
3910 }
3911 if (!conn->conn_ops->OFMarker) {
3912 pr_err("Unable to recover from unknown"
3913 " opcode while OFMarker=No, closing iSCSI"
3914 " connection.\n");
3915 goto transport_err;
3916 }
3917 if (iscsit_recover_from_unknown_opcode(conn) < 0) {
3918 pr_err("Unable to recover from unknown"
3919 " opcode, closing iSCSI connection.\n");
3920 goto transport_err;
3921 }
3922 break;
3923 }
3924 }
3925
3926transport_err:
3927 if (!signal_pending(current))
3928 atomic_set(&conn->transport_failed, 1);
3929 iscsit_take_action_for_connection_exit(conn);
3930 goto restart;
3931out:
3932 return 0;
3933}
3934
/*
 * Release every iscsi_cmd still linked on conn->conn_cmd_list during
 * connection teardown, quiescing in-flight se_cmd tasks first.  The
 * cmd_lock is dropped around each release because the release paths
 * may sleep; list_del before unlock keeps the walk safe.
 */
static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
	struct iscsi_session *sess = conn->sess;
	struct se_cmd *se_cmd;
	/*
	 * We expect this function to only ever be called from either RX or TX
	 * thread context via iscsit_close_connection() once the other context
	 * has been reset -> returned sleeping pre-handler state.
	 */
	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
		/* Commands with no se_lun association take the light path. */
		if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) {

			list_del(&cmd->i_list);
			spin_unlock_bh(&conn->cmd_lock);
			iscsit_increment_maxcmdsn(cmd, sess);
			se_cmd = &cmd->se_cmd;
			/*
			 * Special cases for active iSCSI TMR, and
			 * transport_lookup_cmd_lun() failing from
			 * iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd().
			 */
			if (cmd->tmr_req && se_cmd->transport_wait_for_tasks)
				se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);
			/*
			 * NOTE(review): this else-if looks unreachable — the
			 * enclosing branch already established that
			 * SCF_SE_LUN_CMD is clear.  Confirm before removing.
			 */
			else if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)
				transport_release_cmd(se_cmd);
			else
				iscsit_release_cmd(cmd);

			spin_lock_bh(&conn->cmd_lock);
			continue;
		}
		list_del(&cmd->i_list);
		spin_unlock_bh(&conn->cmd_lock);

		iscsit_increment_maxcmdsn(cmd, sess);
		se_cmd = &cmd->se_cmd;

		/* Quiesce any tasks still running in the target core. */
		if (se_cmd->transport_wait_for_tasks)
			se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);

		spin_lock_bh(&conn->cmd_lock);
	}
	spin_unlock_bh(&conn->cmd_lock);
}
3981
3982static void iscsit_stop_timers_for_cmds(
3983 struct iscsi_conn *conn)
3984{
3985 struct iscsi_cmd *cmd;
3986
3987 spin_lock_bh(&conn->cmd_lock);
3988 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
3989 if (cmd->data_direction == DMA_TO_DEVICE)
3990 iscsit_stop_dataout_timer(cmd);
3991 }
3992 spin_unlock_bh(&conn->cmd_lock);
3993}
3994
/*
 * Tear down a single iSCSI connection and decide the fate of its
 * session.
 *
 * Ordering matters throughout: thread set release, timer/queue
 * shutdown, command release (or recovery preparation), logout usage
 * accounting, reinstatement hand-shakes, resource freeing, and finally
 * the session-level decision (keep, stop other connections, close, or
 * enter Time2Retain).  Always returns 0.
 */
int iscsit_close_connection(
	struct iscsi_conn *conn)
{
	int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
	struct iscsi_session	*sess = conn->sess;

	pr_debug("Closing iSCSI connection CID %hu on SID:"
		" %u\n", conn->cid, sess->sid);
	/*
	 * Always up conn_logout_comp just in case the RX Thread is sleeping
	 * and the logout response never got sent because the connection
	 * failed.
	 */
	complete(&conn->conn_logout_comp);

	iscsi_release_thread_set(conn);

	iscsit_stop_timers_for_cmds(conn);
	iscsit_stop_nopin_response_timer(conn);
	iscsit_stop_nopin_timer(conn);
	iscsit_free_queue_reqs_for_conn(conn);

	/*
	 * During Connection recovery drop unacknowledged out of order
	 * commands for this connection, and prepare the other commands
	 * for realligence.
	 *
	 * During normal operation clear the out of order commands (but
	 * do not free the struct iscsi_ooo_cmdsn's) and release all
	 * struct iscsi_cmds.
	 */
	if (atomic_read(&conn->connection_recovery)) {
		iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
		iscsit_prepare_cmds_for_realligance(conn);
	} else {
		iscsit_clear_ooo_cmdsns_for_conn(conn);
		iscsit_release_commands_from_conn(conn);
	}

	/*
	 * Handle decrementing session or connection usage count if
	 * a logout response was not able to be sent because the
	 * connection failed.  Fall back to Session Recovery here.
	 */
	if (atomic_read(&conn->conn_logout_remove)) {
		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
			iscsit_dec_conn_usage_count(conn);
			iscsit_dec_session_usage_count(sess);
		}
		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
			iscsit_dec_conn_usage_count(conn);

		atomic_set(&conn->conn_logout_remove, 0);
		atomic_set(&sess->session_reinstatement, 0);
		atomic_set(&sess->session_fall_back_to_erl0, 1);
	}

	spin_lock_bh(&sess->conn_lock);
	list_del(&conn->conn_list);

	/*
	 * Attempt to let the Initiator know this connection failed by
	 * sending an Connection Dropped Async Message on another
	 * active connection.
	 */
	if (atomic_read(&conn->connection_recovery))
		iscsit_build_conn_drop_async_message(conn);

	spin_unlock_bh(&sess->conn_lock);

	/*
	 * If connection reinstatement is being performed on this connection,
	 * up the connection reinstatement semaphore that is being blocked on
	 * in iscsit_cause_connection_reinstatement().
	 */
	spin_lock_bh(&conn->state_lock);
	if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
		spin_unlock_bh(&conn->state_lock);
		complete(&conn->conn_wait_comp);
		wait_for_completion(&conn->conn_post_wait_comp);
		spin_lock_bh(&conn->state_lock);
	}

	/*
	 * If connection reinstatement is being performed on this connection
	 * by receiving a REMOVECONNFORRECOVERY logout request, up the
	 * connection wait rcfr semaphore that is being blocked on
	 * an iscsit_connection_reinstatement_rcfr().
	 */
	if (atomic_read(&conn->connection_wait_rcfr)) {
		spin_unlock_bh(&conn->state_lock);
		complete(&conn->conn_wait_rcfr_comp);
		wait_for_completion(&conn->conn_post_wait_comp);
		spin_lock_bh(&conn->state_lock);
	}
	atomic_set(&conn->connection_reinstatement, 1);
	spin_unlock_bh(&conn->state_lock);

	/*
	 * If any other processes are accessing this connection pointer we
	 * must wait until they have completed.
	 */
	iscsit_check_conn_usage_count(conn);

	/* Free per-connection crypto, cpumask, ops and socket resources. */
	if (conn->conn_rx_hash.tfm)
		crypto_free_hash(conn->conn_rx_hash.tfm);
	if (conn->conn_tx_hash.tfm)
		crypto_free_hash(conn->conn_tx_hash.tfm);

	if (conn->conn_cpumask)
		free_cpumask_var(conn->conn_cpumask);

	kfree(conn->conn_ops);
	conn->conn_ops = NULL;

	if (conn->sock) {
		if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
			kfree(conn->sock->file);
			conn->sock->file = NULL;
		}
		sock_release(conn->sock);
	}
	conn->thread_set = NULL;

	pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
	conn->conn_state = TARG_CONN_STATE_FREE;
	kfree(conn);

	spin_lock_bh(&sess->conn_lock);
	atomic_dec(&sess->nconn);
	pr_debug("Decremented iSCSI connection count to %hu from node:"
		" %s\n", atomic_read(&sess->nconn),
		sess->sess_ops->InitiatorName);
	/*
	 * Make sure that if one connection fails in an non ERL=2 iSCSI
	 * Session that they all fail.
	 */
	if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
	     !atomic_read(&sess->session_logout))
		atomic_set(&sess->session_fall_back_to_erl0, 1);

	/*
	 * If this was not the last connection in the session, and we are
	 * performing session reinstatement or falling back to ERL=0, call
	 * iscsit_stop_session() without sleeping to shutdown the other
	 * active connections.
	 */
	if (atomic_read(&sess->nconn)) {
		if (!atomic_read(&sess->session_reinstatement) &&
		    !atomic_read(&sess->session_fall_back_to_erl0)) {
			spin_unlock_bh(&sess->conn_lock);
			return 0;
		}
		if (!atomic_read(&sess->session_stop_active)) {
			atomic_set(&sess->session_stop_active, 1);
			spin_unlock_bh(&sess->conn_lock);
			iscsit_stop_session(sess, 0, 0);
			return 0;
		}
		spin_unlock_bh(&sess->conn_lock);
		return 0;
	}

	/*
	 * If this was the last connection in the session and one of the
	 * following is occurring:
	 *
	 * Session Reinstatement is not being performed, and are falling back
	 * to ERL=0 call iscsit_close_session().
	 *
	 * Session Logout was requested.  iscsit_close_session() will be called
	 * elsewhere.
	 *
	 * Session Continuation is not being performed, start the Time2Retain
	 * handler and check if sleep_on_sess_wait_sem is active.
	 */
	if (!atomic_read(&sess->session_reinstatement) &&
	     atomic_read(&sess->session_fall_back_to_erl0)) {
		spin_unlock_bh(&sess->conn_lock);
		iscsit_close_session(sess);

		return 0;
	} else if (atomic_read(&sess->session_logout)) {
		pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
		sess->session_state = TARG_SESS_STATE_FREE;
		spin_unlock_bh(&sess->conn_lock);

		if (atomic_read(&sess->sleep_on_sess_wait_comp))
			complete(&sess->session_wait_comp);

		return 0;
	} else {
		pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
		sess->session_state = TARG_SESS_STATE_FAILED;

		if (!atomic_read(&sess->session_continuation)) {
			spin_unlock_bh(&sess->conn_lock);
			iscsit_start_time2retain_handler(sess);
		} else
			spin_unlock_bh(&sess->conn_lock);

		if (atomic_read(&sess->sleep_on_sess_wait_comp))
			complete(&sess->session_wait_comp);

		return 0;
	}
	/* NOTE(review): unreachable — every branch above returns. */
	spin_unlock_bh(&sess->conn_lock);

	return 0;
}
4205
/*
 * Final teardown of an iSCSI session: deregister it from the target core,
 * free ERL=2 recovery state and queued OOO CmdSNs, remove it from the
 * session IDR, and free the session itself.
 *
 * Precondition: every connection must already be gone (nconn == 0), which
 * is enforced with BUG() below.  Returns 0.
 */
int iscsit_close_session(struct iscsi_session *sess)
{
	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;

	/* Closing a session with live connections is a fatal logic error. */
	if (atomic_read(&sess->nconn)) {
		pr_err("%d connection(s) still exist for iSCSI session"
			" to %s\n", atomic_read(&sess->nconn),
			sess->sess_ops->InitiatorName);
		BUG();
	}

	/*
	 * Mark logout + reinstatement under the TPG session lock so the
	 * time2retain timer cannot re-arm while we stop it.
	 */
	spin_lock_bh(&se_tpg->session_lock);
	atomic_set(&sess->session_logout, 1);
	atomic_set(&sess->session_reinstatement, 1);
	iscsit_stop_time2retain_timer(sess);
	spin_unlock_bh(&se_tpg->session_lock);

	/*
	 * transport_deregister_session_configfs() will clear the
	 * struct se_node_acl->nacl_sess pointer now as a iscsi_np process context
	 * can be setting it again with __transport_register_session() in
	 * iscsi_post_login_handler() again after the iscsit_stop_session()
	 * completes in iscsi_np context.
	 */
	transport_deregister_session_configfs(sess->se_sess);

	/*
	 * If any other processes are accessing this session pointer we must
	 * wait until they have completed. If we are in an interrupt (the
	 * time2retain handler) and contain and active session usage count we
	 * restart the timer and exit.
	 */
	if (!in_interrupt()) {
		/* Process context: safe to sleep in iscsit_stop_session(). */
		if (iscsit_check_session_usage_count(sess) == 1)
			iscsit_stop_session(sess, 1, 1);
	} else {
		/*
		 * Interrupt context (time2retain timer): cannot sleep, so
		 * defer by clearing the logout flag and re-arming the timer.
		 */
		if (iscsit_check_session_usage_count(sess) == 2) {
			atomic_set(&sess->session_logout, 0);
			iscsit_start_time2retain_handler(sess);
			return 0;
		}
	}

	transport_deregister_session(sess->se_sess);

	/* ERL=2 sessions may hold per-connection recovery entries. */
	if (sess->sess_ops->ErrorRecoveryLevel == 2)
		iscsit_free_connection_recovery_entires(sess);

	iscsit_free_all_ooo_cmdsns(sess);

	/*
	 * Drop the TPG/TIQN session counters and the IDR entry under the
	 * session lock, then free sess_ops and the session struct itself.
	 */
	spin_lock_bh(&se_tpg->session_lock);
	pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
	sess->session_state = TARG_SESS_STATE_FREE;
	pr_debug("Released iSCSI session from node: %s\n",
			sess->sess_ops->InitiatorName);
	tpg->nsessions--;
	if (tpg->tpg_tiqn)
		tpg->tpg_tiqn->tiqn_nsessions--;

	pr_debug("Decremented number of active iSCSI Sessions on"
		" iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);

	/* sess_idr_lock nests inside se_tpg->session_lock here. */
	spin_lock(&sess_idr_lock);
	idr_remove(&sess_idr, sess->session_index);
	spin_unlock(&sess_idr_lock);

	kfree(sess->sess_ops);
	sess->sess_ops = NULL;
	spin_unlock_bh(&se_tpg->session_lock);

	kfree(sess);
	return 0;
}
4280
/*
 * Post-handler for a logout with reason CLOSE_SESSION: signal the TX
 * thread for teardown, wake the logout waiter, then stop every connection
 * and close the whole session.  Call order matters: the usage counts must
 * be dropped before iscsit_stop_session()/iscsit_close_session() can make
 * progress.
 */
static void iscsit_logout_post_handler_closesession(
	struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;

	iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
	iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);

	atomic_set(&conn->conn_logout_remove, 0);
	complete(&conn->conn_logout_comp);

	iscsit_dec_conn_usage_count(conn);
	iscsit_stop_session(sess, 1, 1);
	iscsit_dec_session_usage_count(sess);
	iscsit_close_session(sess);
}
4297
/*
 * Post-handler for a logout that closes the connection the Logout PDU
 * itself arrived on (logout_cid == conn->cid): signal the TX thread, wake
 * the logout waiter, and reinstate (tear down) this connection.
 */
static void iscsit_logout_post_handler_samecid(
	struct iscsi_conn *conn)
{
	iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
	iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);

	atomic_set(&conn->conn_logout_remove, 0);
	complete(&conn->conn_logout_comp);

	iscsit_cause_connection_reinstatement(conn, 1);
	iscsit_dec_conn_usage_count(conn);
}
4310
4311static void iscsit_logout_post_handler_diffcid(
4312 struct iscsi_conn *conn,
4313 u16 cid)
4314{
4315 struct iscsi_conn *l_conn;
4316 struct iscsi_session *sess = conn->sess;
4317
4318 if (!sess)
4319 return;
4320
4321 spin_lock_bh(&sess->conn_lock);
4322 list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
4323 if (l_conn->cid == cid) {
4324 iscsit_inc_conn_usage_count(l_conn);
4325 break;
4326 }
4327 }
4328 spin_unlock_bh(&sess->conn_lock);
4329
4330 if (!l_conn)
4331 return;
4332
4333 if (l_conn->sock)
4334 l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
4335
4336 spin_lock_bh(&l_conn->state_lock);
4337 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
4338 l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
4339 spin_unlock_bh(&l_conn->state_lock);
4340
4341 iscsit_cause_connection_reinstatement(l_conn, 1);
4342 iscsit_dec_conn_usage_count(l_conn);
4343}
4344
4345/*
4346 * Return of 0 causes the TX thread to restart.
4347 */
4348static int iscsit_logout_post_handler(
4349 struct iscsi_cmd *cmd,
4350 struct iscsi_conn *conn)
4351{
4352 int ret = 0;
4353
4354 switch (cmd->logout_reason) {
4355 case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
4356 switch (cmd->logout_response) {
4357 case ISCSI_LOGOUT_SUCCESS:
4358 case ISCSI_LOGOUT_CLEANUP_FAILED:
4359 default:
4360 iscsit_logout_post_handler_closesession(conn);
4361 break;
4362 }
4363 ret = 0;
4364 break;
4365 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
4366 if (conn->cid == cmd->logout_cid) {
4367 switch (cmd->logout_response) {
4368 case ISCSI_LOGOUT_SUCCESS:
4369 case ISCSI_LOGOUT_CLEANUP_FAILED:
4370 default:
4371 iscsit_logout_post_handler_samecid(conn);
4372 break;
4373 }
4374 ret = 0;
4375 } else {
4376 switch (cmd->logout_response) {
4377 case ISCSI_LOGOUT_SUCCESS:
4378 iscsit_logout_post_handler_diffcid(conn,
4379 cmd->logout_cid);
4380 break;
4381 case ISCSI_LOGOUT_CID_NOT_FOUND:
4382 case ISCSI_LOGOUT_CLEANUP_FAILED:
4383 default:
4384 break;
4385 }
4386 ret = 1;
4387 }
4388 break;
4389 case ISCSI_LOGOUT_REASON_RECOVERY:
4390 switch (cmd->logout_response) {
4391 case ISCSI_LOGOUT_SUCCESS:
4392 case ISCSI_LOGOUT_CID_NOT_FOUND:
4393 case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
4394 case ISCSI_LOGOUT_CLEANUP_FAILED:
4395 default:
4396 break;
4397 }
4398 ret = 1;
4399 break;
4400 default:
4401 break;
4402
4403 }
4404 return ret;
4405}
4406
/*
 * Fail a session: move every connection into TARG_CONN_STATE_CLEANUP_WAIT
 * (under conn_lock), then mark the session TARG_SESS_STATE_FAILED.  The
 * session state write deliberately happens after the lock is dropped,
 * matching the original ordering.
 */
void iscsit_fail_session(struct iscsi_session *sess)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
		conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
	}
	spin_unlock_bh(&sess->conn_lock);

	pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
	sess->session_state = TARG_SESS_STATE_FAILED;
}
4421
/*
 * Forcefully free a session: reinstate (tear down) every connection,
 * sleeping on session_wait_comp if connections remain, then call
 * iscsit_close_session().  Always returns 0.
 */
int iscsit_free_session(struct iscsi_session *sess)
{
	u16 conn_count = atomic_read(&sess->nconn);
	struct iscsi_conn *conn, *conn_tmp = NULL;
	int is_last;

	spin_lock_bh(&sess->conn_lock);
	/* Ask the teardown path to complete session_wait_comp for us. */
	atomic_set(&sess->sleep_on_sess_wait_comp, 1);

	list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
			conn_list) {
		if (conn_count == 0)
			break;

		/*
		 * conn_lock is dropped around the reinstatement call below,
		 * so pin conn_tmp (the iterator's lookahead) with a usage
		 * count unless conn is the last list element.
		 */
		if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
			is_last = 1;
		} else {
			iscsit_inc_conn_usage_count(conn_tmp);
			is_last = 0;
		}
		iscsit_inc_conn_usage_count(conn);

		/* Reinstatement may sleep; cannot hold the bh spinlock. */
		spin_unlock_bh(&sess->conn_lock);
		iscsit_cause_connection_reinstatement(conn, 1);
		spin_lock_bh(&sess->conn_lock);

		iscsit_dec_conn_usage_count(conn);
		if (is_last == 0)
			iscsit_dec_conn_usage_count(conn_tmp);

		conn_count--;
	}

	/* Wait for the remaining connections to finish shutting down. */
	if (atomic_read(&sess->nconn)) {
		spin_unlock_bh(&sess->conn_lock);
		wait_for_completion(&sess->session_wait_comp);
	} else
		spin_unlock_bh(&sess->conn_lock);

	iscsit_close_session(sess);
	return 0;
}
4464
/*
 * Stop all connections of a session.
 *
 * @session_sleep:    if set, arm sleep_on_sess_wait_comp and block on
 *                    session_wait_comp until the last connection is gone.
 * @connection_sleep: if set, reinstate each connection with sleep=1
 *                    (dropping conn_lock around each call); otherwise fire
 *                    non-sleeping reinstatement at every connection.
 */
void iscsit_stop_session(
	struct iscsi_session *sess,
	int session_sleep,
	int connection_sleep)
{
	u16 conn_count = atomic_read(&sess->nconn);
	struct iscsi_conn *conn, *conn_tmp = NULL;
	int is_last;

	spin_lock_bh(&sess->conn_lock);
	if (session_sleep)
		atomic_set(&sess->sleep_on_sess_wait_comp, 1);

	if (connection_sleep) {
		list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
				conn_list) {
			if (conn_count == 0)
				break;

			/*
			 * conn_lock is dropped below, so pin the iterator's
			 * lookahead (conn_tmp) unless conn is the last entry.
			 */
			if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
				is_last = 1;
			} else {
				iscsit_inc_conn_usage_count(conn_tmp);
				is_last = 0;
			}
			iscsit_inc_conn_usage_count(conn);

			/* Sleeping reinstatement cannot hold the bh lock. */
			spin_unlock_bh(&sess->conn_lock);
			iscsit_cause_connection_reinstatement(conn, 1);
			spin_lock_bh(&sess->conn_lock);

			iscsit_dec_conn_usage_count(conn);
			if (is_last == 0)
				iscsit_dec_conn_usage_count(conn_tmp);
			conn_count--;
		}
	} else {
		/* Non-sleeping variant: kick every connection in place. */
		list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
			iscsit_cause_connection_reinstatement(conn, 0);
	}

	if (session_sleep && atomic_read(&sess->nconn)) {
		spin_unlock_bh(&sess->conn_lock);
		wait_for_completion(&sess->session_wait_comp);
	} else
		spin_unlock_bh(&sess->conn_lock);
}
4512
/*
 * Release every session on a TPG.  Returns -1 (without touching anything)
 * if sessions exist and @force is not set; otherwise frees each eligible
 * session via iscsit_free_session() and returns 0.
 *
 * Sessions already falling back to ERL=0, logging out, or with an expired
 * time2retain timer are skipped — their teardown is handled elsewhere.
 */
int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
{
	struct iscsi_session *sess;
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
	struct se_session *se_sess, *se_sess_tmp;
	int session_count = 0;

	spin_lock_bh(&se_tpg->session_lock);
	if (tpg->nsessions && !force) {
		spin_unlock_bh(&se_tpg->session_lock);
		return -1;
	}

	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
			sess_list) {
		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;

		/* conn_lock nests inside se_tpg->session_lock (non-bh). */
		spin_lock(&sess->conn_lock);
		if (atomic_read(&sess->session_fall_back_to_erl0) ||
		    atomic_read(&sess->session_logout) ||
		    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
			spin_unlock(&sess->conn_lock);
			continue;
		}
		atomic_set(&sess->session_reinstatement, 1);
		spin_unlock(&sess->conn_lock);
		/*
		 * iscsit_free_session() sleeps, so drop session_lock around
		 * it.  NOTE(review): se_sess_tmp may be stale once the lock
		 * is retaken — verify against the session teardown path.
		 */
		spin_unlock_bh(&se_tpg->session_lock);

		iscsit_free_session(sess);
		spin_lock_bh(&se_tpg->session_lock);

		session_count++;
	}
	spin_unlock_bh(&se_tpg->session_lock);

	pr_debug("Released %d iSCSI Session(s) from Target Portal"
			" Group: %hu\n", session_count, tpg->tpgt);
	return 0;
}
4552
/* Module metadata and init/exit entry points for the iSCSI target core. */
MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
MODULE_VERSION("4.1.x");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iscsi_target_init_module);
module_exit(iscsi_target_cleanup_module);
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
new file mode 100644
index 000000000000..5db2ddeed5eb
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -0,0 +1,42 @@
#ifndef ISCSI_TARGET_H
#define ISCSI_TARGET_H

/* TIQN (target IQN) lookup and lifetime management. */
extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
extern void iscsit_del_tiqn(struct iscsi_tiqn *);
/* Network portal (np) access control and lifecycle. */
extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *);
extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
				char *, int);
extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
				struct iscsi_portal_group *);
extern int iscsit_del_np(struct iscsi_np *);
/* PDU-level helpers: rejects, logouts, async messages, R2T generation. */
extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *);
extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
extern int iscsit_send_r2t(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, int);
/* Per-connection TX/RX kernel threads. */
extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
extern int iscsi_target_tx_thread(void *);
extern int iscsi_target_rx_thread(void *);
/* Connection and session teardown paths. */
extern int iscsit_close_connection(struct iscsi_conn *);
extern int iscsit_close_session(struct iscsi_session *);
extern void iscsit_fail_session(struct iscsi_session *);
extern int iscsit_free_session(struct iscsi_session *);
extern void iscsit_stop_session(struct iscsi_session *, int, int);
extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);

/* Global driver state, defined in iscsi_target.c / configfs glue. */
extern struct iscsit_global *iscsit_global;
extern struct target_fabric_configfs *lio_target_fabric_configfs;

/* Slab caches for per-command/recovery allocations. */
extern struct kmem_cache *lio_dr_cache;
extern struct kmem_cache *lio_ooo_cache;
extern struct kmem_cache *lio_cmd_cache;
extern struct kmem_cache *lio_qr_cache;
extern struct kmem_cache *lio_r2t_cache;

#endif /*** ISCSI_TARGET_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
new file mode 100644
index 000000000000..11fd74307811
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -0,0 +1,490 @@
1/*******************************************************************************
2 * This file houses the main functions for the iSCSI CHAP support
3 *
 4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/string.h>
22#include <linux/crypto.h>
23#include <linux/err.h>
24#include <linux/scatterlist.h>
25
26#include "iscsi_target_core.h"
27#include "iscsi_target_nego.h"
28#include "iscsi_target_auth.h"
29
/*
 * Decode two ASCII hex characters into one byte; val[0] is the high
 * nibble.  Upper- and lower-case a-f are accepted; any other character
 * falls through to the decimal-digit branch (matching the original
 * behaviour, including for non-hex input).
 */
static unsigned char chap_asciihex_to_binaryhex(unsigned char val[2])
{
	unsigned char nibble[2];
	int i;

	for (i = 0; i < 2; i++) {
		unsigned char c = val[i];

		if (c >= 'a' && c <= 'f')
			nibble[i] = (c - 'a' + 10) & 0xf;
		else if (c >= 'A' && c <= 'F')
			nibble[i] = (c - 'A' + 10) & 0xf;
		else /* digit */
			nibble[i] = (c - '0') & 0xf;
	}

	return (unsigned char)((nibble[0] << 4) | nibble[1]);
}
56
/*
 * Decode an ASCII-hex string of @len characters into @dst, two input
 * characters per output byte.  The output is NUL-terminated; returns the
 * number of decoded bytes.
 */
static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
{
	int i, j;

	for (i = 0, j = 0; i < len; i += 2, j++)
		dst[j] = chap_asciihex_to_binaryhex(&src[i]);

	dst[j] = '\0';
	return j;
}
68
/*
 * Hex-encode @src_len bytes of @src into @dst as lower-case ASCII, two
 * characters per byte.  sprintf() leaves a trailing NUL after the last
 * pair (none is written when src_len == 0).
 */
static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
{
	char *p = dst;
	int i;

	for (i = 0; i < src_len; i++, p += 2)
		sprintf(p, "%02x", src[i] & 0xff);
}
77
/*
 * Fill @data with @length pseudo-random bytes (the CHAP challenge).
 * Each output byte is assembled from three separate get_random_bytes()
 * draws: the draws are folded down with xor-shifts and contribute
 * 3 + 3 + 2 low bits respectively.
 */
static void chap_set_random(char *data, int length)
{
	long r;
	unsigned n;

	while (length > 0) {
		/* Bits 7..5 of the output byte. */
		get_random_bytes(&r, sizeof(long));
		r = r ^ (r >> 8);
		r = r ^ (r >> 4);
		n = r & 0x7;

		/* Bits 4..2. */
		get_random_bytes(&r, sizeof(long));
		r = r ^ (r >> 8);
		r = r ^ (r >> 5);
		n = (n << 3) | (r & 0x7);

		/* Bits 1..0. */
		get_random_bytes(&r, sizeof(long));
		r = r ^ (r >> 8);
		r = r ^ (r >> 5);
		n = (n << 2) | (r & 0x3);

		*data++ = n;
		length--;
	}
}
103
/*
 * Generate a fresh random CHAP challenge into chap->challenge and append
 * it to the outgoing login payload at c_str + *c_len as a
 * "CHAP_C=0x<hex>" key.  *c_len is advanced past the text plus one extra
 * byte — presumably for the NUL that separates login key=value pairs
 * (TODO confirm against the login payload builder).
 */
static void chap_gen_challenge(
	struct iscsi_conn *conn,
	int caller,
	char *c_str,
	unsigned int *c_len)
{
	unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
	struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;

	memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);

	chap_set_random(chap->challenge, CHAP_CHALLENGE_LENGTH);
	chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
				CHAP_CHALLENGE_LENGTH);
	/*
	 * Set CHAP_C, and copy the generated challenge into c_str.
	 */
	*c_len += sprintf(c_str + *c_len, "CHAP_C=0x%s", challenge_asciihex);
	*c_len += 1;

	pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
			challenge_asciihex);
}
127
128
129static struct iscsi_chap *chap_server_open(
130 struct iscsi_conn *conn,
131 struct iscsi_node_auth *auth,
132 const char *a_str,
133 char *aic_str,
134 unsigned int *aic_len)
135{
136 struct iscsi_chap *chap;
137
138 if (!(auth->naf_flags & NAF_USERID_SET) ||
139 !(auth->naf_flags & NAF_PASSWORD_SET)) {
140 pr_err("CHAP user or password not set for"
141 " Initiator ACL\n");
142 return NULL;
143 }
144
145 conn->auth_protocol = kzalloc(sizeof(struct iscsi_chap), GFP_KERNEL);
146 if (!conn->auth_protocol)
147 return NULL;
148
149 chap = (struct iscsi_chap *) conn->auth_protocol;
150 /*
151 * We only support MD5 MDA presently.
152 */
153 if (strncmp(a_str, "CHAP_A=5", 8)) {
154 pr_err("CHAP_A is not MD5.\n");
155 return NULL;
156 }
157 pr_debug("[server] Got CHAP_A=5\n");
158 /*
159 * Send back CHAP_A set to MD5.
160 */
161 *aic_len = sprintf(aic_str, "CHAP_A=5");
162 *aic_len += 1;
163 chap->digest_type = CHAP_DIGEST_MD5;
164 pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
165 /*
166 * Set Identifier.
167 */
168 chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++;
169 *aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
170 *aic_len += 1;
171 pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
172 /*
173 * Generate Challenge.
174 */
175 chap_gen_challenge(conn, 1, aic_str, aic_len);
176
177 return chap;
178}
179
/* Release the per-connection CHAP context allocated by chap_server_open(). */
static void chap_close(struct iscsi_conn *conn)
{
	kfree(conn->auth_protocol);
	conn->auth_protocol = NULL;
}
185
/*
 * Verify the initiator's CHAP response and, when mutual authentication is
 * configured, produce our own response.
 *
 * One-way step: parse CHAP_N/CHAP_R from @nr_in_ptr and check that
 * MD5(id || password || challenge) — the RFC 1994 digest over our
 * previously generated challenge — matches the initiator's CHAP_R.
 *
 * Mutual step (only if auth->authenticate_target): parse the initiator's
 * CHAP_I/CHAP_C and write "CHAP_N=<userid_mutual>\0CHAP_R=0x<md5>" into
 * @nr_out_ptr, advancing *nr_out_len (each "+= 1" covers the NUL that
 * separates login key=value pairs).
 *
 * Returns 0 on successful authentication, -1 on any parse, allocation,
 * crypto or digest-mismatch failure.
 */
static int chap_server_compute_md5(
	struct iscsi_conn *conn,
	struct iscsi_node_auth *auth,
	char *nr_in_ptr,
	char *nr_out_ptr,
	unsigned int *nr_out_len)
{
	char *endptr;
	unsigned char id, digest[MD5_SIGNATURE_SIZE];
	unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
	unsigned char identifier[10], *challenge = NULL;
	unsigned char *challenge_binhex = NULL;
	unsigned char client_digest[MD5_SIGNATURE_SIZE];
	unsigned char server_digest[MD5_SIGNATURE_SIZE];
	unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
	struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int auth_ret = -1, ret, challenge_len;

	memset(identifier, 0, 10);
	memset(chap_n, 0, MAX_CHAP_N_SIZE);
	memset(chap_r, 0, MAX_RESPONSE_LENGTH);
	memset(digest, 0, MD5_SIGNATURE_SIZE);
	memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
	memset(client_digest, 0, MD5_SIGNATURE_SIZE);
	memset(server_digest, 0, MD5_SIGNATURE_SIZE);

	challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
	if (!challenge) {
		pr_err("Unable to allocate challenge buffer\n");
		goto out;
	}

	challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
	if (!challenge_binhex) {
		pr_err("Unable to allocate challenge_binhex buffer\n");
		goto out;
	}
	/*
	 * Extract CHAP_N.
	 */
	if (extract_param(nr_in_ptr, "CHAP_N", MAX_CHAP_N_SIZE, chap_n,
				&type) < 0) {
		pr_err("Could not find CHAP_N.\n");
		goto out;
	}
	/* CHAP_N is a name; a hex-typed value is a protocol violation. */
	if (type == HEX) {
		pr_err("Could not find CHAP_N.\n");
		goto out;
	}

	/*
	 * NOTE(review): only strlen(userid) bytes are compared, so a CHAP_N
	 * that merely has the configured userid as a prefix also matches —
	 * verify this is intended.
	 */
	if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) {
		pr_err("CHAP_N values do not match!\n");
		goto out;
	}
	pr_debug("[server] Got CHAP_N=%s\n", chap_n);
	/*
	 * Extract CHAP_R.
	 */
	if (extract_param(nr_in_ptr, "CHAP_R", MAX_RESPONSE_LENGTH, chap_r,
				&type) < 0) {
		pr_err("Could not find CHAP_R.\n");
		goto out;
	}
	if (type != HEX) {
		pr_err("Could not find CHAP_R.\n");
		goto out;
	}

	pr_debug("[server] Got CHAP_R=%s\n", chap_r);
	/* Decode the initiator's hex CHAP_R into the raw client digest. */
	chap_string_to_hex(client_digest, chap_r, strlen(chap_r));

	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		pr_err("Unable to allocate struct crypto_hash\n");
		goto out;
	}
	desc.tfm = tfm;
	desc.flags = 0;

	ret = crypto_hash_init(&desc);
	if (ret < 0) {
		pr_err("crypto_hash_init() failed\n");
		crypto_free_hash(tfm);
		goto out;
	}

	/* RFC 1994 digest input: identifier octet ... */
	sg_init_one(&sg, (void *)&chap->id, 1);
	ret = crypto_hash_update(&desc, &sg, 1);
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for id\n");
		crypto_free_hash(tfm);
		goto out;
	}

	/* ... followed by the shared secret ... */
	sg_init_one(&sg, (void *)&auth->password, strlen(auth->password));
	ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for password\n");
		crypto_free_hash(tfm);
		goto out;
	}

	/* ... and the challenge we issued in chap_gen_challenge(). */
	sg_init_one(&sg, (void *)chap->challenge, CHAP_CHALLENGE_LENGTH);
	ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for challenge\n");
		crypto_free_hash(tfm);
		goto out;
	}

	ret = crypto_hash_final(&desc, server_digest);
	if (ret < 0) {
		pr_err("crypto_hash_final() failed for server digest\n");
		crypto_free_hash(tfm);
		goto out;
	}
	crypto_free_hash(tfm);

	chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
	pr_debug("[server] MD5 Server Digest: %s\n", response);

	/* ("connetication" typo lives in the log string; left untouched.) */
	if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
		pr_debug("[server] MD5 Digests do not match!\n\n");
		goto out;
	} else
		pr_debug("[server] MD5 Digests match, CHAP connetication"
				" successful.\n\n");
	/*
	 * One way authentication has succeeded, return now if mutual
	 * authentication is not enabled.
	 */
	if (!auth->authenticate_target) {
		kfree(challenge);
		kfree(challenge_binhex);
		return 0;
	}
	/*
	 * Get CHAP_I.
	 */
	if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
		pr_err("Could not find CHAP_I.\n");
		goto out;
	}

	/* HEX identifiers arrive with a "0x" prefix; skip it for strtoul. */
	if (type == HEX)
		id = (unsigned char)simple_strtoul((char *)&identifier[2],
					&endptr, 0);
	else
		id = (unsigned char)simple_strtoul(identifier, &endptr, 0);
	/*
	 * RFC 1994 says Identifier is no more than octet (8 bits).
	 */
	pr_debug("[server] Got CHAP_I=%d\n", id);
	/*
	 * Get CHAP_C.
	 */
	if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
			challenge, &type) < 0) {
		pr_err("Could not find CHAP_C.\n");
		goto out;
	}

	if (type != HEX) {
		pr_err("Could not find CHAP_C.\n");
		goto out;
	}
	pr_debug("[server] Got CHAP_C=%s\n", challenge);
	challenge_len = chap_string_to_hex(challenge_binhex, challenge,
				strlen(challenge));
	if (!challenge_len) {
		pr_err("Unable to convert incoming challenge\n");
		goto out;
	}
	/*
	 * Generate CHAP_N and CHAP_R for mutual authentication.
	 */
	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		pr_err("Unable to allocate struct crypto_hash\n");
		goto out;
	}
	desc.tfm = tfm;
	desc.flags = 0;

	ret = crypto_hash_init(&desc);
	if (ret < 0) {
		pr_err("crypto_hash_init() failed\n");
		crypto_free_hash(tfm);
		goto out;
	}

	/* Mutual digest: initiator-supplied id ... */
	sg_init_one(&sg, (void *)&id, 1);
	ret = crypto_hash_update(&desc, &sg, 1);
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for id\n");
		crypto_free_hash(tfm);
		goto out;
	}

	/* ... our mutual secret ... */
	sg_init_one(&sg, (void *)auth->password_mutual,
				strlen(auth->password_mutual));
	ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for"
				" password_mutual\n");
		crypto_free_hash(tfm);
		goto out;
	}
	/*
	 * Convert received challenge to binary hex.
	 */
	sg_init_one(&sg, (void *)challenge_binhex, challenge_len);
	ret = crypto_hash_update(&desc, &sg, challenge_len);
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for ma challenge\n");
		crypto_free_hash(tfm);
		goto out;
	}

	ret = crypto_hash_final(&desc, digest);
	if (ret < 0) {
		pr_err("crypto_hash_final() failed for ma digest\n");
		crypto_free_hash(tfm);
		goto out;
	}
	crypto_free_hash(tfm);
	/*
	 * Generate CHAP_N and CHAP_R.
	 */
	*nr_out_len = sprintf(nr_out_ptr, "CHAP_N=%s", auth->userid_mutual);
	*nr_out_len += 1;
	pr_debug("[server] Sending CHAP_N=%s\n", auth->userid_mutual);
	/*
	 * Convert response from binary hex to ascii hext.
	 */
	chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
	*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
			response);
	*nr_out_len += 1;
	pr_debug("[server] Sending CHAP_R=0x%s\n", response);
	auth_ret = 0;
out:
	kfree(challenge);
	kfree(challenge_binhex);
	return auth_ret;
}
435
436static int chap_got_response(
437 struct iscsi_conn *conn,
438 struct iscsi_node_auth *auth,
439 char *nr_in_ptr,
440 char *nr_out_ptr,
441 unsigned int *nr_out_len)
442{
443 struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
444
445 switch (chap->digest_type) {
446 case CHAP_DIGEST_MD5:
447 if (chap_server_compute_md5(conn, auth, nr_in_ptr,
448 nr_out_ptr, nr_out_len) < 0)
449 return -1;
450 return 0;
451 default:
452 pr_err("Unknown CHAP digest type %d!\n",
453 chap->digest_type);
454 return -1;
455 }
456}
457
/*
 * Server-side CHAP negotiation state machine, driven once per security
 * key exchange during the login phase.
 *
 * Returns 0 to continue the exchange (challenge sent, awaiting response),
 * 1 when authentication succeeded, 2 on failure.
 */
u32 chap_main_loop(
	struct iscsi_conn *conn,
	struct iscsi_node_auth *auth,
	char *in_text,
	char *out_text,
	int *in_len,
	int *out_len)
{
	struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;

	if (!chap) {
		/* First pass: parse CHAP_A, emit CHAP_A/CHAP_I/CHAP_C. */
		chap = chap_server_open(conn, auth, in_text, out_text, out_len);
		if (!chap)
			return 2;
		chap->chap_state = CHAP_STAGE_SERVER_AIC;
		return 0;
	} else if (chap->chap_state == CHAP_STAGE_SERVER_AIC) {
		/*
		 * Second pass: in_text holds the initiator's response;
		 * presumably convert_null_to_semi() rejoins the NUL-separated
		 * key=value pairs with ';' — confirm in the nego code.
		 */
		convert_null_to_semi(in_text, *in_len);
		if (chap_got_response(conn, auth, in_text, out_text,
				out_len) < 0) {
			chap_close(conn);
			return 2;
		}
		if (auth->authenticate_target)
			chap->chap_state = CHAP_STAGE_SERVER_NR;
		else
			*out_len = 0;
		/* Exchange finished either way; free the CHAP context. */
		chap_close(conn);
		return 1;
	}

	/* Any other state is unexpected; treat as authentication failure. */
	return 2;
}
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
new file mode 100644
index 000000000000..2f463c09626d
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -0,0 +1,31 @@
1#ifndef _ISCSI_CHAP_H_
2#define _ISCSI_CHAP_H_
3
4#define CHAP_DIGEST_MD5 5
5#define CHAP_DIGEST_SHA 6
6
7#define CHAP_CHALLENGE_LENGTH 16
8#define CHAP_CHALLENGE_STR_LEN 4096
9#define MAX_RESPONSE_LENGTH 64 /* sufficient for MD5 */
10#define MAX_CHAP_N_SIZE 512
11
12#define MD5_SIGNATURE_SIZE 16 /* 16 bytes in a MD5 message digest */
13
14#define CHAP_STAGE_CLIENT_A 1
15#define CHAP_STAGE_SERVER_AIC 2
16#define CHAP_STAGE_CLIENT_NR 3
17#define CHAP_STAGE_CLIENT_NRIC 4
18#define CHAP_STAGE_SERVER_NR 5
19
20extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *,
21 int *, int *);
22
23struct iscsi_chap {
24 unsigned char digest_type;
25 unsigned char id;
26 unsigned char challenge[CHAP_CHALLENGE_LENGTH];
27 unsigned int authenticate_target;
28 unsigned int chap_state;
29} ____cacheline_aligned;
30
31#endif /*** _ISCSI_CHAP_H_ ***/
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
new file mode 100644
index 000000000000..32bb92c44450
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -0,0 +1,1882 @@
1/*******************************************************************************
2 * This file contains the configfs implementation for iSCSI Target mode
3 * from the LIO-Target Project.
4 *
 * © Copyright 2007-2011 RisingTide Systems LLC.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 ****************************************************************************/
21
22#include <linux/configfs.h>
23#include <target/target_core_base.h>
24#include <target/target_core_transport.h>
25#include <target/target_core_fabric_ops.h>
26#include <target/target_core_fabric_configfs.h>
27#include <target/target_core_fabric_lib.h>
28#include <target/target_core_device.h>
29#include <target/target_core_tpg.h>
30#include <target/target_core_configfs.h>
31#include <target/configfs_macros.h>
32
33#include "iscsi_target_core.h"
34#include "iscsi_target_parameters.h"
35#include "iscsi_target_device.h"
36#include "iscsi_target_erl0.h"
37#include "iscsi_target_nodeattrib.h"
38#include "iscsi_target_tpg.h"
39#include "iscsi_target_util.h"
40#include "iscsi_target.h"
41#include "iscsi_target_stat.h"
42#include "iscsi_target_configfs.h"
43
/* Handle for this fabric's registration with the generic target configfs. */
struct target_fabric_configfs *lio_target_fabric_configfs;

/* Pairs a configfs attribute with its show/store callbacks. */
struct lio_target_configfs_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(void *, char *);
	ssize_t (*store)(void *, const char *, size_t);
};
51
52struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
53 struct config_item *item,
54 struct iscsi_tiqn **tiqn_out)
55{
56 struct se_portal_group *se_tpg = container_of(to_config_group(item),
57 struct se_portal_group, tpg_group);
58 struct iscsi_portal_group *tpg =
59 (struct iscsi_portal_group *)se_tpg->se_tpg_fabric_ptr;
60 int ret;
61
62 if (!tpg) {
63 pr_err("Unable to locate struct iscsi_portal_group "
64 "pointer\n");
65 return NULL;
66 }
67 ret = iscsit_get_tpg(tpg);
68 if (ret < 0)
69 return NULL;
70
71 *tiqn_out = tpg->tpg_tiqn;
72 return tpg;
73}
74
75/* Start items for lio_target_portal_cit */
76
77static ssize_t lio_target_np_show_sctp(
78 struct se_tpg_np *se_tpg_np,
79 char *page)
80{
81 struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
82 struct iscsi_tpg_np, se_tpg_np);
83 struct iscsi_tpg_np *tpg_np_sctp;
84 ssize_t rb;
85
86 tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
87 if (tpg_np_sctp)
88 rb = sprintf(page, "1\n");
89 else
90 rb = sprintf(page, "0\n");
91
92 return rb;
93}
94
/*
 * "sctp" attribute store: writing "1" registers an SCTP network portal
 * sharing this portal's existing np->np_sockaddr; writing "0" removes
 * the SCTP child portal again.  Any other value is rejected.
 */
static ssize_t lio_target_np_store_sctp(
	struct se_tpg_np *se_tpg_np,
	const char *page,
	size_t count)
{
	struct iscsi_np *np;
	struct iscsi_portal_group *tpg;
	struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
				struct iscsi_tpg_np, se_tpg_np);
	struct iscsi_tpg_np *tpg_np_sctp = NULL;
	char *endptr;
	u32 op;
	int ret;

	op = simple_strtoul(page, &endptr, 0);
	if ((op != 1) && (op != 0)) {
		pr_err("Illegal value for tpg_enable: %u\n", op);
		return -EINVAL;
	}
	np = tpg_np->tpg_np;
	if (!np) {
		pr_err("Unable to locate struct iscsi_np from"
				" struct iscsi_tpg_np\n");
		return -EINVAL;
	}

	tpg = tpg_np->tpg;
	/* Pin the TPG so it cannot disappear while portals are changed. */
	if (iscsit_get_tpg(tpg) < 0)
		return -EINVAL;

	if (op) {
		/*
		 * Use existing np->np_sockaddr for SCTP network portal reference
		 */
		tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
					np->np_ip, tpg_np, ISCSI_SCTP_TCP);
		if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
			goto out;
	} else {
		tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
		if (!tpg_np_sctp)
			goto out;

		ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp);
		if (ret < 0)
			goto out;
	}

	iscsit_put_tpg(tpg);
	return count;
out:
	/* Drop the TPG reference taken above before reporting failure. */
	iscsit_put_tpg(tpg);
	return -EINVAL;
}
149
/* Generate the "sctp" configfs attribute wired to the handlers above. */
TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR);

/* NULL-terminated attribute table for the per-portal configfs group. */
static struct configfs_attribute *lio_target_portal_attrs[] = {
	&lio_target_np_sctp.attr,
	NULL,
};
156
157/* Stop items for lio_target_portal_cit */
158
159/* Start items for lio_target_np_cit */
160
/* Maximum accepted length for an "$IP:$PORT" / "[$IPV6]:$PORT" string. */
#define MAX_PORTAL_LEN 256

/*
 * configfs mkdir callback for $TPG/np/$IP:$PORT (or [$IPV6]:$PORT).
 * Parses the directory name into a sockaddr (IPv6 when a leading "[" is
 * present, IPv4 otherwise), takes a TPG reference and registers a new
 * ISCSI_TCP network portal for this TPG.
 */
struct se_tpg_np *lio_target_call_addnptotpg(
	struct se_portal_group *se_tpg,
	struct config_group *group,
	const char *name)
{
	struct iscsi_portal_group *tpg;
	struct iscsi_tpg_np *tpg_np;
	char *str, *str2, *ip_str, *port_str;
	struct __kernel_sockaddr_storage sockaddr;
	struct sockaddr_in *sock_in;
	struct sockaddr_in6 *sock_in6;
	unsigned long port;
	int ret;
	char buf[MAX_PORTAL_LEN + 1];

	if (strlen(name) > MAX_PORTAL_LEN) {
		pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n",
			(int)strlen(name), MAX_PORTAL_LEN);
		return ERR_PTR(-EOVERFLOW);
	}
	/* Work on a local copy: parsing below writes NUL terminators. */
	memset(buf, 0, MAX_PORTAL_LEN + 1);
	snprintf(buf, MAX_PORTAL_LEN, "%s", name);

	memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage));

	str = strstr(buf, "[");
	if (str) {
		/* IPv6 portal: "[addr]:port" */
		const char *end;

		str2 = strstr(str, "]");
		if (!str2) {
			pr_err("Unable to locate trailing \"]\""
				" in IPv6 iSCSI network portal address\n");
			return ERR_PTR(-EINVAL);
		}
		str++; /* Skip over leading "[" */
		*str2 = '\0'; /* Terminate the IPv6 address */
		str2++; /* Skip over the "]" */
		port_str = strstr(str2, ":");
		if (!port_str) {
			pr_err("Unable to locate \":port\""
				" in IPv6 iSCSI network portal address\n");
			return ERR_PTR(-EINVAL);
		}
		*port_str = '\0'; /* Terminate string for IP */
		port_str++; /* Skip over ":" */

		ret = strict_strtoul(port_str, 0, &port);
		if (ret < 0) {
			pr_err("strict_strtoul() failed for port_str: %d\n", ret);
			return ERR_PTR(ret);
		}
		sock_in6 = (struct sockaddr_in6 *)&sockaddr;
		sock_in6->sin6_family = AF_INET6;
		sock_in6->sin6_port = htons((unsigned short)port);
		ret = in6_pton(str, IPV6_ADDRESS_SPACE,
				(void *)&sock_in6->sin6_addr.in6_u, -1, &end);
		if (ret <= 0) {
			pr_err("in6_pton returned: %d\n", ret);
			return ERR_PTR(-EINVAL);
		}
	} else {
		/* IPv4 portal: "addr:port" */
		str = ip_str = &buf[0];
		port_str = strstr(ip_str, ":");
		if (!port_str) {
			pr_err("Unable to locate \":port\""
				" in IPv4 iSCSI network portal address\n");
			return ERR_PTR(-EINVAL);
		}
		*port_str = '\0'; /* Terminate string for IP */
		port_str++; /* Skip over ":" */

		ret = strict_strtoul(port_str, 0, &port);
		if (ret < 0) {
			pr_err("strict_strtoul() failed for port_str: %d\n", ret);
			return ERR_PTR(ret);
		}
		sock_in = (struct sockaddr_in *)&sockaddr;
		sock_in->sin_family = AF_INET;
		sock_in->sin_port = htons((unsigned short)port);
		sock_in->sin_addr.s_addr = in_aton(ip_str);
	}
	tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
	ret = iscsit_get_tpg(tpg);
	if (ret < 0)
		return ERR_PTR(-EINVAL);

	pr_debug("LIO_Target_ConfigFS: REGISTER -> %s TPGT: %hu"
		" PORTAL: %s\n",
		config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
		tpg->tpgt, name);
	/*
	 * Assume ISCSI_TCP by default. Other network portals for other
	 * iSCSI fabrics:
	 *
	 * Traditional iSCSI over SCTP (initial support)
	 * iSER/TCP (TODO, hardware available)
	 * iSER/SCTP (TODO, software emulation with osc-iwarp)
	 * iSER/IB (TODO, hardware available)
	 *
	 * can be enabled with attributes under
	 * sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
	 *
	 */
	tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL,
				ISCSI_TCP);
	if (IS_ERR(tpg_np)) {
		iscsit_put_tpg(tpg);
		return ERR_PTR(PTR_ERR(tpg_np));
	}
	pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");

	iscsit_put_tpg(tpg);
	return &tpg_np->se_tpg_np;
}
278
279static void lio_target_call_delnpfromtpg(
280 struct se_tpg_np *se_tpg_np)
281{
282 struct iscsi_portal_group *tpg;
283 struct iscsi_tpg_np *tpg_np;
284 struct se_portal_group *se_tpg;
285 int ret;
286
287 tpg_np = container_of(se_tpg_np, struct iscsi_tpg_np, se_tpg_np);
288 tpg = tpg_np->tpg;
289 ret = iscsit_get_tpg(tpg);
290 if (ret < 0)
291 return;
292
293 se_tpg = &tpg->tpg_se_tpg;
294 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
295 " PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
296 tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port);
297
298 ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
299 if (ret < 0)
300 goto out;
301
302 pr_debug("LIO_Target_ConfigFS: delnpfromtpg done!\n");
303out:
304 iscsit_put_tpg(tpg);
305}
306
307/* End items for lio_target_np_cit */
308
309/* Start items for lio_target_nacl_attrib_cit */
310
/*
 * Generate configfs show/store handlers for one per-NodeACL attribute.
 * show prints the current u32 value; store parses an unsigned integer
 * and applies it through the matching iscsit_na_##name() setter, which
 * performs the actual validation.
 */
#define DEF_NACL_ATTRIB(name)						\
static ssize_t iscsi_nacl_attrib_show_##name(				\
	struct se_node_acl *se_nacl,					\
	char *page)							\
{									\
	struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
					se_node_acl);			\
									\
	return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name);	\
}									\
									\
static ssize_t iscsi_nacl_attrib_store_##name(				\
	struct se_node_acl *se_nacl,					\
	const char *page,						\
	size_t count)							\
{									\
	struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
					se_node_acl);			\
	char *endptr;							\
	u32 val;							\
	int ret;							\
									\
	val = simple_strtoul(page, &endptr, 0);				\
	ret = iscsit_na_##name(nacl, val);				\
	if (ret < 0)							\
		return ret;						\
									\
	return count;							\
}
340
/* Bind each generated handler pair to a mode-flagged configfs attribute. */
#define NACL_ATTR(_name, _mode)	TF_NACL_ATTRIB_ATTR(iscsi, _name, _mode);
/*
 * Define iscsi_node_attrib_s_dataout_timeout
 */
DEF_NACL_ATTRIB(dataout_timeout);
NACL_ATTR(dataout_timeout, S_IRUGO | S_IWUSR);
/*
 * Define iscsi_node_attrib_s_dataout_timeout_retries
 */
DEF_NACL_ATTRIB(dataout_timeout_retries);
NACL_ATTR(dataout_timeout_retries, S_IRUGO | S_IWUSR);
/*
 * Define iscsi_node_attrib_s_default_erl
 */
DEF_NACL_ATTRIB(default_erl);
NACL_ATTR(default_erl, S_IRUGO | S_IWUSR);
/*
 * Define iscsi_node_attrib_s_nopin_timeout
 */
DEF_NACL_ATTRIB(nopin_timeout);
NACL_ATTR(nopin_timeout, S_IRUGO | S_IWUSR);
/*
 * Define iscsi_node_attrib_s_nopin_response_timeout
 */
DEF_NACL_ATTRIB(nopin_response_timeout);
NACL_ATTR(nopin_response_timeout, S_IRUGO | S_IWUSR);
/*
 * Define iscsi_node_attrib_s_random_datain_pdu_offsets
 */
DEF_NACL_ATTRIB(random_datain_pdu_offsets);
NACL_ATTR(random_datain_pdu_offsets, S_IRUGO | S_IWUSR);
/*
 * Define iscsi_node_attrib_s_random_datain_seq_offsets
 */
DEF_NACL_ATTRIB(random_datain_seq_offsets);
NACL_ATTR(random_datain_seq_offsets, S_IRUGO | S_IWUSR);
/*
 * Define iscsi_node_attrib_s_random_r2t_offsets
 */
DEF_NACL_ATTRIB(random_r2t_offsets);
NACL_ATTR(random_r2t_offsets, S_IRUGO | S_IWUSR);

/* NULL-terminated attribute table for lio_target_nacl_attrib_cit. */
static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
	&iscsi_nacl_attrib_dataout_timeout.attr,
	&iscsi_nacl_attrib_dataout_timeout_retries.attr,
	&iscsi_nacl_attrib_default_erl.attr,
	&iscsi_nacl_attrib_nopin_timeout.attr,
	&iscsi_nacl_attrib_nopin_response_timeout.attr,
	&iscsi_nacl_attrib_random_datain_pdu_offsets.attr,
	&iscsi_nacl_attrib_random_datain_seq_offsets.attr,
	&iscsi_nacl_attrib_random_r2t_offsets.attr,
	NULL,
};
394
395/* End items for lio_target_nacl_attrib_cit */
396
397/* Start items for lio_target_nacl_auth_cit */
398
/*
 * Generate show/store helpers for one string member of iscsi_node_auth
 * (CHAP userid/password material).  CAP_SYS_ADMIN is required in both
 * directions.  Storing the literal "NULL" clears the associated naf_flags
 * bit; anything else sets it.  Mutual (target) authentication is enabled
 * only once both the IN userid and IN password flags are set.
 */
#define __DEF_NACL_AUTH_STR(prefix, name, flags)			\
static ssize_t __iscsi_##prefix##_show_##name(				\
	struct iscsi_node_acl *nacl,					\
	char *page)							\
{									\
	struct iscsi_node_auth *auth = &nacl->node_auth;		\
									\
	if (!capable(CAP_SYS_ADMIN))					\
		return -EPERM;						\
	return snprintf(page, PAGE_SIZE, "%s\n", auth->name);		\
}									\
									\
static ssize_t __iscsi_##prefix##_store_##name(				\
	struct iscsi_node_acl *nacl,					\
	const char *page,						\
	size_t count)							\
{									\
	struct iscsi_node_auth *auth = &nacl->node_auth;		\
									\
	if (!capable(CAP_SYS_ADMIN))					\
		return -EPERM;						\
									\
	snprintf(auth->name, PAGE_SIZE, "%s", page);			\
	if (!strncmp("NULL", auth->name, 4))				\
		auth->naf_flags &= ~flags;				\
	else								\
		auth->naf_flags |= flags;				\
									\
	if ((auth->naf_flags & NAF_USERID_IN_SET) &&			\
	    (auth->naf_flags & NAF_PASSWORD_IN_SET))			\
		auth->authenticate_target = 1;				\
	else								\
		auth->authenticate_target = 0;				\
									\
	return count;							\
}
435
/*
 * Generate a read-only show helper for one integer member of
 * iscsi_node_auth; CAP_SYS_ADMIN is required to read it.
 */
#define __DEF_NACL_AUTH_INT(prefix, name)				\
static ssize_t __iscsi_##prefix##_show_##name(				\
	struct iscsi_node_acl *nacl,					\
	char *page)							\
{									\
	struct iscsi_node_auth *auth = &nacl->node_auth;		\
									\
	if (!capable(CAP_SYS_ADMIN))					\
		return -EPERM;						\
									\
	return snprintf(page, PAGE_SIZE, "%d\n", auth->name);		\
}
448
/*
 * Public wrappers: adapt the __iscsi_nacl_auth_* helpers (which take a
 * struct iscsi_node_acl) to the struct se_node_acl signature expected by
 * the generic target configfs attribute macros.
 */
#define DEF_NACL_AUTH_STR(name, flags)					\
	__DEF_NACL_AUTH_STR(nacl_auth, name, flags)			\
static ssize_t iscsi_nacl_auth_show_##name(				\
	struct se_node_acl *nacl,					\
	char *page)							\
{									\
	return __iscsi_nacl_auth_show_##name(container_of(nacl,		\
			struct iscsi_node_acl, se_node_acl), page);	\
}									\
static ssize_t iscsi_nacl_auth_store_##name(				\
	struct se_node_acl *nacl,					\
	const char *page,						\
	size_t count)							\
{									\
	return __iscsi_nacl_auth_store_##name(container_of(nacl,	\
			struct iscsi_node_acl, se_node_acl), page, count); \
}

/* Read-only variant of the wrapper above (show handler only). */
#define DEF_NACL_AUTH_INT(name)						\
	__DEF_NACL_AUTH_INT(nacl_auth, name)				\
static ssize_t iscsi_nacl_auth_show_##name(				\
	struct se_node_acl *nacl,					\
	char *page)							\
{									\
	return __iscsi_nacl_auth_show_##name(container_of(nacl,		\
			struct iscsi_node_acl, se_node_acl), page);	\
}
476
/* Bind the generated auth handlers to configfs attributes (RW and RO). */
#define AUTH_ATTR(_name, _mode)	TF_NACL_AUTH_ATTR(iscsi, _name, _mode);
#define AUTH_ATTR_RO(_name)	TF_NACL_AUTH_ATTR_RO(iscsi, _name);

/*
 * One-way authentication userid
 */
DEF_NACL_AUTH_STR(userid, NAF_USERID_SET);
AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
/*
 * One-way authentication password
 */
DEF_NACL_AUTH_STR(password, NAF_PASSWORD_SET);
AUTH_ATTR(password, S_IRUGO | S_IWUSR);
/*
 * Enforce mutual authentication
 */
DEF_NACL_AUTH_INT(authenticate_target);
AUTH_ATTR_RO(authenticate_target);
/*
 * Mutual authentication userid
 */
DEF_NACL_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
/*
 * Mutual authentication password
 */
DEF_NACL_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);

/* NULL-terminated attribute table for the per-NodeACL auth group. */
static struct configfs_attribute *lio_target_nacl_auth_attrs[] = {
	&iscsi_nacl_auth_userid.attr,
	&iscsi_nacl_auth_password.attr,
	&iscsi_nacl_auth_authenticate_target.attr,
	&iscsi_nacl_auth_userid_mutual.attr,
	&iscsi_nacl_auth_password_mutual.attr,
	NULL,
};
514
515/* End items for lio_target_nacl_auth_cit */
516
517/* Start items for lio_target_nacl_param_cit */
518
/*
 * Generate a read-only handler exposing one negotiated session parameter
 * for the NodeACL's currently active session.  nacl_sess_lock guards the
 * nacl_sess pointer; when no session is logged in, a placeholder line is
 * returned instead of a value.
 */
#define DEF_NACL_PARAM(name)						\
static ssize_t iscsi_nacl_param_show_##name(				\
	struct se_node_acl *se_nacl,					\
	char *page)							\
{									\
	struct iscsi_session *sess;					\
	struct se_session *se_sess;					\
	ssize_t rb;							\
									\
	spin_lock_bh(&se_nacl->nacl_sess_lock);				\
	se_sess = se_nacl->nacl_sess;					\
	if (!se_sess) {							\
		rb = snprintf(page, PAGE_SIZE,				\
				"No Active iSCSI Session\n");		\
	} else {							\
		sess = se_sess->fabric_sess_ptr;			\
		rb = snprintf(page, PAGE_SIZE, "%u\n",			\
				(u32)sess->sess_ops->name);		\
	}								\
	spin_unlock_bh(&se_nacl->nacl_sess_lock);			\
									\
	return rb;							\
}
542
/* All per-session parameters are read-only at the NodeACL level. */
#define NACL_PARAM_ATTR(_name)	TF_NACL_PARAM_ATTR_RO(iscsi, _name);

DEF_NACL_PARAM(MaxConnections);
NACL_PARAM_ATTR(MaxConnections);

DEF_NACL_PARAM(InitialR2T);
NACL_PARAM_ATTR(InitialR2T);

DEF_NACL_PARAM(ImmediateData);
NACL_PARAM_ATTR(ImmediateData);

DEF_NACL_PARAM(MaxBurstLength);
NACL_PARAM_ATTR(MaxBurstLength);

DEF_NACL_PARAM(FirstBurstLength);
NACL_PARAM_ATTR(FirstBurstLength);

DEF_NACL_PARAM(DefaultTime2Wait);
NACL_PARAM_ATTR(DefaultTime2Wait);

DEF_NACL_PARAM(DefaultTime2Retain);
NACL_PARAM_ATTR(DefaultTime2Retain);

DEF_NACL_PARAM(MaxOutstandingR2T);
NACL_PARAM_ATTR(MaxOutstandingR2T);

DEF_NACL_PARAM(DataPDUInOrder);
NACL_PARAM_ATTR(DataPDUInOrder);

DEF_NACL_PARAM(DataSequenceInOrder);
NACL_PARAM_ATTR(DataSequenceInOrder);

DEF_NACL_PARAM(ErrorRecoveryLevel);
NACL_PARAM_ATTR(ErrorRecoveryLevel);

/* NULL-terminated attribute table for lio_target_nacl_param_cit. */
static struct configfs_attribute *lio_target_nacl_param_attrs[] = {
	&iscsi_nacl_param_MaxConnections.attr,
	&iscsi_nacl_param_InitialR2T.attr,
	&iscsi_nacl_param_ImmediateData.attr,
	&iscsi_nacl_param_MaxBurstLength.attr,
	&iscsi_nacl_param_FirstBurstLength.attr,
	&iscsi_nacl_param_DefaultTime2Wait.attr,
	&iscsi_nacl_param_DefaultTime2Retain.attr,
	&iscsi_nacl_param_MaxOutstandingR2T.attr,
	&iscsi_nacl_param_DataPDUInOrder.attr,
	&iscsi_nacl_param_DataSequenceInOrder.attr,
	&iscsi_nacl_param_ErrorRecoveryLevel.attr,
	NULL,
};
592
593/* End items for lio_target_nacl_param_cit */
594
595/* Start items for lio_target_acl_cit */
596
/*
 * "info" attribute: render a human-readable summary of the initiator's
 * active iSCSI session (identifiers, state, CmdSN window) followed by a
 * line per connection.  All reads occur under nacl_sess_lock (and
 * sess->conn_lock for the connection list) so the session cannot be torn
 * down while it is being formatted.
 */
static ssize_t lio_target_nacl_show_info(
	struct se_node_acl *se_nacl,
	char *page)
{
	struct iscsi_session *sess;
	struct iscsi_conn *conn;
	struct se_session *se_sess;
	ssize_t rb = 0;

	spin_lock_bh(&se_nacl->nacl_sess_lock);
	se_sess = se_nacl->nacl_sess;
	if (!se_sess) {
		rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
			" Endpoint: %s\n", se_nacl->initiatorname);
	} else {
		sess = se_sess->fabric_sess_ptr;

		if (sess->sess_ops->InitiatorName)
			rb += sprintf(page+rb, "InitiatorName: %s\n",
				sess->sess_ops->InitiatorName);
		if (sess->sess_ops->InitiatorAlias)
			rb += sprintf(page+rb, "InitiatorAlias: %s\n",
				sess->sess_ops->InitiatorAlias);

		rb += sprintf(page+rb, "LIO Session ID: %u "
			"ISID: 0x%02x %02x %02x %02x %02x %02x "
			"TSIH: %hu ", sess->sid,
			sess->isid[0], sess->isid[1], sess->isid[2],
			sess->isid[3], sess->isid[4], sess->isid[5],
			sess->tsih);
		rb += sprintf(page+rb, "SessionType: %s\n",
				(sess->sess_ops->SessionType) ?
				"Discovery" : "Normal");
		rb += sprintf(page+rb, "Session State: ");
		switch (sess->session_state) {
		case TARG_SESS_STATE_FREE:
			rb += sprintf(page+rb, "TARG_SESS_FREE\n");
			break;
		case TARG_SESS_STATE_ACTIVE:
			rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
			break;
		case TARG_SESS_STATE_LOGGED_IN:
			rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
			break;
		case TARG_SESS_STATE_FAILED:
			rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
			break;
		case TARG_SESS_STATE_IN_CONTINUE:
			rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
			break;
		default:
			rb += sprintf(page+rb, "ERROR: Unknown Session"
					" State!\n");
			break;
		}

		rb += sprintf(page+rb, "---------------------[iSCSI Session"
				" Values]-----------------------\n");
		rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
				" : MaxCmdSN : ITT : TTT\n");
		/* CmdSN/WC below is the current window: (Max - Exp) + 1. */
		rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
				" 0x%08x 0x%08x\n",
			sess->cmdsn_window,
			(sess->max_cmd_sn - sess->exp_cmd_sn) + 1,
			sess->exp_cmd_sn, sess->max_cmd_sn,
			sess->init_task_tag, sess->targ_xfer_tag);
		rb += sprintf(page+rb, "----------------------[iSCSI"
				" Connections]-------------------------\n");

		spin_lock(&sess->conn_lock);
		list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
			rb += sprintf(page+rb, "CID: %hu Connection"
					" State: ", conn->cid);
			switch (conn->conn_state) {
			case TARG_CONN_STATE_FREE:
				rb += sprintf(page+rb,
					"TARG_CONN_STATE_FREE\n");
				break;
			case TARG_CONN_STATE_XPT_UP:
				rb += sprintf(page+rb,
					"TARG_CONN_STATE_XPT_UP\n");
				break;
			case TARG_CONN_STATE_IN_LOGIN:
				rb += sprintf(page+rb,
					"TARG_CONN_STATE_IN_LOGIN\n");
				break;
			case TARG_CONN_STATE_LOGGED_IN:
				rb += sprintf(page+rb,
					"TARG_CONN_STATE_LOGGED_IN\n");
				break;
			case TARG_CONN_STATE_IN_LOGOUT:
				rb += sprintf(page+rb,
					"TARG_CONN_STATE_IN_LOGOUT\n");
				break;
			case TARG_CONN_STATE_LOGOUT_REQUESTED:
				rb += sprintf(page+rb,
					"TARG_CONN_STATE_LOGOUT_REQUESTED\n");
				break;
			case TARG_CONN_STATE_CLEANUP_WAIT:
				rb += sprintf(page+rb,
					"TARG_CONN_STATE_CLEANUP_WAIT\n");
				break;
			default:
				rb += sprintf(page+rb,
					"ERROR: Unknown Connection State!\n");
				break;
			}

			rb += sprintf(page+rb, " Address %s %s", conn->login_ip,
				(conn->network_transport == ISCSI_TCP) ?
				"TCP" : "SCTP");
			rb += sprintf(page+rb, " StatSN: 0x%08x\n",
				conn->stat_sn);
		}
		spin_unlock(&sess->conn_lock);
	}
	spin_unlock_bh(&se_nacl->nacl_sess_lock);

	return rb;
}

/* Read-only "info" attribute for the NodeACL base group. */
TF_NACL_BASE_ATTR_RO(lio_target, info);
719
/* "cmdsn_depth" show: report the ACL's current CmdSN queue depth. */
static ssize_t lio_target_nacl_show_cmdsn_depth(
	struct se_node_acl *se_nacl,
	char *page)
{
	return sprintf(page, "%u\n", se_nacl->queue_depth);
}
726
727static ssize_t lio_target_nacl_store_cmdsn_depth(
728 struct se_node_acl *se_nacl,
729 const char *page,
730 size_t count)
731{
732 struct se_portal_group *se_tpg = se_nacl->se_tpg;
733 struct iscsi_portal_group *tpg = container_of(se_tpg,
734 struct iscsi_portal_group, tpg_se_tpg);
735 struct config_item *acl_ci, *tpg_ci, *wwn_ci;
736 char *endptr;
737 u32 cmdsn_depth = 0;
738 int ret;
739
740 cmdsn_depth = simple_strtoul(page, &endptr, 0);
741 if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
742 pr_err("Passed cmdsn_depth: %u exceeds"
743 " TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth,
744 TA_DEFAULT_CMDSN_DEPTH_MAX);
745 return -EINVAL;
746 }
747 acl_ci = &se_nacl->acl_group.cg_item;
748 if (!acl_ci) {
749 pr_err("Unable to locatel acl_ci\n");
750 return -EINVAL;
751 }
752 tpg_ci = &acl_ci->ci_parent->ci_group->cg_item;
753 if (!tpg_ci) {
754 pr_err("Unable to locate tpg_ci\n");
755 return -EINVAL;
756 }
757 wwn_ci = &tpg_ci->ci_group->cg_item;
758 if (!wwn_ci) {
759 pr_err("Unable to locate config_item wwn_ci\n");
760 return -EINVAL;
761 }
762
763 if (iscsit_get_tpg(tpg) < 0)
764 return -EINVAL;
765 /*
766 * iscsit_tpg_set_initiator_node_queue_depth() assumes force=1
767 */
768 ret = iscsit_tpg_set_initiator_node_queue_depth(tpg,
769 config_item_name(acl_ci), cmdsn_depth, 1);
770
771 pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
772 "InitiatorName: %s\n", config_item_name(wwn_ci),
773 config_item_name(tpg_ci), cmdsn_depth,
774 config_item_name(acl_ci));
775
776 iscsit_put_tpg(tpg);
777 return (!ret) ? count : (ssize_t)ret;
778}
779
780TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
781
782static struct configfs_attribute *lio_target_initiator_attrs[] = {
783 &lio_target_nacl_info.attr,
784 &lio_target_nacl_cmdsn_depth.attr,
785 NULL,
786};
787
788static struct se_node_acl *lio_tpg_alloc_fabric_acl(
789 struct se_portal_group *se_tpg)
790{
791 struct iscsi_node_acl *acl;
792
793 acl = kzalloc(sizeof(struct iscsi_node_acl), GFP_KERNEL);
794 if (!acl) {
795 pr_err("Unable to allocate memory for struct iscsi_node_acl\n");
796 return NULL;
797 }
798
799 return &acl->se_node_acl;
800}
801
802static struct se_node_acl *lio_target_make_nodeacl(
803 struct se_portal_group *se_tpg,
804 struct config_group *group,
805 const char *name)
806{
807 struct config_group *stats_cg;
808 struct iscsi_node_acl *acl;
809 struct se_node_acl *se_nacl_new, *se_nacl;
810 struct iscsi_portal_group *tpg = container_of(se_tpg,
811 struct iscsi_portal_group, tpg_se_tpg);
812 u32 cmdsn_depth;
813
814 se_nacl_new = lio_tpg_alloc_fabric_acl(se_tpg);
815 if (!se_nacl_new)
816 return ERR_PTR(-ENOMEM);
817
818 acl = container_of(se_nacl_new, struct iscsi_node_acl,
819 se_node_acl);
820
821 cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
822 /*
823 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
824 * when converting a NdoeACL from demo mode -> explict
825 */
826 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
827 name, cmdsn_depth);
828 if (IS_ERR(se_nacl))
829 return se_nacl;
830
831 stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
832
833 stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
834 GFP_KERNEL);
835 if (!stats_cg->default_groups) {
836 pr_err("Unable to allocate memory for"
837 " stats_cg->default_groups\n");
838 core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
839 kfree(acl);
840 return ERR_PTR(-ENOMEM);
841 }
842
843 stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group;
844 stats_cg->default_groups[1] = NULL;
845 config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group,
846 "iscsi_sess_stats", &iscsi_stat_sess_cit);
847
848 return se_nacl;
849}
850
/*
 * configfs rmdir callback for an initiator NodeACL: release the fabric
 * statistics default groups, then tear down the ACL itself.
 */
static void lio_target_drop_nodeacl(
	struct se_node_acl *se_nacl)
{
	struct se_portal_group *se_tpg = se_nacl->se_tpg;
	struct iscsi_node_acl *acl = container_of(se_nacl,
			struct iscsi_node_acl, se_node_acl);
	struct config_item *df_item;
	struct config_group *stats_cg;
	int i;

	stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
	/* Drop the reference on each registered default stat group. */
	for (i = 0; stats_cg->default_groups[i]; i++) {
		df_item = &stats_cg->default_groups[i]->cg_item;
		stats_cg->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	kfree(stats_cg->default_groups);

	core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
	kfree(acl);
}
872
873/* End items for lio_target_acl_cit */
874
875/* Start items for lio_target_tpg_attrib_cit */
876
/*
 * Generate configfs show/store handlers for one u32 TPG attribute.
 * Both directions hold a TPG reference for the duration of the access;
 * store parses an unsigned integer and applies it through the matching
 * iscsit_ta_##name() setter, which validates the value.
 */
#define DEF_TPG_ATTRIB(name)						\
									\
static ssize_t iscsi_tpg_attrib_show_##name(				\
	struct se_portal_group *se_tpg,					\
	char *page)							\
{									\
	struct iscsi_portal_group *tpg = container_of(se_tpg,		\
			struct iscsi_portal_group, tpg_se_tpg);		\
	ssize_t rb;							\
									\
	if (iscsit_get_tpg(tpg) < 0)					\
		return -EINVAL;						\
									\
	rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name);	\
	iscsit_put_tpg(tpg);						\
	return rb;							\
}									\
									\
static ssize_t iscsi_tpg_attrib_store_##name(				\
	struct se_portal_group *se_tpg,					\
	const char *page,						\
	size_t count)							\
{									\
	struct iscsi_portal_group *tpg = container_of(se_tpg,		\
			struct iscsi_portal_group, tpg_se_tpg);		\
	char *endptr;							\
	u32 val;							\
	int ret;							\
									\
	if (iscsit_get_tpg(tpg) < 0)					\
		return -EINVAL;						\
									\
	val = simple_strtoul(page, &endptr, 0);				\
	ret = iscsit_ta_##name(tpg, val);				\
	if (ret < 0)							\
		goto out;						\
									\
	iscsit_put_tpg(tpg);						\
	return count;							\
out:									\
	iscsit_put_tpg(tpg);						\
	return ret;							\
}
920
/* Bind each generated TPG attribute handler pair to a configfs attr. */
#define TPG_ATTR(_name, _mode)	TF_TPG_ATTRIB_ATTR(iscsi, _name, _mode);

/*
 * Define iscsi_tpg_attrib_s_authentication
 */
DEF_TPG_ATTRIB(authentication);
TPG_ATTR(authentication, S_IRUGO | S_IWUSR);
/*
 * Define iscsi_tpg_attrib_s_login_timeout
 */
DEF_TPG_ATTRIB(login_timeout);
TPG_ATTR(login_timeout, S_IRUGO | S_IWUSR);
/*
 * Define iscsi_tpg_attrib_s_netif_timeout
 */
DEF_TPG_ATTRIB(netif_timeout);
TPG_ATTR(netif_timeout, S_IRUGO | S_IWUSR);
/*
 * Define iscsi_tpg_attrib_s_generate_node_acls
 */
DEF_TPG_ATTRIB(generate_node_acls);
TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
/*
 * Define iscsi_tpg_attrib_s_default_cmdsn_depth
 */
DEF_TPG_ATTRIB(default_cmdsn_depth);
TPG_ATTR(default_cmdsn_depth, S_IRUGO | S_IWUSR);
/*
 * Define iscsi_tpg_attrib_s_cache_dynamic_acls
 */
DEF_TPG_ATTRIB(cache_dynamic_acls);
TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
/*
 * Define iscsi_tpg_attrib_s_demo_mode_write_protect
 */
DEF_TPG_ATTRIB(demo_mode_write_protect);
TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
/*
 * Define iscsi_tpg_attrib_s_prod_mode_write_protect
 */
DEF_TPG_ATTRIB(prod_mode_write_protect);
TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);

/* NULL-terminated attribute table for lio_target_tpg_attrib_cit. */
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
	&iscsi_tpg_attrib_authentication.attr,
	&iscsi_tpg_attrib_login_timeout.attr,
	&iscsi_tpg_attrib_netif_timeout.attr,
	&iscsi_tpg_attrib_generate_node_acls.attr,
	&iscsi_tpg_attrib_default_cmdsn_depth.attr,
	&iscsi_tpg_attrib_cache_dynamic_acls.attr,
	&iscsi_tpg_attrib_demo_mode_write_protect.attr,
	&iscsi_tpg_attrib_prod_mode_write_protect.attr,
	NULL,
};
975
976/* End items for lio_target_tpg_attrib_cit */
977
978/* Start items for lio_target_tpg_param_cit */
979
/*
 * Generate configfs show/store handlers for one negotiable iSCSI login
 * parameter of the TPG.  show prints the parameter's current string
 * value; store builds a "name=value" pair and feeds it through
 * iscsi_change_param_value() against the TPG's parameter list.
 *
 * Fix vs. previous version: the old store unconditionally executed
 * buf[strlen(buf)-1] = '\0' to "kill newline", which silently dropped
 * the last character of any value written *without* a trailing newline.
 * The newline is now stripped only when actually present.
 */
#define DEF_TPG_PARAM(name)						\
static ssize_t iscsi_tpg_param_show_##name(				\
	struct se_portal_group *se_tpg,					\
	char *page)							\
{									\
	struct iscsi_portal_group *tpg = container_of(se_tpg,		\
			struct iscsi_portal_group, tpg_se_tpg);		\
	struct iscsi_param *param;					\
	ssize_t rb;							\
									\
	if (iscsit_get_tpg(tpg) < 0)					\
		return -EINVAL;						\
									\
	param = iscsi_find_param_from_key(__stringify(name),		\
				tpg->param_list);			\
	if (!param) {							\
		iscsit_put_tpg(tpg);					\
		return -EINVAL;						\
	}								\
	rb = snprintf(page, PAGE_SIZE, "%s\n", param->value);		\
									\
	iscsit_put_tpg(tpg);						\
	return rb;							\
}									\
static ssize_t iscsi_tpg_param_store_##name(				\
	struct se_portal_group *se_tpg,					\
	const char *page,						\
	size_t count)							\
{									\
	struct iscsi_portal_group *tpg = container_of(se_tpg,		\
			struct iscsi_portal_group, tpg_se_tpg);		\
	char *buf;							\
	size_t len;							\
	int ret;							\
									\
	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);				\
	if (!buf)							\
		return -ENOMEM;						\
	snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page);	\
	/* Strip a trailing newline, but only if one is present. */	\
	len = strlen(buf);						\
	if (len && buf[len - 1] == '\n')				\
		buf[len - 1] = '\0';					\
									\
	if (iscsit_get_tpg(tpg) < 0) {					\
		kfree(buf);						\
		return -EINVAL;						\
	}								\
									\
	ret = iscsi_change_param_value(buf, tpg->param_list, 1);	\
	if (ret < 0)							\
		goto out;						\
									\
	kfree(buf);							\
	iscsit_put_tpg(tpg);						\
	return count;							\
out:									\
	kfree(buf);							\
	iscsit_put_tpg(tpg);						\
	return -EINVAL;							\
}
1037
/* Bind each generated login-parameter handler pair to a configfs attr. */
#define TPG_PARAM_ATTR(_name, _mode) TF_TPG_PARAM_ATTR(iscsi, _name, _mode);

DEF_TPG_PARAM(AuthMethod);
TPG_PARAM_ATTR(AuthMethod, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(HeaderDigest);
TPG_PARAM_ATTR(HeaderDigest, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(DataDigest);
TPG_PARAM_ATTR(DataDigest, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(MaxConnections);
TPG_PARAM_ATTR(MaxConnections, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(TargetAlias);
TPG_PARAM_ATTR(TargetAlias, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(InitialR2T);
TPG_PARAM_ATTR(InitialR2T, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(ImmediateData);
TPG_PARAM_ATTR(ImmediateData, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(MaxRecvDataSegmentLength);
TPG_PARAM_ATTR(MaxRecvDataSegmentLength, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(MaxBurstLength);
TPG_PARAM_ATTR(MaxBurstLength, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(FirstBurstLength);
TPG_PARAM_ATTR(FirstBurstLength, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(DefaultTime2Wait);
TPG_PARAM_ATTR(DefaultTime2Wait, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(DefaultTime2Retain);
TPG_PARAM_ATTR(DefaultTime2Retain, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(MaxOutstandingR2T);
TPG_PARAM_ATTR(MaxOutstandingR2T, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(DataPDUInOrder);
TPG_PARAM_ATTR(DataPDUInOrder, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(DataSequenceInOrder);
TPG_PARAM_ATTR(DataSequenceInOrder, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(ErrorRecoveryLevel);
TPG_PARAM_ATTR(ErrorRecoveryLevel, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(IFMarker);
TPG_PARAM_ATTR(IFMarker, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(OFMarker);
TPG_PARAM_ATTR(OFMarker, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(IFMarkInt);
TPG_PARAM_ATTR(IFMarkInt, S_IRUGO | S_IWUSR);

DEF_TPG_PARAM(OFMarkInt);
TPG_PARAM_ATTR(OFMarkInt, S_IRUGO | S_IWUSR);

/* NULL-terminated attribute table for lio_target_tpg_param_cit. */
static struct configfs_attribute *lio_target_tpg_param_attrs[] = {
	&iscsi_tpg_param_AuthMethod.attr,
	&iscsi_tpg_param_HeaderDigest.attr,
	&iscsi_tpg_param_DataDigest.attr,
	&iscsi_tpg_param_MaxConnections.attr,
	&iscsi_tpg_param_TargetAlias.attr,
	&iscsi_tpg_param_InitialR2T.attr,
	&iscsi_tpg_param_ImmediateData.attr,
	&iscsi_tpg_param_MaxRecvDataSegmentLength.attr,
	&iscsi_tpg_param_MaxBurstLength.attr,
	&iscsi_tpg_param_FirstBurstLength.attr,
	&iscsi_tpg_param_DefaultTime2Wait.attr,
	&iscsi_tpg_param_DefaultTime2Retain.attr,
	&iscsi_tpg_param_MaxOutstandingR2T.attr,
	&iscsi_tpg_param_DataPDUInOrder.attr,
	&iscsi_tpg_param_DataSequenceInOrder.attr,
	&iscsi_tpg_param_ErrorRecoveryLevel.attr,
	&iscsi_tpg_param_IFMarker.attr,
	&iscsi_tpg_param_OFMarker.attr,
	&iscsi_tpg_param_IFMarkInt.attr,
	&iscsi_tpg_param_OFMarkInt.attr,
	NULL,
};
1123
1124/* End items for lio_target_tpg_param_cit */
1125
1126/* Start items for lio_target_tpg_cit */
1127
1128static ssize_t lio_target_tpg_show_enable(
1129 struct se_portal_group *se_tpg,
1130 char *page)
1131{
1132 struct iscsi_portal_group *tpg = container_of(se_tpg,
1133 struct iscsi_portal_group, tpg_se_tpg);
1134 ssize_t len;
1135
1136 spin_lock(&tpg->tpg_state_lock);
1137 len = sprintf(page, "%d\n",
1138 (tpg->tpg_state == TPG_STATE_ACTIVE) ? 1 : 0);
1139 spin_unlock(&tpg->tpg_state_lock);
1140
1141 return len;
1142}
1143
1144static ssize_t lio_target_tpg_store_enable(
1145 struct se_portal_group *se_tpg,
1146 const char *page,
1147 size_t count)
1148{
1149 struct iscsi_portal_group *tpg = container_of(se_tpg,
1150 struct iscsi_portal_group, tpg_se_tpg);
1151 char *endptr;
1152 u32 op;
1153 int ret = 0;
1154
1155 op = simple_strtoul(page, &endptr, 0);
1156 if ((op != 1) && (op != 0)) {
1157 pr_err("Illegal value for tpg_enable: %u\n", op);
1158 return -EINVAL;
1159 }
1160
1161 ret = iscsit_get_tpg(tpg);
1162 if (ret < 0)
1163 return -EINVAL;
1164
1165 if (op) {
1166 ret = iscsit_tpg_enable_portal_group(tpg);
1167 if (ret < 0)
1168 goto out;
1169 } else {
1170 /*
1171 * iscsit_tpg_disable_portal_group() assumes force=1
1172 */
1173 ret = iscsit_tpg_disable_portal_group(tpg, 1);
1174 if (ret < 0)
1175 goto out;
1176 }
1177
1178 iscsit_put_tpg(tpg);
1179 return count;
1180out:
1181 iscsit_put_tpg(tpg);
1182 return -EINVAL;
1183}
1184
/* Bind the show/store handlers above to the per-TPG "enable" attribute. */
TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR);

static struct configfs_attribute *lio_target_tpg_attrs[] = {
	&lio_target_tpg_enable.attr,
	NULL,
};
1191
1192/* End items for lio_target_tpg_cit */
1193
1194/* Start items for lio_target_tiqn_cit */
1195
1196struct se_portal_group *lio_target_tiqn_addtpg(
1197 struct se_wwn *wwn,
1198 struct config_group *group,
1199 const char *name)
1200{
1201 struct iscsi_portal_group *tpg;
1202 struct iscsi_tiqn *tiqn;
1203 char *tpgt_str, *end_ptr;
1204 int ret = 0;
1205 unsigned short int tpgt;
1206
1207 tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
1208 /*
1209 * Only tpgt_# directory groups can be created below
1210 * target/iscsi/iqn.superturodiskarry/
1211 */
1212 tpgt_str = strstr(name, "tpgt_");
1213 if (!tpgt_str) {
1214 pr_err("Unable to locate \"tpgt_#\" directory"
1215 " group\n");
1216 return NULL;
1217 }
1218 tpgt_str += 5; /* Skip ahead of "tpgt_" */
1219 tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
1220
1221 tpg = iscsit_alloc_portal_group(tiqn, tpgt);
1222 if (!tpg)
1223 return NULL;
1224
1225 ret = core_tpg_register(
1226 &lio_target_fabric_configfs->tf_ops,
1227 wwn, &tpg->tpg_se_tpg, (void *)tpg,
1228 TRANSPORT_TPG_TYPE_NORMAL);
1229 if (ret < 0)
1230 return NULL;
1231
1232 ret = iscsit_tpg_add_portal_group(tiqn, tpg);
1233 if (ret != 0)
1234 goto out;
1235
1236 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
1237 pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated TPG: %s\n",
1238 name);
1239 return &tpg->tpg_se_tpg;
1240out:
1241 core_tpg_deregister(&tpg->tpg_se_tpg);
1242 kfree(tpg);
1243 return NULL;
1244}
1245
1246void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
1247{
1248 struct iscsi_portal_group *tpg;
1249 struct iscsi_tiqn *tiqn;
1250
1251 tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
1252 tiqn = tpg->tpg_tiqn;
1253 /*
1254 * iscsit_tpg_del_portal_group() assumes force=1
1255 */
1256 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> Releasing TPG\n");
1257 iscsit_tpg_del_portal_group(tiqn, tpg, 1);
1258}
1259
1260/* End items for lio_target_tiqn_cit */
1261
1262/* Start LIO-Target TIQN struct contig_item lio_target_cit */
1263
/* Read-only fabric attribute reporting the LIO-Target version string. */
static ssize_t lio_target_wwn_show_attr_lio_version(
	struct target_fabric_configfs *tf,
	char *page)
{
	return sprintf(page, "RisingTide Systems Linux-iSCSI Target "ISCSIT_VERSION"\n");
}

TF_WWN_ATTR_RO(lio_target, lio_version);

static struct configfs_attribute *lio_target_wwn_attrs[] = {
	&lio_target_wwn_lio_version.attr,
	NULL,
};
1277
1278struct se_wwn *lio_target_call_coreaddtiqn(
1279 struct target_fabric_configfs *tf,
1280 struct config_group *group,
1281 const char *name)
1282{
1283 struct config_group *stats_cg;
1284 struct iscsi_tiqn *tiqn;
1285
1286 tiqn = iscsit_add_tiqn((unsigned char *)name);
1287 if (IS_ERR(tiqn))
1288 return ERR_PTR(PTR_ERR(tiqn));
1289 /*
1290 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
1291 */
1292 stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
1293
1294 stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
1295 GFP_KERNEL);
1296 if (!stats_cg->default_groups) {
1297 pr_err("Unable to allocate memory for"
1298 " stats_cg->default_groups\n");
1299 iscsit_del_tiqn(tiqn);
1300 return ERR_PTR(-ENOMEM);
1301 }
1302
1303 stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group;
1304 stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group;
1305 stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group;
1306 stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group;
1307 stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group;
1308 stats_cg->default_groups[5] = NULL;
1309 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group,
1310 "iscsi_instance", &iscsi_stat_instance_cit);
1311 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group,
1312 "iscsi_sess_err", &iscsi_stat_sess_err_cit);
1313 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group,
1314 "iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
1315 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group,
1316 "iscsi_login_stats", &iscsi_stat_login_cit);
1317 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group,
1318 "iscsi_logout_stats", &iscsi_stat_logout_cit);
1319
1320 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
1321 pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
1322 " %s\n", name);
1323 return &tiqn->tiqn_wwn;
1324}
1325
1326void lio_target_call_coredeltiqn(
1327 struct se_wwn *wwn)
1328{
1329 struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
1330 struct config_item *df_item;
1331 struct config_group *stats_cg;
1332 int i;
1333
1334 stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
1335 for (i = 0; stats_cg->default_groups[i]; i++) {
1336 df_item = &stats_cg->default_groups[i]->cg_item;
1337 stats_cg->default_groups[i] = NULL;
1338 config_item_put(df_item);
1339 }
1340 kfree(stats_cg->default_groups);
1341
1342 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
1343 tiqn->tiqn);
1344 iscsit_del_tiqn(tiqn);
1345}
1346
1347/* End LIO-Target TIQN struct contig_lio_target_cit */
1348
1349/* Start lio_target_discovery_auth_cit */
1350
/*
 * Discovery authentication attributes: each macro emits show/store
 * handlers that proxy to the single global discovery ACL
 * (iscsit_global->discovery_acl) via the __iscsi_disc_* helpers
 * generated by __DEF_NACL_AUTH_STR()/__DEF_NACL_AUTH_INT().
 */
#define DEF_DISC_AUTH_STR(name, flags) \
	__DEF_NACL_AUTH_STR(disc, name, flags) \
static ssize_t iscsi_disc_show_##name( \
	struct target_fabric_configfs *tf, \
	char *page) \
{ \
	return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
		page); \
} \
static ssize_t iscsi_disc_store_##name( \
	struct target_fabric_configfs *tf, \
	const char *page, \
	size_t count) \
{ \
	return __iscsi_disc_store_##name(&iscsit_global->discovery_acl, \
		page, count); \
}

/* Integer variant: show-only (no store handler is generated). */
#define DEF_DISC_AUTH_INT(name) \
	__DEF_NACL_AUTH_INT(disc, name) \
static ssize_t iscsi_disc_show_##name( \
	struct target_fabric_configfs *tf, \
	char *page) \
{ \
	return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
		page); \
}

#define DISC_AUTH_ATTR(_name, _mode) TF_DISC_ATTR(iscsi, _name, _mode)
#define DISC_AUTH_ATTR_RO(_name) TF_DISC_ATTR_RO(iscsi, _name)

/*
 * One-way authentication userid
 */
DEF_DISC_AUTH_STR(userid, NAF_USERID_SET);
DISC_AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
/*
 * One-way authentication password
 */
DEF_DISC_AUTH_STR(password, NAF_PASSWORD_SET);
DISC_AUTH_ATTR(password, S_IRUGO | S_IWUSR);
/*
 * Enforce mutual authentication (read-only flag)
 */
DEF_DISC_AUTH_INT(authenticate_target);
DISC_AUTH_ATTR_RO(authenticate_target);
/*
 * Mutual authentication userid
 */
DEF_DISC_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
DISC_AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
/*
 * Mutual authentication password
 */
DEF_DISC_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
DISC_AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
1407
1408/*
1409 * enforce_discovery_auth
1410 */
1411static ssize_t iscsi_disc_show_enforce_discovery_auth(
1412 struct target_fabric_configfs *tf,
1413 char *page)
1414{
1415 struct iscsi_node_auth *discovery_auth = &iscsit_global->discovery_acl.node_auth;
1416
1417 return sprintf(page, "%d\n", discovery_auth->enforce_discovery_auth);
1418}
1419
1420static ssize_t iscsi_disc_store_enforce_discovery_auth(
1421 struct target_fabric_configfs *tf,
1422 const char *page,
1423 size_t count)
1424{
1425 struct iscsi_param *param;
1426 struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg;
1427 char *endptr;
1428 u32 op;
1429
1430 op = simple_strtoul(page, &endptr, 0);
1431 if ((op != 1) && (op != 0)) {
1432 pr_err("Illegal value for enforce_discovery_auth:"
1433 " %u\n", op);
1434 return -EINVAL;
1435 }
1436
1437 if (!discovery_tpg) {
1438 pr_err("iscsit_global->discovery_tpg is NULL\n");
1439 return -EINVAL;
1440 }
1441
1442 param = iscsi_find_param_from_key(AUTHMETHOD,
1443 discovery_tpg->param_list);
1444 if (!param)
1445 return -EINVAL;
1446
1447 if (op) {
1448 /*
1449 * Reset the AuthMethod key to CHAP.
1450 */
1451 if (iscsi_update_param_value(param, CHAP) < 0)
1452 return -EINVAL;
1453
1454 discovery_tpg->tpg_attrib.authentication = 1;
1455 iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 1;
1456 pr_debug("LIO-CORE[0] Successfully enabled"
1457 " authentication enforcement for iSCSI"
1458 " Discovery TPG\n");
1459 } else {
1460 /*
1461 * Reset the AuthMethod key to CHAP,None
1462 */
1463 if (iscsi_update_param_value(param, "CHAP,None") < 0)
1464 return -EINVAL;
1465
1466 discovery_tpg->tpg_attrib.authentication = 0;
1467 iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 0;
1468 pr_debug("LIO-CORE[0] Successfully disabled"
1469 " authentication enforcement for iSCSI"
1470 " Discovery TPG\n");
1471 }
1472
1473 return count;
1474}
1475
DISC_AUTH_ATTR(enforce_discovery_auth, S_IRUGO | S_IWUSR);

/* Attribute table installed on the discovery_auth configfs group. */
static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
	&iscsi_disc_userid.attr,
	&iscsi_disc_password.attr,
	&iscsi_disc_authenticate_target.attr,
	&iscsi_disc_userid_mutual.attr,
	&iscsi_disc_password_mutual.attr,
	&iscsi_disc_enforce_discovery_auth.attr,
	NULL,
};
1487
1488/* End lio_target_discovery_auth_cit */
1489
1490/* Start functions for target_core_fabric_ops */
1491
/* Fabric name reported to target_core_mod; must remain "iSCSI". */
static char *iscsi_get_fabric_name(void)
{
	return "iSCSI";
}
1496
1497static u32 iscsi_get_task_tag(struct se_cmd *se_cmd)
1498{
1499 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1500
1501 return cmd->init_task_tag;
1502}
1503
1504static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
1505{
1506 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1507
1508 return cmd->i_state;
1509}
1510
1511static int iscsi_is_state_remove(struct se_cmd *se_cmd)
1512{
1513 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1514
1515 return (cmd->i_state == ISTATE_REMOVE);
1516}
1517
1518static int lio_sess_logged_in(struct se_session *se_sess)
1519{
1520 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1521 int ret;
1522 /*
1523 * Called with spin_lock_bh(&tpg_lock); and
1524 * spin_lock(&se_tpg->session_lock); held.
1525 */
1526 spin_lock(&sess->conn_lock);
1527 ret = (sess->session_state != TARG_SESS_STATE_LOGGED_IN);
1528 spin_unlock(&sess->conn_lock);
1529
1530 return ret;
1531}
1532
1533static u32 lio_sess_get_index(struct se_session *se_sess)
1534{
1535 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1536
1537 return sess->session_index;
1538}
1539
1540static u32 lio_sess_get_initiator_sid(
1541 struct se_session *se_sess,
1542 unsigned char *buf,
1543 u32 size)
1544{
1545 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1546 /*
1547 * iSCSI Initiator Session Identifier from RFC-3720.
1548 */
1549 return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
1550 sess->isid[0], sess->isid[1], sess->isid[2],
1551 sess->isid[3], sess->isid[4], sess->isid[5]);
1552}
1553
1554static int lio_queue_data_in(struct se_cmd *se_cmd)
1555{
1556 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1557
1558 cmd->i_state = ISTATE_SEND_DATAIN;
1559 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
1560 return 0;
1561}
1562
1563static int lio_write_pending(struct se_cmd *se_cmd)
1564{
1565 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1566
1567 if (!cmd->immediate_data && !cmd->unsolicited_data)
1568 return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 1);
1569
1570 return 0;
1571}
1572
1573static int lio_write_pending_status(struct se_cmd *se_cmd)
1574{
1575 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1576 int ret;
1577
1578 spin_lock_bh(&cmd->istate_lock);
1579 ret = !(cmd->cmd_flags & ICF_GOT_LAST_DATAOUT);
1580 spin_unlock_bh(&cmd->istate_lock);
1581
1582 return ret;
1583}
1584
1585static int lio_queue_status(struct se_cmd *se_cmd)
1586{
1587 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1588
1589 cmd->i_state = ISTATE_SEND_STATUS;
1590 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
1591 return 0;
1592}
1593
1594static u16 lio_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
1595{
1596 unsigned char *buffer = se_cmd->sense_buffer;
1597 /*
1598 * From RFC-3720 10.4.7. Data Segment - Sense and Response Data Segment
1599 * 16-bit SenseLength.
1600 */
1601 buffer[0] = ((sense_length >> 8) & 0xff);
1602 buffer[1] = (sense_length & 0xff);
1603 /*
1604 * Return two byte offset into allocated sense_buffer.
1605 */
1606 return 2;
1607}
1608
1609static u16 lio_get_fabric_sense_len(void)
1610{
1611 /*
1612 * Return two byte offset into allocated sense_buffer.
1613 */
1614 return 2;
1615}
1616
1617static int lio_queue_tm_rsp(struct se_cmd *se_cmd)
1618{
1619 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1620
1621 cmd->i_state = ISTATE_SEND_TASKMGTRSP;
1622 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
1623 return 0;
1624}
1625
1626static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
1627{
1628 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1629
1630 return &tpg->tpg_tiqn->tiqn[0];
1631}
1632
1633static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
1634{
1635 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1636
1637 return tpg->tpgt;
1638}
1639
1640static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
1641{
1642 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1643
1644 return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
1645}
1646
1647static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
1648{
1649 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1650
1651 return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls;
1652}
1653
1654static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
1655{
1656 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1657
1658 return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls;
1659}
1660
1661static int lio_tpg_check_demo_mode_write_protect(
1662 struct se_portal_group *se_tpg)
1663{
1664 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1665
1666 return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect;
1667}
1668
1669static int lio_tpg_check_prod_mode_write_protect(
1670 struct se_portal_group *se_tpg)
1671{
1672 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1673
1674 return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect;
1675}
1676
1677static void lio_tpg_release_fabric_acl(
1678 struct se_portal_group *se_tpg,
1679 struct se_node_acl *se_acl)
1680{
1681 struct iscsi_node_acl *acl = container_of(se_acl,
1682 struct iscsi_node_acl, se_node_acl);
1683 kfree(acl);
1684}
1685
/*
 * Called with spin_lock_bh(struct se_portal_group->session_lock) held..
 *
 * Also, this function calls iscsit_inc_session_usage_count() on the
 * struct iscsi_session in question.
 *
 * Returns 0 when the session is already falling back to ERL0, logging
 * out, or its Time2Retain timer has expired (nothing to do); returns 1
 * after marking the session for reinstatement, taking a usage-count
 * reference, and stopping the Time2Retain timer.
 */
static int lio_tpg_shutdown_session(struct se_session *se_sess)
{
	struct iscsi_session *sess = se_sess->fabric_sess_ptr;

	/* Sample/modify session state atomically w.r.t. connection changes. */
	spin_lock(&sess->conn_lock);
	if (atomic_read(&sess->session_fall_back_to_erl0) ||
	    atomic_read(&sess->session_logout) ||
	    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
		spin_unlock(&sess->conn_lock);
		return 0;
	}
	atomic_set(&sess->session_reinstatement, 1);
	spin_unlock(&sess->conn_lock);

	/* Reference is dropped later via lio_tpg_close_session(). */
	iscsit_inc_session_usage_count(sess);
	iscsit_stop_time2retain_timer(sess);

	return 1;
}
1711
1712/*
1713 * Calls iscsit_dec_session_usage_count() as inverse of
1714 * lio_tpg_shutdown_session()
1715 */
1716static void lio_tpg_close_session(struct se_session *se_sess)
1717{
1718 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1719 /*
1720 * If the iSCSI Session for the iSCSI Initiator Node exists,
1721 * forcefully shutdown the iSCSI NEXUS.
1722 */
1723 iscsit_stop_session(sess, 1, 1);
1724 iscsit_dec_session_usage_count(sess);
1725 iscsit_close_session(sess);
1726}
1727
1728static void lio_tpg_stop_session(
1729 struct se_session *se_sess,
1730 int sess_sleep,
1731 int conn_sleep)
1732{
1733 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1734
1735 iscsit_stop_session(sess, sess_sleep, conn_sleep);
1736}
1737
1738static void lio_tpg_fall_back_to_erl0(struct se_session *se_sess)
1739{
1740 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1741
1742 iscsit_fall_back_to_erl0(sess);
1743}
1744
1745static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
1746{
1747 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1748
1749 return tpg->tpg_tiqn->tiqn_index;
1750}
1751
1752static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
1753{
1754 struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
1755 se_node_acl);
1756
1757 ISCSI_NODE_ATTRIB(acl)->nacl = acl;
1758 iscsit_set_default_node_attribues(acl);
1759}
1760
1761static void lio_release_cmd(struct se_cmd *se_cmd)
1762{
1763 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1764
1765 iscsit_release_cmd(cmd);
1766}
1767
1768/* End functions for target_core_fabric_ops */
1769
/*
 * Allocate and register the "iscsi" fabric with target_core configfs:
 * wire up every target_core_fabric_ops callback and the per-group
 * configfs attribute tables, then publish the handle in
 * lio_target_fabric_configfs.  Returns 0 on success or a negative errno.
 */
int iscsi_target_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	int ret;

	lio_target_fabric_configfs = NULL;
	fabric = target_fabric_configfs_init(THIS_MODULE, "iscsi");
	if (IS_ERR(fabric)) {
		pr_err("target_fabric_configfs_init() for"
				" LIO-Target failed!\n");
		return PTR_ERR(fabric);
	}
	/*
	 * Setup the fabric API of function pointers used by target_core_mod..
	 */
	fabric->tf_ops.get_fabric_name = &iscsi_get_fabric_name;
	fabric->tf_ops.get_fabric_proto_ident = &iscsi_get_fabric_proto_ident;
	fabric->tf_ops.tpg_get_wwn = &lio_tpg_get_endpoint_wwn;
	fabric->tf_ops.tpg_get_tag = &lio_tpg_get_tag;
	fabric->tf_ops.tpg_get_default_depth = &lio_tpg_get_default_depth;
	fabric->tf_ops.tpg_get_pr_transport_id = &iscsi_get_pr_transport_id;
	fabric->tf_ops.tpg_get_pr_transport_id_len =
				&iscsi_get_pr_transport_id_len;
	fabric->tf_ops.tpg_parse_pr_out_transport_id =
				&iscsi_parse_pr_out_transport_id;
	fabric->tf_ops.tpg_check_demo_mode = &lio_tpg_check_demo_mode;
	fabric->tf_ops.tpg_check_demo_mode_cache =
				&lio_tpg_check_demo_mode_cache;
	fabric->tf_ops.tpg_check_demo_mode_write_protect =
				&lio_tpg_check_demo_mode_write_protect;
	fabric->tf_ops.tpg_check_prod_mode_write_protect =
				&lio_tpg_check_prod_mode_write_protect;
	fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl;
	fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl;
	fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index;
	fabric->tf_ops.release_cmd = &lio_release_cmd;
	fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session;
	fabric->tf_ops.close_session = &lio_tpg_close_session;
	fabric->tf_ops.stop_session = &lio_tpg_stop_session;
	fabric->tf_ops.fall_back_to_erl0 = &lio_tpg_fall_back_to_erl0;
	fabric->tf_ops.sess_logged_in = &lio_sess_logged_in;
	fabric->tf_ops.sess_get_index = &lio_sess_get_index;
	fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid;
	fabric->tf_ops.write_pending = &lio_write_pending;
	fabric->tf_ops.write_pending_status = &lio_write_pending_status;
	fabric->tf_ops.set_default_node_attributes =
				&lio_set_default_node_attributes;
	fabric->tf_ops.get_task_tag = &iscsi_get_task_tag;
	fabric->tf_ops.get_cmd_state = &iscsi_get_cmd_state;
	fabric->tf_ops.queue_data_in = &lio_queue_data_in;
	fabric->tf_ops.queue_status = &lio_queue_status;
	fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
	fabric->tf_ops.set_fabric_sense_len = &lio_set_fabric_sense_len;
	fabric->tf_ops.get_fabric_sense_len = &lio_get_fabric_sense_len;
	fabric->tf_ops.is_state_remove = &iscsi_is_state_remove;
	/*
	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
	 */
	fabric->tf_ops.fabric_make_wwn = &lio_target_call_coreaddtiqn;
	fabric->tf_ops.fabric_drop_wwn = &lio_target_call_coredeltiqn;
	fabric->tf_ops.fabric_make_tpg = &lio_target_tiqn_addtpg;
	fabric->tf_ops.fabric_drop_tpg = &lio_target_tiqn_deltpg;
	/* No generic port link/unlink hooks: LIO manages portals itself. */
	fabric->tf_ops.fabric_post_link = NULL;
	fabric->tf_ops.fabric_pre_unlink = NULL;
	fabric->tf_ops.fabric_make_np = &lio_target_call_addnptotpg;
	fabric->tf_ops.fabric_drop_np = &lio_target_call_delnpfromtpg;
	fabric->tf_ops.fabric_make_nodeacl = &lio_target_make_nodeacl;
	fabric->tf_ops.fabric_drop_nodeacl = &lio_target_drop_nodeacl;
	/*
	 * Setup default attribute lists for the various fabric->tf_cit_tmpl
	 * struct config_item_type's
	 */
	TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;

	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() for"
				" LIO-Target failed!\n");
		target_fabric_configfs_free(fabric);
		return ret;
	}

	lio_target_fabric_configfs = fabric;
	pr_debug("LIO_TARGET[0] - Set fabric ->"
			" lio_target_fabric_configfs\n");
	return 0;
}
1866
1867
1868void iscsi_target_deregister_configfs(void)
1869{
1870 if (!lio_target_fabric_configfs)
1871 return;
1872 /*
1873 * Shutdown discovery sessions and disable discovery TPG
1874 */
1875 if (iscsit_global->discovery_tpg)
1876 iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
1877
1878 target_fabric_configfs_deregister(lio_target_fabric_configfs);
1879 lio_target_fabric_configfs = NULL;
1880 pr_debug("LIO_TARGET[0] - Cleared"
1881 " lio_target_fabric_configfs\n");
1882}
diff --git a/drivers/target/iscsi/iscsi_target_configfs.h b/drivers/target/iscsi/iscsi_target_configfs.h
new file mode 100644
index 000000000000..8cd5a63c4edc
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_configfs.h
@@ -0,0 +1,7 @@
#ifndef ISCSI_TARGET_CONFIGFS_H
#define ISCSI_TARGET_CONFIGFS_H

/* Register/unregister the iSCSI fabric module with target_core configfs. */
extern int iscsi_target_register_configfs(void);
extern void iscsi_target_deregister_configfs(void);

#endif /* ISCSI_TARGET_CONFIGFS_H */
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
new file mode 100644
index 000000000000..470ed551eeb5
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -0,0 +1,859 @@
1#ifndef ISCSI_TARGET_CORE_H
2#define ISCSI_TARGET_CORE_H
3
4#include <linux/in.h>
5#include <linux/configfs.h>
6#include <net/sock.h>
7#include <net/tcp.h>
8#include <scsi/scsi_cmnd.h>
9#include <scsi/iscsi_proto.h>
10#include <target/target_core_base.h>
11
#define ISCSIT_VERSION			"v4.1.0-rc1"
#define ISCSI_MAX_DATASN_MISSING_COUNT	16
#define ISCSI_TX_THREAD_TCP_TIMEOUT	2
#define ISCSI_RX_THREAD_TCP_TIMEOUT	2
#define SECONDS_FOR_ASYNC_LOGOUT	10
#define SECONDS_FOR_ASYNC_TEXT		10
#define SECONDS_FOR_LOGOUT_COMP		15
#define WHITE_SPACE			" \t\v\f\n\r"

/*
 * struct iscsi_node_attrib sanity values: default / max / min bounds
 * enforced when per-node attributes are written via configfs.
 */
#define NA_DATAOUT_TIMEOUT		3
#define NA_DATAOUT_TIMEOUT_MAX		60
/* NOTE(review): "_MIX" looks like a typo for "_MIN", but this is the
 * symbol name used elsewhere in the driver — do not rename in isolation. */
#define NA_DATAOUT_TIMEOUT_MIX		2
#define NA_DATAOUT_TIMEOUT_RETRIES	5
#define NA_DATAOUT_TIMEOUT_RETRIES_MAX	15
#define NA_DATAOUT_TIMEOUT_RETRIES_MIN	1
#define NA_NOPIN_TIMEOUT		5
#define NA_NOPIN_TIMEOUT_MAX		60
#define NA_NOPIN_TIMEOUT_MIN		3
#define NA_NOPIN_RESPONSE_TIMEOUT	5
#define NA_NOPIN_RESPONSE_TIMEOUT_MAX	60
#define NA_NOPIN_RESPONSE_TIMEOUT_MIN	3
#define NA_RANDOM_DATAIN_PDU_OFFSETS	0
#define NA_RANDOM_DATAIN_SEQ_OFFSETS	0
#define NA_RANDOM_R2T_OFFSETS		0
#define NA_DEFAULT_ERL			0
#define NA_DEFAULT_ERL_MAX		2
#define NA_DEFAULT_ERL_MIN		0

/* struct iscsi_tpg_attrib sanity values (per-TPG configfs attributes) */
#define TA_AUTHENTICATION		1
#define TA_LOGIN_TIMEOUT		15
#define TA_LOGIN_TIMEOUT_MAX		30
#define TA_LOGIN_TIMEOUT_MIN		5
#define TA_NETIF_TIMEOUT		2
#define TA_NETIF_TIMEOUT_MAX		15
#define TA_NETIF_TIMEOUT_MIN		2
#define TA_GENERATE_NODE_ACLS		0
#define TA_DEFAULT_CMDSN_DEPTH		16
#define TA_DEFAULT_CMDSN_DEPTH_MAX	512
#define TA_DEFAULT_CMDSN_DEPTH_MIN	1
#define TA_CACHE_DYNAMIC_ACLS		0
/* Enabled by default in demo mode (generic_node_acls=1) */
#define TA_DEMO_MODE_WRITE_PROTECT	1
/* Disabled by default in production mode w/ explicit ACLs */
#define TA_PROD_MODE_WRITE_PROTECT	0
#define TA_CACHE_CORE_NPS		0
59
/* Network transports selectable for a TPG network portal */
enum tpg_np_network_transport_table {
	ISCSI_TCP				= 0,
	ISCSI_SCTP_TCP				= 1,
	ISCSI_SCTP_UDP				= 2,
	ISCSI_IWARP_TCP				= 3,
	ISCSI_IWARP_SCTP			= 4,
	ISCSI_INFINIBAND			= 5,
};

/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */
enum target_conn_state_table {
	TARG_CONN_STATE_FREE			= 0x1,
	TARG_CONN_STATE_XPT_UP			= 0x3,
	TARG_CONN_STATE_IN_LOGIN		= 0x4,
	TARG_CONN_STATE_LOGGED_IN		= 0x5,
	TARG_CONN_STATE_IN_LOGOUT		= 0x6,
	TARG_CONN_STATE_LOGOUT_REQUESTED	= 0x7,
	TARG_CONN_STATE_CLEANUP_WAIT		= 0x8,
};

/* RFC-3720 7.3.2 Session State Diagram for a Target */
enum target_sess_state_table {
	TARG_SESS_STATE_FREE			= 0x1,
	TARG_SESS_STATE_ACTIVE			= 0x2,
	TARG_SESS_STATE_LOGGED_IN		= 0x3,
	TARG_SESS_STATE_FAILED			= 0x4,
	TARG_SESS_STATE_IN_CONTINUE		= 0x5,
};

/* struct iscsi_data_count->type */
enum data_count_type {
	ISCSI_RX_DATA	= 1,
	ISCSI_TX_DATA	= 2,
};

/* struct iscsi_datain_req->dr_complete */
enum datain_req_comp_table {
	DATAIN_COMPLETE_NORMAL			= 1,
	DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
	DATAIN_COMPLETE_CONNECTION_RECOVERY	= 3,
};

/* struct iscsi_datain_req->recovery */
enum datain_req_rec_table {
	DATAIN_WITHIN_COMMAND_RECOVERY		= 1,
	DATAIN_CONNECTION_RECOVERY		= 2,
};

/* struct iscsi_portal_group->state */
enum tpg_state_table {
	TPG_STATE_FREE				= 0,
	TPG_STATE_ACTIVE			= 1,
	TPG_STATE_INACTIVE			= 2,
	TPG_STATE_COLD_RESET			= 3,
};

/* struct iscsi_tiqn->tiqn_state */
enum tiqn_state_table {
	TIQN_STATE_ACTIVE			= 1,
	TIQN_STATE_SHUTDOWN			= 2,
};

/* struct iscsi_cmd->cmd_flags (bitmask) */
enum cmd_flags_table {
	ICF_GOT_LAST_DATAOUT			= 0x00000001,
	ICF_GOT_DATACK_SNACK			= 0x00000002,
	ICF_NON_IMMEDIATE_UNSOLICITED_DATA	= 0x00000004,
	ICF_SENT_LAST_R2T			= 0x00000008,
	ICF_WITHIN_COMMAND_RECOVERY		= 0x00000010,
	ICF_CONTIG_MEMORY			= 0x00000020,
	ICF_ATTACHED_TO_RQUEUE			= 0x00000040,
	ICF_OOO_CMDSN				= 0x00000080,
	ICF_REJECT_FAIL_CONN			= 0x00000100,
};

/* struct iscsi_cmd->i_state (per-command response state machine) */
enum cmd_i_state_table {
	ISTATE_NO_STATE			= 0,
	ISTATE_NEW_CMD			= 1,
	ISTATE_DEFERRED_CMD		= 2,
	ISTATE_UNSOLICITED_DATA		= 3,
	ISTATE_RECEIVE_DATAOUT		= 4,
	ISTATE_RECEIVE_DATAOUT_RECOVERY	= 5,
	ISTATE_RECEIVED_LAST_DATAOUT	= 6,
	ISTATE_WITHIN_DATAOUT_RECOVERY	= 7,
	ISTATE_IN_CONNECTION_RECOVERY	= 8,
	ISTATE_RECEIVED_TASKMGT		= 9,
	ISTATE_SEND_ASYNCMSG		= 10,
	ISTATE_SENT_ASYNCMSG		= 11,
	ISTATE_SEND_DATAIN		= 12,
	ISTATE_SEND_LAST_DATAIN		= 13,
	ISTATE_SENT_LAST_DATAIN		= 14,
	ISTATE_SEND_LOGOUTRSP		= 15,
	ISTATE_SENT_LOGOUTRSP		= 16,
	ISTATE_SEND_NOPIN		= 17,
	ISTATE_SENT_NOPIN		= 18,
	ISTATE_SEND_REJECT		= 19,
	ISTATE_SENT_REJECT		= 20,
	ISTATE_SEND_R2T			= 21,
	ISTATE_SENT_R2T			= 22,
	ISTATE_SEND_R2T_RECOVERY	= 23,
	ISTATE_SENT_R2T_RECOVERY	= 24,
	ISTATE_SEND_LAST_R2T		= 25,
	ISTATE_SENT_LAST_R2T		= 26,
	ISTATE_SEND_LAST_R2T_RECOVERY	= 27,
	ISTATE_SENT_LAST_R2T_RECOVERY	= 28,
	ISTATE_SEND_STATUS		= 29,
	ISTATE_SEND_STATUS_BROKEN_PC	= 30,
	ISTATE_SENT_STATUS		= 31,
	ISTATE_SEND_STATUS_RECOVERY	= 32,
	ISTATE_SENT_STATUS_RECOVERY	= 33,
	ISTATE_SEND_TASKMGTRSP		= 34,
	ISTATE_SENT_TASKMGTRSP		= 35,
	ISTATE_SEND_TEXTRSP		= 36,
	ISTATE_SENT_TEXTRSP		= 37,
	ISTATE_SEND_NOPIN_WANT_RESPONSE	= 38,
	ISTATE_SENT_NOPIN_WANT_RESPONSE	= 39,
	ISTATE_SEND_NOPIN_NO_RESPONSE	= 40,
	ISTATE_REMOVE			= 41,
	ISTATE_FREE			= 42,
};

/* Used for iscsi_recover_cmdsn() return values */
enum recover_cmdsn_ret_table {
	CMDSN_ERROR_CANNOT_RECOVER	= -1,
	CMDSN_NORMAL_OPERATION		= 0,
	CMDSN_LOWER_THAN_EXP		= 1,
	CMDSN_HIGHER_THAN_EXP		= 2,
};

/* Used for iscsi_handle_immediate_data() return values */
enum immedate_data_ret_table {
	IMMEDIATE_DATA_CANNOT_RECOVER	= -1,
	IMMEDIATE_DATA_NORMAL_OPERATION = 0,
	IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
};

/* Used for iscsi_decide_dataout_action() return values */
enum dataout_action_ret_table {
	DATAOUT_CANNOT_RECOVER		= -1,
	DATAOUT_NORMAL			= 0,
	DATAOUT_SEND_R2T		= 1,
	DATAOUT_SEND_TO_TRANSPORT	= 2,
	DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
};

/* Used for struct iscsi_node_auth->naf_flags (bitmask) */
enum naf_flags_table {
	NAF_USERID_SET			= 0x01,
	NAF_PASSWORD_SET		= 0x02,
	NAF_USERID_IN_SET		= 0x04,
	NAF_PASSWORD_IN_SET		= 0x08,
};

/* Used by various struct timer_list to manage iSCSI specific state */
enum iscsi_timer_flags_table {
	ISCSI_TF_RUNNING		= 0x01,
	ISCSI_TF_STOP			= 0x02,
	ISCSI_TF_EXPIRED		= 0x04,
};

/* Used for struct iscsi_np->np_flags */
enum np_flags_table {
	NPF_IP_NETWORK		= 0x00,
	NPF_SCTP_STRUCT_FILE	= 0x01 /* Bugfix */
};

/* Used for struct iscsi_np->np_thread_state */
enum np_thread_state_table {
	ISCSI_NP_THREAD_ACTIVE		= 1,
	ISCSI_NP_THREAD_INACTIVE	= 2,
	ISCSI_NP_THREAD_RESET		= 3,
	ISCSI_NP_THREAD_SHUTDOWN	= 4,
	ISCSI_NP_THREAD_EXIT		= 5,
};
235
236struct iscsi_conn_ops {
237 u8 HeaderDigest; /* [0,1] == [None,CRC32C] */
238 u8 DataDigest; /* [0,1] == [None,CRC32C] */
239 u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */
240 u8 OFMarker; /* [0,1] == [No,Yes] */
241 u8 IFMarker; /* [0,1] == [No,Yes] */
242 u32 OFMarkInt; /* [1..65535] */
243 u32 IFMarkInt; /* [1..65535] */
244};
245
246struct iscsi_sess_ops {
247 char InitiatorName[224];
248 char InitiatorAlias[256];
249 char TargetName[224];
250 char TargetAlias[256];
251 char TargetAddress[256];
252 u16 TargetPortalGroupTag; /* [0..65535] */
253 u16 MaxConnections; /* [1..65535] */
254 u8 InitialR2T; /* [0,1] == [No,Yes] */
255 u8 ImmediateData; /* [0,1] == [No,Yes] */
256 u32 MaxBurstLength; /* [512..2**24-1] */
257 u32 FirstBurstLength; /* [512..2**24-1] */
258 u16 DefaultTime2Wait; /* [0..3600] */
259 u16 DefaultTime2Retain; /* [0..3600] */
260 u16 MaxOutstandingR2T; /* [1..65535] */
261 u8 DataPDUInOrder; /* [0,1] == [No,Yes] */
262 u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */
263 u8 ErrorRecoveryLevel; /* [0..2] */
264 u8 SessionType; /* [0,1] == [Normal,Discovery]*/
265};
266
267struct iscsi_queue_req {
268 int state;
269 struct iscsi_cmd *cmd;
270 struct list_head qr_list;
271};
272
273struct iscsi_data_count {
274 int data_length;
275 int sync_and_steering;
276 enum data_count_type type;
277 u32 iov_count;
278 u32 ss_iov_count;
279 u32 ss_marker_count;
280 struct kvec *iov;
281};
282
283struct iscsi_param_list {
284 struct list_head param_list;
285 struct list_head extra_response_list;
286};
287
288struct iscsi_datain_req {
289 enum datain_req_comp_table dr_complete;
290 int generate_recovery_values;
291 enum datain_req_rec_table recovery;
292 u32 begrun;
293 u32 runlength;
294 u32 data_length;
295 u32 data_offset;
296 u32 data_offset_end;
297 u32 data_sn;
298 u32 next_burst_len;
299 u32 read_data_done;
300 u32 seq_send_order;
301 struct list_head dr_list;
302} ____cacheline_aligned;
303
304struct iscsi_ooo_cmdsn {
305 u16 cid;
306 u32 batch_count;
307 u32 cmdsn;
308 u32 exp_cmdsn;
309 struct iscsi_cmd *cmd;
310 struct list_head ooo_list;
311} ____cacheline_aligned;
312
313struct iscsi_datain {
314 u8 flags;
315 u32 data_sn;
316 u32 length;
317 u32 offset;
318} ____cacheline_aligned;
319
320struct iscsi_r2t {
321 int seq_complete;
322 int recovery_r2t;
323 int sent_r2t;
324 u32 r2t_sn;
325 u32 offset;
326 u32 targ_xfer_tag;
327 u32 xfer_len;
328 struct list_head r2t_list;
329} ____cacheline_aligned;
330
331struct iscsi_cmd {
332 enum iscsi_timer_flags_table dataout_timer_flags;
333 /* DataOUT timeout retries */
334 u8 dataout_timeout_retries;
335 /* Within command recovery count */
336 u8 error_recovery_count;
337 /* iSCSI dependent state for out or order CmdSNs */
338 enum cmd_i_state_table deferred_i_state;
339 /* iSCSI dependent state */
340 enum cmd_i_state_table i_state;
341 /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
342 u8 immediate_cmd;
343 /* Immediate data present */
344 u8 immediate_data;
345 /* iSCSI Opcode */
346 u8 iscsi_opcode;
347 /* iSCSI Response Code */
348 u8 iscsi_response;
349 /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
350 u8 logout_reason;
351 /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
352 u8 logout_response;
353 /* MaxCmdSN has been incremented */
354 u8 maxcmdsn_inc;
355 /* Immediate Unsolicited Dataout */
356 u8 unsolicited_data;
357 /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
358 u16 logout_cid;
359 /* Command flags */
360 enum cmd_flags_table cmd_flags;
361 /* Initiator Task Tag assigned from Initiator */
362 u32 init_task_tag;
363 /* Target Transfer Tag assigned from Target */
364 u32 targ_xfer_tag;
365 /* CmdSN assigned from Initiator */
366 u32 cmd_sn;
367 /* ExpStatSN assigned from Initiator */
368 u32 exp_stat_sn;
369 /* StatSN assigned to this ITT */
370 u32 stat_sn;
371 /* DataSN Counter */
372 u32 data_sn;
373 /* R2TSN Counter */
374 u32 r2t_sn;
375 /* Last DataSN acknowledged via DataAck SNACK */
376 u32 acked_data_sn;
377 /* Used for echoing NOPOUT ping data */
378 u32 buf_ptr_size;
379 /* Used to store DataDigest */
380 u32 data_crc;
381 /* Total size in bytes associated with command */
382 u32 data_length;
383 /* Counter for MaxOutstandingR2T */
384 u32 outstanding_r2ts;
385 /* Next R2T Offset when DataSequenceInOrder=Yes */
386 u32 r2t_offset;
387 /* Iovec current and orig count for iscsi_cmd->iov_data */
388 u32 iov_data_count;
389 u32 orig_iov_data_count;
390 /* Number of miscellaneous iovecs used for IP stack calls */
391 u32 iov_misc_count;
392 /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
393 u32 pdu_count;
394 /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
395 u32 pdu_send_order;
396 /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
397 u32 pdu_start;
398 u32 residual_count;
399 /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
400 u32 seq_send_order;
401 /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
402 u32 seq_count;
403 /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
404 u32 seq_no;
405 /* Lowest offset in current DataOUT sequence */
406 u32 seq_start_offset;
407 /* Highest offset in current DataOUT sequence */
408 u32 seq_end_offset;
409 /* Total size in bytes received so far of READ data */
410 u32 read_data_done;
411 /* Total size in bytes received so far of WRITE data */
412 u32 write_data_done;
413 /* Counter for FirstBurstLength key */
414 u32 first_burst_len;
415 /* Counter for MaxBurstLength key */
416 u32 next_burst_len;
417 /* Transfer size used for IP stack calls */
418 u32 tx_size;
419 /* Buffer used for various purposes */
420 void *buf_ptr;
421 /* See include/linux/dma-mapping.h */
422 enum dma_data_direction data_direction;
423 /* iSCSI PDU Header + CRC */
424 unsigned char pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
425 /* Number of times struct iscsi_cmd is present in immediate queue */
426 atomic_t immed_queue_count;
427 atomic_t response_queue_count;
428 atomic_t transport_sent;
429 spinlock_t datain_lock;
430 spinlock_t dataout_timeout_lock;
431 /* spinlock for protecting struct iscsi_cmd->i_state */
432 spinlock_t istate_lock;
433 /* spinlock for adding within command recovery entries */
434 spinlock_t error_lock;
435 /* spinlock for adding R2Ts */
436 spinlock_t r2t_lock;
437 /* DataIN List */
438 struct list_head datain_list;
439 /* R2T List */
440 struct list_head cmd_r2t_list;
441 struct completion reject_comp;
442 /* Timer for DataOUT */
443 struct timer_list dataout_timer;
444 /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
445 struct kvec *iov_data;
446 /* Iovecs for miscellaneous purposes */
447#define ISCSI_MISC_IOVECS 5
448 struct kvec iov_misc[ISCSI_MISC_IOVECS];
449 /* Array of struct iscsi_pdu used for DataPDUInOrder=No */
450 struct iscsi_pdu *pdu_list;
451 /* Current struct iscsi_pdu used for DataPDUInOrder=No */
452 struct iscsi_pdu *pdu_ptr;
453 /* Array of struct iscsi_seq used for DataSequenceInOrder=No */
454 struct iscsi_seq *seq_list;
455 /* Current struct iscsi_seq used for DataSequenceInOrder=No */
456 struct iscsi_seq *seq_ptr;
457 /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
458 struct iscsi_tmr_req *tmr_req;
459 /* Connection this command is alligient to */
460 struct iscsi_conn *conn;
461 /* Pointer to connection recovery entry */
462 struct iscsi_conn_recovery *cr;
463 /* Session the command is part of, used for connection recovery */
464 struct iscsi_session *sess;
465 /* list_head for connection list */
466 struct list_head i_list;
467 /* The TCM I/O descriptor that is accessed via container_of() */
468 struct se_cmd se_cmd;
469 /* Sense buffer that will be mapped into outgoing status */
470#define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2)
471 unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN];
472
473 struct scatterlist *t_mem_sg;
474 u32 t_mem_sg_nents;
475
476 u32 padding;
477 u8 pad_bytes[4];
478
479 struct scatterlist *first_data_sg;
480 u32 first_data_sg_off;
481 u32 kmapped_nents;
482
483} ____cacheline_aligned;
484
485struct iscsi_tmr_req {
486 bool task_reassign:1;
487 u32 ref_cmd_sn;
488 u32 exp_data_sn;
489 struct iscsi_conn_recovery *conn_recovery;
490 struct se_tmr_req *se_tmr_req;
491};
492
493struct iscsi_conn {
494 /* Authentication Successful for this connection */
495 u8 auth_complete;
496 /* State connection is currently in */
497 u8 conn_state;
498 u8 conn_logout_reason;
499 u8 network_transport;
500 enum iscsi_timer_flags_table nopin_timer_flags;
501 enum iscsi_timer_flags_table nopin_response_timer_flags;
502 u8 tx_immediate_queue;
503 u8 tx_response_queue;
504 /* Used to know what thread encountered a transport failure */
505 u8 which_thread;
506 /* connection id assigned by the Initiator */
507 u16 cid;
508 /* Remote TCP Port */
509 u16 login_port;
510 int net_size;
511 u32 auth_id;
512#define CONNFLAG_SCTP_STRUCT_FILE 0x01
513 u32 conn_flags;
514 /* Used for iscsi_tx_login_rsp() */
515 u32 login_itt;
516 u32 exp_statsn;
517 /* Per connection status sequence number */
518 u32 stat_sn;
519 /* IFMarkInt's Current Value */
520 u32 if_marker;
521 /* OFMarkInt's Current Value */
522 u32 of_marker;
523 /* Used for calculating OFMarker offset to next PDU */
524 u32 of_marker_offset;
525 /* Complete Bad PDU for sending reject */
526 unsigned char bad_hdr[ISCSI_HDR_LEN];
527#define IPV6_ADDRESS_SPACE 48
528 unsigned char login_ip[IPV6_ADDRESS_SPACE];
529 int conn_usage_count;
530 int conn_waiting_on_uc;
531 atomic_t check_immediate_queue;
532 atomic_t conn_logout_remove;
533 atomic_t connection_exit;
534 atomic_t connection_recovery;
535 atomic_t connection_reinstatement;
536 atomic_t connection_wait;
537 atomic_t connection_wait_rcfr;
538 atomic_t sleep_on_conn_wait_comp;
539 atomic_t transport_failed;
540 struct completion conn_post_wait_comp;
541 struct completion conn_wait_comp;
542 struct completion conn_wait_rcfr_comp;
543 struct completion conn_waiting_on_uc_comp;
544 struct completion conn_logout_comp;
545 struct completion tx_half_close_comp;
546 struct completion rx_half_close_comp;
547 /* socket used by this connection */
548 struct socket *sock;
549 struct timer_list nopin_timer;
550 struct timer_list nopin_response_timer;
551 struct timer_list transport_timer;
552 /* Spinlock used for add/deleting cmd's from conn_cmd_list */
553 spinlock_t cmd_lock;
554 spinlock_t conn_usage_lock;
555 spinlock_t immed_queue_lock;
556 spinlock_t nopin_timer_lock;
557 spinlock_t response_queue_lock;
558 spinlock_t state_lock;
559 /* libcrypto RX and TX contexts for crc32c */
560 struct hash_desc conn_rx_hash;
561 struct hash_desc conn_tx_hash;
562 /* Used for scheduling TX and RX connection kthreads */
563 cpumask_var_t conn_cpumask;
564 int conn_rx_reset_cpumask:1;
565 int conn_tx_reset_cpumask:1;
566 /* list_head of struct iscsi_cmd for this connection */
567 struct list_head conn_cmd_list;
568 struct list_head immed_queue_list;
569 struct list_head response_queue_list;
570 struct iscsi_conn_ops *conn_ops;
571 struct iscsi_param_list *param_list;
572 /* Used for per connection auth state machine */
573 void *auth_protocol;
574 struct iscsi_login_thread_s *login_thread;
575 struct iscsi_portal_group *tpg;
576 /* Pointer to parent session */
577 struct iscsi_session *sess;
578 /* Pointer to thread_set in use for this conn's threads */
579 struct iscsi_thread_set *thread_set;
580 /* list_head for session connection list */
581 struct list_head conn_list;
582} ____cacheline_aligned;
583
584struct iscsi_conn_recovery {
585 u16 cid;
586 u32 cmd_count;
587 u32 maxrecvdatasegmentlength;
588 int ready_for_reallegiance;
589 struct list_head conn_recovery_cmd_list;
590 spinlock_t conn_recovery_cmd_lock;
591 struct timer_list time2retain_timer;
592 struct iscsi_session *sess;
593 struct list_head cr_list;
594} ____cacheline_aligned;
595
596struct iscsi_session {
597 u8 initiator_vendor;
598 u8 isid[6];
599 enum iscsi_timer_flags_table time2retain_timer_flags;
600 u8 version_active;
601 u16 cid_called;
602 u16 conn_recovery_count;
603 u16 tsih;
604 /* state session is currently in */
605 u32 session_state;
606 /* session wide counter: initiator assigned task tag */
607 u32 init_task_tag;
608 /* session wide counter: target assigned task tag */
609 u32 targ_xfer_tag;
610 u32 cmdsn_window;
611
612 /* protects cmdsn values */
613 struct mutex cmdsn_mutex;
614 /* session wide counter: expected command sequence number */
615 u32 exp_cmd_sn;
616 /* session wide counter: maximum allowed command sequence number */
617 u32 max_cmd_sn;
618 struct list_head sess_ooo_cmdsn_list;
619
620 /* LIO specific session ID */
621 u32 sid;
622 char auth_type[8];
623 /* unique within the target */
624 int session_index;
625 /* Used for session reference counting */
626 int session_usage_count;
627 int session_waiting_on_uc;
628 u32 cmd_pdus;
629 u32 rsp_pdus;
630 u64 tx_data_octets;
631 u64 rx_data_octets;
632 u32 conn_digest_errors;
633 u32 conn_timeout_errors;
634 u64 creation_time;
635 spinlock_t session_stats_lock;
636 /* Number of active connections */
637 atomic_t nconn;
638 atomic_t session_continuation;
639 atomic_t session_fall_back_to_erl0;
640 atomic_t session_logout;
641 atomic_t session_reinstatement;
642 atomic_t session_stop_active;
643 atomic_t sleep_on_sess_wait_comp;
644 atomic_t transport_wait_cmds;
645 /* connection list */
646 struct list_head sess_conn_list;
647 struct list_head cr_active_list;
648 struct list_head cr_inactive_list;
649 spinlock_t conn_lock;
650 spinlock_t cr_a_lock;
651 spinlock_t cr_i_lock;
652 spinlock_t session_usage_lock;
653 spinlock_t ttt_lock;
654 struct completion async_msg_comp;
655 struct completion reinstatement_comp;
656 struct completion session_wait_comp;
657 struct completion session_waiting_on_uc_comp;
658 struct timer_list time2retain_timer;
659 struct iscsi_sess_ops *sess_ops;
660 struct se_session *se_sess;
661 struct iscsi_portal_group *tpg;
662} ____cacheline_aligned;
663
664struct iscsi_login {
665 u8 auth_complete;
666 u8 checked_for_existing;
667 u8 current_stage;
668 u8 leading_connection;
669 u8 first_request;
670 u8 version_min;
671 u8 version_max;
672 char isid[6];
673 u32 cmd_sn;
674 u32 init_task_tag;
675 u32 initial_exp_statsn;
676 u32 rsp_length;
677 u16 cid;
678 u16 tsih;
679 char *req;
680 char *rsp;
681 char *req_buf;
682 char *rsp_buf;
683} ____cacheline_aligned;
684
685struct iscsi_node_attrib {
686 u32 dataout_timeout;
687 u32 dataout_timeout_retries;
688 u32 default_erl;
689 u32 nopin_timeout;
690 u32 nopin_response_timeout;
691 u32 random_datain_pdu_offsets;
692 u32 random_datain_seq_offsets;
693 u32 random_r2t_offsets;
694 u32 tmr_cold_reset;
695 u32 tmr_warm_reset;
696 struct iscsi_node_acl *nacl;
697};
698
699struct se_dev_entry_s;
700
701struct iscsi_node_auth {
702 enum naf_flags_table naf_flags;
703 int authenticate_target;
704 /* Used for iscsit_global->discovery_auth,
705 * set to zero (auth disabled) by default */
706 int enforce_discovery_auth;
707#define MAX_USER_LEN 256
708#define MAX_PASS_LEN 256
709 char userid[MAX_USER_LEN];
710 char password[MAX_PASS_LEN];
711 char userid_mutual[MAX_USER_LEN];
712 char password_mutual[MAX_PASS_LEN];
713};
714
715#include "iscsi_target_stat.h"
716
717struct iscsi_node_stat_grps {
718 struct config_group iscsi_sess_stats_group;
719 struct config_group iscsi_conn_stats_group;
720};
721
722struct iscsi_node_acl {
723 struct iscsi_node_attrib node_attrib;
724 struct iscsi_node_auth node_auth;
725 struct iscsi_node_stat_grps node_stat_grps;
726 struct se_node_acl se_node_acl;
727};
728
729#define NODE_STAT_GRPS(nacl) (&(nacl)->node_stat_grps)
730
731#define ISCSI_NODE_ATTRIB(t) (&(t)->node_attrib)
732#define ISCSI_NODE_AUTH(t) (&(t)->node_auth)
733
734struct iscsi_tpg_attrib {
735 u32 authentication;
736 u32 login_timeout;
737 u32 netif_timeout;
738 u32 generate_node_acls;
739 u32 cache_dynamic_acls;
740 u32 default_cmdsn_depth;
741 u32 demo_mode_write_protect;
742 u32 prod_mode_write_protect;
743 struct iscsi_portal_group *tpg;
744};
745
746struct iscsi_np {
747 int np_network_transport;
748 int np_ip_proto;
749 int np_sock_type;
750 enum np_thread_state_table np_thread_state;
751 enum iscsi_timer_flags_table np_login_timer_flags;
752 u32 np_exports;
753 enum np_flags_table np_flags;
754 unsigned char np_ip[IPV6_ADDRESS_SPACE];
755 u16 np_port;
756 spinlock_t np_thread_lock;
757 struct completion np_restart_comp;
758 struct socket *np_socket;
759 struct __kernel_sockaddr_storage np_sockaddr;
760 struct task_struct *np_thread;
761 struct timer_list np_login_timer;
762 struct iscsi_portal_group *np_login_tpg;
763 struct list_head np_list;
764} ____cacheline_aligned;
765
766struct iscsi_tpg_np {
767 struct iscsi_np *tpg_np;
768 struct iscsi_portal_group *tpg;
769 struct iscsi_tpg_np *tpg_np_parent;
770 struct list_head tpg_np_list;
771 struct list_head tpg_np_child_list;
772 struct list_head tpg_np_parent_list;
773 struct se_tpg_np se_tpg_np;
774 spinlock_t tpg_np_parent_lock;
775};
776
777struct iscsi_portal_group {
778 unsigned char tpg_chap_id;
779 /* TPG State */
780 enum tpg_state_table tpg_state;
781 /* Target Portal Group Tag */
782 u16 tpgt;
783 /* Id assigned to target sessions */
784 u16 ntsih;
785 /* Number of active sessions */
786 u32 nsessions;
787 /* Number of Network Portals available for this TPG */
788 u32 num_tpg_nps;
789 /* Per TPG LIO specific session ID. */
790 u32 sid;
791 /* Spinlock for adding/removing Network Portals */
792 spinlock_t tpg_np_lock;
793 spinlock_t tpg_state_lock;
794 struct se_portal_group tpg_se_tpg;
795 struct mutex tpg_access_lock;
796 struct mutex np_login_lock;
797 struct iscsi_tpg_attrib tpg_attrib;
798 /* Pointer to default list of iSCSI parameters for TPG */
799 struct iscsi_param_list *param_list;
800 struct iscsi_tiqn *tpg_tiqn;
801 struct list_head tpg_gnp_list;
802 struct list_head tpg_list;
803} ____cacheline_aligned;
804
805#define ISCSI_TPG_C(c) ((struct iscsi_portal_group *)(c)->tpg)
806#define ISCSI_TPG_LUN(c, l) ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l])
807#define ISCSI_TPG_S(s) ((struct iscsi_portal_group *)(s)->tpg)
808#define ISCSI_TPG_ATTRIB(t) (&(t)->tpg_attrib)
809#define SE_TPG(tpg) (&(tpg)->tpg_se_tpg)
810
811struct iscsi_wwn_stat_grps {
812 struct config_group iscsi_stat_group;
813 struct config_group iscsi_instance_group;
814 struct config_group iscsi_sess_err_group;
815 struct config_group iscsi_tgt_attr_group;
816 struct config_group iscsi_login_stats_group;
817 struct config_group iscsi_logout_stats_group;
818};
819
820struct iscsi_tiqn {
821#define ISCSI_IQN_LEN 224
822 unsigned char tiqn[ISCSI_IQN_LEN];
823 enum tiqn_state_table tiqn_state;
824 int tiqn_access_count;
825 u32 tiqn_active_tpgs;
826 u32 tiqn_ntpgs;
827 u32 tiqn_num_tpg_nps;
828 u32 tiqn_nsessions;
829 struct list_head tiqn_list;
830 struct list_head tiqn_tpg_list;
831 spinlock_t tiqn_state_lock;
832 spinlock_t tiqn_tpg_lock;
833 struct se_wwn tiqn_wwn;
834 struct iscsi_wwn_stat_grps tiqn_stat_grps;
835 int tiqn_index;
836 struct iscsi_sess_err_stats sess_err_stats;
837 struct iscsi_login_stats login_stats;
838 struct iscsi_logout_stats logout_stats;
839} ____cacheline_aligned;
840
841#define WWN_STAT_GRPS(tiqn) (&(tiqn)->tiqn_stat_grps)
842
843struct iscsit_global {
844 /* In core shutdown */
845 u32 in_shutdown;
846 u32 active_ts;
847 /* Unique identifier used for the authentication daemon */
848 u32 auth_id;
849 u32 inactive_ts;
850 /* Thread Set bitmap count */
851 int ts_bitmap_count;
852 /* Thread Set bitmap pointer */
853 unsigned long *ts_bitmap;
854 /* Used for iSCSI discovery session authentication */
855 struct iscsi_node_acl discovery_acl;
856 struct iscsi_portal_group *discovery_tpg;
857};
858
859#endif /* ISCSI_TARGET_CORE_H */
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
new file mode 100644
index 000000000000..8c0495129513
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -0,0 +1,531 @@
1/*******************************************************************************
2 * This file contains the iSCSI Target DataIN value generation functions.
3 *
 * (c) Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <scsi/iscsi_proto.h>
22
23#include "iscsi_target_core.h"
24#include "iscsi_target_seq_pdu_list.h"
25#include "iscsi_target_erl1.h"
26#include "iscsi_target_util.h"
27#include "iscsi_target.h"
28#include "iscsi_target_datain_values.h"
29
30struct iscsi_datain_req *iscsit_allocate_datain_req(void)
31{
32 struct iscsi_datain_req *dr;
33
34 dr = kmem_cache_zalloc(lio_dr_cache, GFP_ATOMIC);
35 if (!dr) {
36 pr_err("Unable to allocate memory for"
37 " struct iscsi_datain_req\n");
38 return NULL;
39 }
40 INIT_LIST_HEAD(&dr->dr_list);
41
42 return dr;
43}
44
45void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
46{
47 spin_lock(&cmd->datain_lock);
48 list_add_tail(&dr->dr_list, &cmd->datain_list);
49 spin_unlock(&cmd->datain_lock);
50}
51
52void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
53{
54 spin_lock(&cmd->datain_lock);
55 list_del(&dr->dr_list);
56 spin_unlock(&cmd->datain_lock);
57
58 kmem_cache_free(lio_dr_cache, dr);
59}
60
61void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
62{
63 struct iscsi_datain_req *dr, *dr_tmp;
64
65 spin_lock(&cmd->datain_lock);
66 list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, dr_list) {
67 list_del(&dr->dr_list);
68 kmem_cache_free(lio_dr_cache, dr);
69 }
70 spin_unlock(&cmd->datain_lock);
71}
72
73struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
74{
75 struct iscsi_datain_req *dr;
76
77 if (list_empty(&cmd->datain_list)) {
78 pr_err("cmd->datain_list is empty for ITT:"
79 " 0x%08x\n", cmd->init_task_tag);
80 return NULL;
81 }
82 list_for_each_entry(dr, &cmd->datain_list, dr_list)
83 break;
84
85 return dr;
86}
87
88/*
89 * For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=Yes.
90 */
91static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
92 struct iscsi_cmd *cmd,
93 struct iscsi_datain *datain)
94{
95 u32 next_burst_len, read_data_done, read_data_left;
96 struct iscsi_conn *conn = cmd->conn;
97 struct iscsi_datain_req *dr;
98
99 dr = iscsit_get_datain_req(cmd);
100 if (!dr)
101 return NULL;
102
103 if (dr->recovery && dr->generate_recovery_values) {
104 if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
105 cmd, dr) < 0)
106 return NULL;
107
108 dr->generate_recovery_values = 0;
109 }
110
111 next_burst_len = (!dr->recovery) ?
112 cmd->next_burst_len : dr->next_burst_len;
113 read_data_done = (!dr->recovery) ?
114 cmd->read_data_done : dr->read_data_done;
115
116 read_data_left = (cmd->data_length - read_data_done);
117 if (!read_data_left) {
118 pr_err("ITT: 0x%08x read_data_left is zero!\n",
119 cmd->init_task_tag);
120 return NULL;
121 }
122
123 if ((read_data_left <= conn->conn_ops->MaxRecvDataSegmentLength) &&
124 (read_data_left <= (conn->sess->sess_ops->MaxBurstLength -
125 next_burst_len))) {
126 datain->length = read_data_left;
127
128 datain->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
129 if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
130 datain->flags |= ISCSI_FLAG_DATA_ACK;
131 } else {
132 if ((next_burst_len +
133 conn->conn_ops->MaxRecvDataSegmentLength) <
134 conn->sess->sess_ops->MaxBurstLength) {
135 datain->length =
136 conn->conn_ops->MaxRecvDataSegmentLength;
137 next_burst_len += datain->length;
138 } else {
139 datain->length = (conn->sess->sess_ops->MaxBurstLength -
140 next_burst_len);
141 next_burst_len = 0;
142
143 datain->flags |= ISCSI_FLAG_CMD_FINAL;
144 if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
145 datain->flags |= ISCSI_FLAG_DATA_ACK;
146 }
147 }
148
149 datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
150 datain->offset = read_data_done;
151
152 if (!dr->recovery) {
153 cmd->next_burst_len = next_burst_len;
154 cmd->read_data_done += datain->length;
155 } else {
156 dr->next_burst_len = next_burst_len;
157 dr->read_data_done += datain->length;
158 }
159
160 if (!dr->recovery) {
161 if (datain->flags & ISCSI_FLAG_DATA_STATUS)
162 dr->dr_complete = DATAIN_COMPLETE_NORMAL;
163
164 return dr;
165 }
166
167 if (!dr->runlength) {
168 if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
169 dr->dr_complete =
170 (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
171 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
172 DATAIN_COMPLETE_CONNECTION_RECOVERY;
173 }
174 } else {
175 if ((dr->begrun + dr->runlength) == dr->data_sn) {
176 dr->dr_complete =
177 (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
178 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
179 DATAIN_COMPLETE_CONNECTION_RECOVERY;
180 }
181 }
182
183 return dr;
184}
185
186/*
187 * For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=Yes.
188 */
189static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
190 struct iscsi_cmd *cmd,
191 struct iscsi_datain *datain)
192{
193 u32 offset, read_data_done, read_data_left, seq_send_order;
194 struct iscsi_conn *conn = cmd->conn;
195 struct iscsi_datain_req *dr;
196 struct iscsi_seq *seq;
197
198 dr = iscsit_get_datain_req(cmd);
199 if (!dr)
200 return NULL;
201
202 if (dr->recovery && dr->generate_recovery_values) {
203 if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
204 cmd, dr) < 0)
205 return NULL;
206
207 dr->generate_recovery_values = 0;
208 }
209
210 read_data_done = (!dr->recovery) ?
211 cmd->read_data_done : dr->read_data_done;
212 seq_send_order = (!dr->recovery) ?
213 cmd->seq_send_order : dr->seq_send_order;
214
215 read_data_left = (cmd->data_length - read_data_done);
216 if (!read_data_left) {
217 pr_err("ITT: 0x%08x read_data_left is zero!\n",
218 cmd->init_task_tag);
219 return NULL;
220 }
221
222 seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
223 if (!seq)
224 return NULL;
225
226 seq->sent = 1;
227
228 if (!dr->recovery && !seq->next_burst_len)
229 seq->first_datasn = cmd->data_sn;
230
231 offset = (seq->offset + seq->next_burst_len);
232
233 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
234 cmd->data_length) {
235 datain->length = (cmd->data_length - offset);
236 datain->offset = offset;
237
238 datain->flags |= ISCSI_FLAG_CMD_FINAL;
239 if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
240 datain->flags |= ISCSI_FLAG_DATA_ACK;
241
242 seq->next_burst_len = 0;
243 seq_send_order++;
244 } else {
245 if ((seq->next_burst_len +
246 conn->conn_ops->MaxRecvDataSegmentLength) <
247 conn->sess->sess_ops->MaxBurstLength) {
248 datain->length =
249 conn->conn_ops->MaxRecvDataSegmentLength;
250 datain->offset = (seq->offset + seq->next_burst_len);
251
252 seq->next_burst_len += datain->length;
253 } else {
254 datain->length = (conn->sess->sess_ops->MaxBurstLength -
255 seq->next_burst_len);
256 datain->offset = (seq->offset + seq->next_burst_len);
257
258 datain->flags |= ISCSI_FLAG_CMD_FINAL;
259 if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
260 datain->flags |= ISCSI_FLAG_DATA_ACK;
261
262 seq->next_burst_len = 0;
263 seq_send_order++;
264 }
265 }
266
267 if ((read_data_done + datain->length) == cmd->data_length)
268 datain->flags |= ISCSI_FLAG_DATA_STATUS;
269
270 datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
271 if (!dr->recovery) {
272 cmd->seq_send_order = seq_send_order;
273 cmd->read_data_done += datain->length;
274 } else {
275 dr->seq_send_order = seq_send_order;
276 dr->read_data_done += datain->length;
277 }
278
279 if (!dr->recovery) {
280 if (datain->flags & ISCSI_FLAG_CMD_FINAL)
281 seq->last_datasn = datain->data_sn;
282 if (datain->flags & ISCSI_FLAG_DATA_STATUS)
283 dr->dr_complete = DATAIN_COMPLETE_NORMAL;
284
285 return dr;
286 }
287
288 if (!dr->runlength) {
289 if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
290 dr->dr_complete =
291 (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
292 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
293 DATAIN_COMPLETE_CONNECTION_RECOVERY;
294 }
295 } else {
296 if ((dr->begrun + dr->runlength) == dr->data_sn) {
297 dr->dr_complete =
298 (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
299 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
300 DATAIN_COMPLETE_CONNECTION_RECOVERY;
301 }
302 }
303
304 return dr;
305}
306
307/*
308 * For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=No.
309 */
static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
	struct iscsi_cmd *cmd,
	struct iscsi_datain *datain)
{
	u32 next_burst_len, read_data_done, read_data_left;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_datain_req *dr;
	struct iscsi_pdu *pdu;

	/* Next DataIN request queued on this command, if any. */
	dr = iscsit_get_datain_req(cmd);
	if (!dr)
		return NULL;

	/*
	 * First pass of a recovery request: rebuild the per-request
	 * counters once, then clear the flag so subsequent passes reuse
	 * the values stored in *dr.
	 */
	if (dr->recovery && dr->generate_recovery_values) {
		if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
					cmd, dr) < 0)
			return NULL;

		dr->generate_recovery_values = 0;
	}

	/* Normal transfers track progress in cmd, recovery in dr. */
	next_burst_len = (!dr->recovery) ?
			cmd->next_burst_len : dr->next_burst_len;
	read_data_done = (!dr->recovery) ?
			cmd->read_data_done : dr->read_data_done;

	read_data_left = (cmd->data_length - read_data_done);
	if (!read_data_left) {
		/* Nothing left to send; caller gets the request back as-is. */
		pr_err("ITT: 0x%08x read_data_left is zero!\n",
				cmd->init_task_tag);
		return dr;
	}

	/* DataPDUInOrder=No: next PDU is chosen by pdu_send_order. */
	pdu = iscsit_get_pdu_holder_for_seq(cmd, NULL);
	if (!pdu)
		return dr;

	if ((read_data_done + pdu->length) == cmd->data_length) {
		/* Final PDU of the transfer: finish and piggyback status. */
		pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
		if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
			pdu->flags |= ISCSI_FLAG_DATA_ACK;

		next_burst_len = 0;
	} else {
		/*
		 * Close the current burst once another full-size segment
		 * would no longer fit under MaxBurstLength.
		 */
		if ((next_burst_len + conn->conn_ops->MaxRecvDataSegmentLength) <
		     conn->sess->sess_ops->MaxBurstLength)
			next_burst_len += pdu->length;
		else {
			pdu->flags |= ISCSI_FLAG_CMD_FINAL;
			if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
				pdu->flags |= ISCSI_FLAG_DATA_ACK;

			next_burst_len = 0;
		}
	}

	/* Stamp the DataSN and fold progress back into cmd or dr. */
	pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
	if (!dr->recovery) {
		cmd->next_burst_len = next_burst_len;
		cmd->read_data_done += pdu->length;
	} else {
		dr->next_burst_len = next_burst_len;
		dr->read_data_done += pdu->length;
	}

	/* Export the chosen PDU's parameters to the caller. */
	datain->flags = pdu->flags;
	datain->length = pdu->length;
	datain->offset = pdu->offset;
	datain->data_sn = pdu->data_sn;

	if (!dr->recovery) {
		if (datain->flags & ISCSI_FLAG_DATA_STATUS)
			dr->dr_complete = DATAIN_COMPLETE_NORMAL;

		return dr;
	}

	/*
	 * Recovery completion: runlength == 0 means resend to the end of
	 * the transfer (done when status goes out); otherwise done once
	 * DataSN reaches begrun + runlength.
	 */
	if (!dr->runlength) {
		if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
			dr->dr_complete =
			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
				DATAIN_COMPLETE_CONNECTION_RECOVERY;
		}
	} else {
		if ((dr->begrun + dr->runlength) == dr->data_sn) {
			dr->dr_complete =
			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
				DATAIN_COMPLETE_CONNECTION_RECOVERY;
		}
	}

	return dr;
}
405
406/*
407 * For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=No.
408 */
static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
	struct iscsi_cmd *cmd,
	struct iscsi_datain *datain)
{
	u32 read_data_done, read_data_left, seq_send_order;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_datain_req *dr;
	struct iscsi_pdu *pdu;
	struct iscsi_seq *seq = NULL;

	/* Next DataIN request queued on this command, if any. */
	dr = iscsit_get_datain_req(cmd);
	if (!dr)
		return NULL;

	/*
	 * First pass of a recovery request: rebuild the per-request
	 * counters once, then clear the flag for subsequent passes.
	 */
	if (dr->recovery && dr->generate_recovery_values) {
		if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
					cmd, dr) < 0)
			return NULL;

		dr->generate_recovery_values = 0;
	}

	/* Normal transfers track progress in cmd, recovery in dr. */
	read_data_done = (!dr->recovery) ?
			cmd->read_data_done : dr->read_data_done;
	seq_send_order = (!dr->recovery) ?
			cmd->seq_send_order : dr->seq_send_order;

	read_data_left = (cmd->data_length - read_data_done);
	if (!read_data_left) {
		/*
		 * NOTE(review): the yes_and_no/yes_and_yes variants return
		 * dr here instead of NULL — confirm which is intended.
		 */
		pr_err("ITT: 0x%08x read_data_left is zero!\n",
				cmd->init_task_tag);
		return NULL;
	}

	/* DataSequenceInOrder=No: pick the sequence by seq_send_order. */
	seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
	if (!seq)
		return NULL;

	seq->sent = 1;

	/* Record the first DataSN of a fresh (non-recovery) burst. */
	if (!dr->recovery && !seq->next_burst_len)
		seq->first_datasn = cmd->data_sn;

	/* DataPDUInOrder=No: next PDU within the sequence. */
	pdu = iscsit_get_pdu_holder_for_seq(cmd, seq);
	if (!pdu)
		return NULL;

	if (seq->pdu_send_order == seq->pdu_count) {
		/* Last PDU of this sequence: finish it, advance sequence. */
		pdu->flags |= ISCSI_FLAG_CMD_FINAL;
		if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
			pdu->flags |= ISCSI_FLAG_DATA_ACK;

		seq->next_burst_len = 0;
		seq_send_order++;
	} else
		seq->next_burst_len += pdu->length;

	/* Status piggybacks on the PDU that completes the transfer. */
	if ((read_data_done + pdu->length) == cmd->data_length)
		pdu->flags |= ISCSI_FLAG_DATA_STATUS;

	/* Stamp the DataSN and fold progress back into cmd or dr. */
	pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
	if (!dr->recovery) {
		cmd->seq_send_order = seq_send_order;
		cmd->read_data_done += pdu->length;
	} else {
		dr->seq_send_order = seq_send_order;
		dr->read_data_done += pdu->length;
	}

	/* Export the chosen PDU's parameters to the caller. */
	datain->flags = pdu->flags;
	datain->length = pdu->length;
	datain->offset = pdu->offset;
	datain->data_sn = pdu->data_sn;

	if (!dr->recovery) {
		if (datain->flags & ISCSI_FLAG_CMD_FINAL)
			seq->last_datasn = datain->data_sn;
		if (datain->flags & ISCSI_FLAG_DATA_STATUS)
			dr->dr_complete = DATAIN_COMPLETE_NORMAL;

		return dr;
	}

	/*
	 * Recovery completion: runlength == 0 means resend through end of
	 * transfer; otherwise done at DataSN == begrun + runlength.
	 */
	if (!dr->runlength) {
		if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
			dr->dr_complete =
			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
				DATAIN_COMPLETE_CONNECTION_RECOVERY;
		}
	} else {
		if ((dr->begrun + dr->runlength) == dr->data_sn) {
			dr->dr_complete =
			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
				DATAIN_COMPLETE_CONNECTION_RECOVERY;
		}
	}

	return dr;
}
510
511struct iscsi_datain_req *iscsit_get_datain_values(
512 struct iscsi_cmd *cmd,
513 struct iscsi_datain *datain)
514{
515 struct iscsi_conn *conn = cmd->conn;
516
517 if (conn->sess->sess_ops->DataSequenceInOrder &&
518 conn->sess->sess_ops->DataPDUInOrder)
519 return iscsit_set_datain_values_yes_and_yes(cmd, datain);
520 else if (!conn->sess->sess_ops->DataSequenceInOrder &&
521 conn->sess->sess_ops->DataPDUInOrder)
522 return iscsit_set_datain_values_no_and_yes(cmd, datain);
523 else if (conn->sess->sess_ops->DataSequenceInOrder &&
524 !conn->sess->sess_ops->DataPDUInOrder)
525 return iscsit_set_datain_values_yes_and_no(cmd, datain);
526 else if (!conn->sess->sess_ops->DataSequenceInOrder &&
527 !conn->sess->sess_ops->DataPDUInOrder)
528 return iscsit_set_datain_values_no_and_no(cmd, datain);
529
530 return NULL;
531}
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.h b/drivers/target/iscsi/iscsi_target_datain_values.h
new file mode 100644
index 000000000000..646429ac5a02
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_datain_values.h
@@ -0,0 +1,12 @@
#ifndef ISCSI_TARGET_DATAIN_VALUES_H
#define ISCSI_TARGET_DATAIN_VALUES_H

/*
 * DataIN request descriptor management for the iSCSI target:
 * allocation, attach/detach to a command, and per-PDU value
 * generation driven by the negotiated ordering parameters.
 */
extern struct iscsi_datain_req *iscsit_allocate_datain_req(void);
extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
extern void iscsit_free_all_datain_reqs(struct iscsi_cmd *);
extern struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *);
/* Fills *datain with the next DataIN PDU's flags/length/offset/DataSN. */
extern struct iscsi_datain_req *iscsit_get_datain_values(struct iscsi_cmd *,
			struct iscsi_datain *);

#endif /*** ISCSI_TARGET_DATAIN_VALUES_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
new file mode 100644
index 000000000000..a19fa5eea88e
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -0,0 +1,87 @@
1/*******************************************************************************
2 * This file contains the iSCSI Virtual Device and Disk Transport
3 * agnostic related functions.
4 *
 * © Copyright 2007-2011 RisingTide Systems LLC.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 ******************************************************************************/
21
22#include <scsi/scsi_device.h>
23#include <target/target_core_base.h>
24#include <target/target_core_device.h>
25#include <target/target_core_transport.h>
26
27#include "iscsi_target_core.h"
28#include "iscsi_target_device.h"
29#include "iscsi_target_tpg.h"
30#include "iscsi_target_util.h"
31
32int iscsit_get_lun_for_tmr(
33 struct iscsi_cmd *cmd,
34 u64 lun)
35{
36 u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
37
38 return transport_lookup_tmr_lun(&cmd->se_cmd, unpacked_lun);
39}
40
41int iscsit_get_lun_for_cmd(
42 struct iscsi_cmd *cmd,
43 unsigned char *cdb,
44 u64 lun)
45{
46 u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
47
48 return transport_lookup_cmd_lun(&cmd->se_cmd, unpacked_lun);
49}
50
51void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
52{
53 struct se_node_acl *se_nacl;
54
55 /*
56 * This is a discovery session, the single queue slot was already
57 * assigned in iscsi_login_zero_tsih(). Since only Logout and
58 * Text Opcodes are allowed during discovery we do not have to worry
59 * about the HBA's queue depth here.
60 */
61 if (sess->sess_ops->SessionType)
62 return;
63
64 se_nacl = sess->se_sess->se_node_acl;
65
66 /*
67 * This is a normal session, set the Session's CmdSN window to the
68 * struct se_node_acl->queue_depth. The value in struct se_node_acl->queue_depth
69 * has already been validated as a legal value in
70 * core_set_queue_depth_for_node().
71 */
72 sess->cmdsn_window = se_nacl->queue_depth;
73 sess->max_cmd_sn = (sess->max_cmd_sn + se_nacl->queue_depth) - 1;
74}
75
/*
 * Open the CmdSN window by one when a command slot is returned.
 * Immediate commands never consumed a slot, and maxcmdsn_inc guards
 * against incrementing twice for the same command.
 */
void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess)
{
	if (cmd->immediate_cmd || cmd->maxcmdsn_inc)
		return;

	cmd->maxcmdsn_inc = 1;

	/* cmdsn_mutex serializes MaxCmdSN updates across the session. */
	mutex_lock(&sess->cmdsn_mutex);
	sess->max_cmd_sn += 1;
	pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
	mutex_unlock(&sess->cmdsn_mutex);
}
diff --git a/drivers/target/iscsi/iscsi_target_device.h b/drivers/target/iscsi/iscsi_target_device.h
new file mode 100644
index 000000000000..bef1cada15f8
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_device.h
@@ -0,0 +1,9 @@
#ifndef ISCSI_TARGET_DEVICE_H
#define ISCSI_TARGET_DEVICE_H

/* LUN resolution helpers (wire-format u64 LUN -> target core lookup). */
extern int iscsit_get_lun_for_tmr(struct iscsi_cmd *, u64);
extern int iscsit_get_lun_for_cmd(struct iscsi_cmd *, unsigned char *, u64);
/* CmdSN window management for the session's MaxCmdSN. */
extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);

#endif /* ISCSI_TARGET_DEVICE_H */
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
new file mode 100644
index 000000000000..b7ffc3cd40cc
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -0,0 +1,1004 @@
1/******************************************************************************
2 * This file contains error recovery level zero functions used by
3 * the iSCSI Target driver.
4 *
 * © Copyright 2007-2011 RisingTide Systems LLC.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 ******************************************************************************/
21
22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h>
24#include <target/target_core_transport.h>
25
26#include "iscsi_target_core.h"
27#include "iscsi_target_seq_pdu_list.h"
28#include "iscsi_target_tq.h"
29#include "iscsi_target_erl0.h"
30#include "iscsi_target_erl1.h"
31#include "iscsi_target_erl2.h"
32#include "iscsi_target_util.h"
33#include "iscsi_target.h"
34
35/*
36 * Used to set values in struct iscsi_cmd that iscsit_dataout_check_sequence()
37 * checks against to determine a PDU's Offset+Length is within the current
38 * DataOUT Sequence. Used for DataSequenceInOrder=Yes only.
39 */
40void iscsit_set_dataout_sequence_values(
41 struct iscsi_cmd *cmd)
42{
43 struct iscsi_conn *conn = cmd->conn;
44 /*
45 * Still set seq_start_offset and seq_end_offset for Unsolicited
46 * DataOUT, even if DataSequenceInOrder=No.
47 */
48 if (cmd->unsolicited_data) {
49 cmd->seq_start_offset = cmd->write_data_done;
50 cmd->seq_end_offset = (cmd->write_data_done +
51 (cmd->data_length >
52 conn->sess->sess_ops->FirstBurstLength) ?
53 conn->sess->sess_ops->FirstBurstLength : cmd->data_length);
54 return;
55 }
56
57 if (!conn->sess->sess_ops->DataSequenceInOrder)
58 return;
59
60 if (!cmd->seq_start_offset && !cmd->seq_end_offset) {
61 cmd->seq_start_offset = cmd->write_data_done;
62 cmd->seq_end_offset = (cmd->data_length >
63 conn->sess->sess_ops->MaxBurstLength) ?
64 (cmd->write_data_done +
65 conn->sess->sess_ops->MaxBurstLength) : cmd->data_length;
66 } else {
67 cmd->seq_start_offset = cmd->seq_end_offset;
68 cmd->seq_end_offset = ((cmd->seq_end_offset +
69 conn->sess->sess_ops->MaxBurstLength) >=
70 cmd->data_length) ? cmd->data_length :
71 (cmd->seq_end_offset +
72 conn->sess->sess_ops->MaxBurstLength);
73 }
74}
75
static int iscsit_dataout_within_command_recovery_check(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	/*
	 * We do the within-command recovery checks here as it is
	 * the first function called in iscsi_check_pre_dataout().
	 * Basically, if we are in within-command recovery and
	 * the PDU does not contain the offset the sequence needs,
	 * dump the payload.
	 *
	 * This only applies to DataPDUInOrder=Yes, for
	 * DataPDUInOrder=No we only re-request the failed PDU
	 * and check that all PDUs in a sequence are received
	 * upon end of sequence.
	 */
	if (conn->sess->sess_ops->DataSequenceInOrder) {
		/* Recovering and this PDU is not the one we need: dump. */
		if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) &&
		    (cmd->write_data_done != hdr->offset))
			goto dump;

		/* Expected offset arrived; recovery for this cmd is over. */
		cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY;
	} else {
		struct iscsi_seq *seq;

		seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
		if (!seq)
			return DATAOUT_CANNOT_RECOVER;
		/*
		 * Set the struct iscsi_seq pointer to reuse later.
		 */
		cmd->seq_ptr = seq;

		if (conn->sess->sess_ops->DataPDUInOrder) {
			/* In-order PDUs: both offset and DataSN must match
			 * the recovering sequence's expectations. */
			if ((seq->status ==
			     DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
			   ((seq->offset != hdr->offset) ||
			    (seq->data_sn != hdr->datasn)))
				goto dump;
		} else {
			/* Out-of-order PDUs: only the DataSN is checked. */
			if ((seq->status ==
			     DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
			    (seq->data_sn != hdr->datasn))
				goto dump;
		}

		/* Data for an already-completed sequence is discarded. */
		if (seq->status == DATAOUT_SEQUENCE_COMPLETE)
			goto dump;

		if (seq->status != DATAOUT_SEQUENCE_COMPLETE)
			seq->status = 0;
	}

	return DATAOUT_NORMAL;

dump:
	pr_err("Dumping DataOUT PDU Offset: %u Length: %d DataSN:"
		" 0x%08x\n", hdr->offset, payload_length, hdr->datasn);
	return iscsit_dump_data_payload(conn, payload_length, 1);
}
140
/*
 * Validate an unsolicited DataOUT PDU against the unsolicited sequence
 * window (seq_start_offset..seq_end_offset) and the negotiated
 * FirstBurstLength / ISCSI_FLAG_CMD_FINAL rules.
 */
static int iscsit_dataout_check_unsolicited_sequence(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	u32 first_burst_len;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);


	/* The PDU must land entirely inside the unsolicited window. */
	if ((hdr->offset < cmd->seq_start_offset) ||
	   ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
		pr_err("Command ITT: 0x%08x with Offset: %u,"
			" Length: %u outside of Unsolicited Sequence %u:%u while"
			" DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
			hdr->offset, payload_length, cmd->seq_start_offset,
			cmd->seq_end_offset);
		return DATAOUT_CANNOT_RECOVER;
	}

	first_burst_len = (cmd->first_burst_len + payload_length);

	/* Overshooting FirstBurstLength is a fatal initiator error. */
	if (first_burst_len > conn->sess->sess_ops->FirstBurstLength) {
		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
			" for this Unsolicited DataOut Burst.\n",
			first_burst_len, conn->sess->sess_ops->FirstBurstLength);
		transport_send_check_condition_and_sense(&cmd->se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return DATAOUT_CANNOT_RECOVER;
	}

	/*
	 * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
	 * checks for the current Unsolicited DataOUT Sequence.
	 */
	if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
		/*
		 * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of
		 * sequence checks are handled in
		 * iscsit_dataout_datapduinorder_no_fbit().
		 */
		if (!conn->sess->sess_ops->DataPDUInOrder)
			goto out;

		/* F-bit set: burst must end at FirstBurstLength or at the
		 * expected transfer length. */
		if ((first_burst_len != cmd->data_length) &&
		    (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) {
			pr_err("Unsolicited non-immediate data"
			" received %u does not equal FirstBurstLength: %u, and"
			" does not equal ExpXferLen %u.\n", first_burst_len,
				conn->sess->sess_ops->FirstBurstLength,
				cmd->data_length);
			transport_send_check_condition_and_sense(&cmd->se_cmd,
					TCM_INCORRECT_AMOUNT_OF_DATA, 0);
			return DATAOUT_CANNOT_RECOVER;
		}
	} else {
		/* F-bit clear: reaching either boundary without it set is
		 * a protocol error. */
		if (first_burst_len == conn->sess->sess_ops->FirstBurstLength) {
			pr_err("Command ITT: 0x%08x reached"
			" FirstBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
				" error.\n", cmd->init_task_tag,
				conn->sess->sess_ops->FirstBurstLength);
			return DATAOUT_CANNOT_RECOVER;
		}
		if (first_burst_len == cmd->data_length) {
			pr_err("Command ITT: 0x%08x reached"
			" ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
			" error.\n", cmd->init_task_tag, cmd->data_length);
			return DATAOUT_CANNOT_RECOVER;
		}
	}

out:
	return DATAOUT_NORMAL;
}
215
static int iscsit_dataout_check_sequence(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	u32 next_burst_len;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_seq *seq = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	/*
	 * For DataSequenceInOrder=Yes: Check that the offset and offset+length
	 * is within range as defined by iscsi_set_dataout_sequence_values().
	 *
	 * For DataSequenceInOrder=No: Check that an struct iscsi_seq exists for
	 * offset+length tuple.
	 */
	if (conn->sess->sess_ops->DataSequenceInOrder) {
		/*
		 * Due to possibility of recovery DataOUT sent by the initiator
		 * fullfilling an Recovery R2T, it's best to just dump the
		 * payload here, instead of erroring out.
		 */
		if ((hdr->offset < cmd->seq_start_offset) ||
		   ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
			pr_err("Command ITT: 0x%08x with Offset: %u,"
			" Length: %u outside of Sequence %u:%u while"
			" DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
			hdr->offset, payload_length, cmd->seq_start_offset,
				cmd->seq_end_offset);

			if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
				return DATAOUT_CANNOT_RECOVER;
			return DATAOUT_WITHIN_COMMAND_RECOVERY;
		}

		next_burst_len = (cmd->next_burst_len + payload_length);
	} else {
		/* Locate the sequence owning this offset+length tuple. */
		seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
		if (!seq)
			return DATAOUT_CANNOT_RECOVER;
		/*
		 * Set the struct iscsi_seq pointer to reuse later.
		 */
		cmd->seq_ptr = seq;

		/* Data for a finished sequence is silently discarded. */
		if (seq->status == DATAOUT_SEQUENCE_COMPLETE) {
			if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
				return DATAOUT_CANNOT_RECOVER;
			return DATAOUT_WITHIN_COMMAND_RECOVERY;
		}

		next_burst_len = (seq->next_burst_len + payload_length);
	}

	/* A burst may never exceed the negotiated MaxBurstLength. */
	if (next_burst_len > conn->sess->sess_ops->MaxBurstLength) {
		pr_err("Command ITT: 0x%08x, NextBurstLength: %u and"
			" Length: %u exceeds MaxBurstLength: %u. protocol"
			" error.\n", cmd->init_task_tag,
			(next_burst_len - payload_length),
			payload_length, conn->sess->sess_ops->MaxBurstLength);
		return DATAOUT_CANNOT_RECOVER;
	}

	/*
	 * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
	 * checks for the current DataOUT Sequence.
	 */
	if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
		/*
		 * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of
		 * sequence checks are handled in
		 * iscsit_dataout_datapduinorder_no_fbit().
		 */
		if (!conn->sess->sess_ops->DataPDUInOrder)
			goto out;

		/* F-bit before the burst's natural end is a protocol error. */
		if (conn->sess->sess_ops->DataSequenceInOrder) {
			if ((next_burst_len <
			     conn->sess->sess_ops->MaxBurstLength) &&
			   ((cmd->write_data_done + payload_length) <
			     cmd->data_length)) {
				pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
				" before end of DataOUT sequence, protocol"
				" error.\n", cmd->init_task_tag);
				return DATAOUT_CANNOT_RECOVER;
			}
		} else {
			if (next_burst_len < seq->xfer_len) {
				pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
				" before end of DataOUT sequence, protocol"
				" error.\n", cmd->init_task_tag);
				return DATAOUT_CANNOT_RECOVER;
			}
		}
	} else {
		/* Missing F-bit at a burst/transfer boundary is likewise a
		 * protocol error. */
		if (conn->sess->sess_ops->DataSequenceInOrder) {
			if (next_burst_len ==
					conn->sess->sess_ops->MaxBurstLength) {
				pr_err("Command ITT: 0x%08x reached"
				" MaxBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is"
				" not set, protocol error.", cmd->init_task_tag,
					conn->sess->sess_ops->MaxBurstLength);
				return DATAOUT_CANNOT_RECOVER;
			}
			if ((cmd->write_data_done + payload_length) ==
					cmd->data_length) {
				pr_err("Command ITT: 0x%08x reached"
				" last DataOUT PDU in sequence but ISCSI_FLAG_"
				"CMD_FINAL is not set, protocol error.\n",
					cmd->init_task_tag);
				return DATAOUT_CANNOT_RECOVER;
			}
		} else {
			if (next_burst_len == seq->xfer_len) {
				pr_err("Command ITT: 0x%08x reached"
				" last DataOUT PDU in sequence but ISCSI_FLAG_"
				"CMD_FINAL is not set, protocol error.\n",
					cmd->init_task_tag);
				return DATAOUT_CANNOT_RECOVER;
			}
		}
	}

out:
	return DATAOUT_NORMAL;
}
343
/*
 * Compare the PDU's DataSN against the expected counter (command-wide
 * for DataSequenceInOrder=Yes, per-sequence otherwise) and decide:
 * DATAOUT_NORMAL, dump-and-continue, or fail the connection.
 */
static int iscsit_dataout_check_datasn(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	int dump = 0, recovery = 0;
	u32 data_sn = 0;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	/*
	 * Considering the target has no method of re-requesting DataOUT
	 * by DataSN, if we receieve a greater DataSN than expected we
	 * assume the functions for DataPDUInOrder=[Yes,No] below will
	 * handle it.
	 *
	 * If the DataSN is less than expected, dump the payload.
	 */
	if (conn->sess->sess_ops->DataSequenceInOrder)
		data_sn = cmd->data_sn;
	else {
		struct iscsi_seq *seq = cmd->seq_ptr;
		data_sn = seq->data_sn;
	}

	if (hdr->datasn > data_sn) {
		/* Ahead of the counter: enter within-command recovery
		 * (requires ERL > 0, checked below). */
		pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
			" higher than expected 0x%08x.\n", cmd->init_task_tag,
				hdr->datasn, data_sn);
		recovery = 1;
		goto recover;
	} else if (hdr->datasn < data_sn) {
		/* Behind the counter: a retransmit; discard the payload. */
		pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
			" lower than expected 0x%08x, discarding payload.\n",
			cmd->init_task_tag, hdr->datasn, data_sn);
		dump = 1;
		goto dump;
	}

	return DATAOUT_NORMAL;

recover:
	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
		pr_err("Unable to perform within-command recovery"
				" while ERL=0.\n");
		return DATAOUT_CANNOT_RECOVER;
	}
dump:
	/* Drain the payload off the wire so the connection stays in sync. */
	if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
		return DATAOUT_CANNOT_RECOVER;

	return (recovery || dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY :
				DATAOUT_NORMAL;
}
398
static int iscsit_dataout_pre_datapduinorder_yes(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	int dump = 0, recovery = 0;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	/*
	 * For DataSequenceInOrder=Yes: If the offset is greater than the global
	 * DataPDUInOrder=Yes offset counter in struct iscsi_cmd a protcol error has
	 * occured and fail the connection.
	 *
	 * For DataSequenceInOrder=No: If the offset is greater than the per
	 * sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq a protocol
	 * error has occured and fail the connection.
	 */
	if (conn->sess->sess_ops->DataSequenceInOrder) {
		if (hdr->offset != cmd->write_data_done) {
			pr_err("Command ITT: 0x%08x, received offset"
			" %u different than expected %u.\n", cmd->init_task_tag,
				hdr->offset, cmd->write_data_done);
			recovery = 1;
			goto recover;
		}
	} else {
		struct iscsi_seq *seq = cmd->seq_ptr;

		if (hdr->offset > seq->offset) {
			/* Ahead of the sequence: attempt recovery. */
			pr_err("Command ITT: 0x%08x, received offset"
			" %u greater than expected %u.\n", cmd->init_task_tag,
				hdr->offset, seq->offset);
			recovery = 1;
			goto recover;
		} else if (hdr->offset < seq->offset) {
			/* Behind the sequence: stale data, just discard. */
			pr_err("Command ITT: 0x%08x, received offset"
			" %u less than expected %u, discarding payload.\n",
				cmd->init_task_tag, hdr->offset, seq->offset);
			dump = 1;
			goto dump;
		}
	}

	return DATAOUT_NORMAL;

recover:
	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
		pr_err("Unable to perform within-command recovery"
				" while ERL=0.\n");
		return DATAOUT_CANNOT_RECOVER;
	}
dump:
	/* Drain the payload off the wire so the connection stays in sync. */
	if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
		return DATAOUT_CANNOT_RECOVER;

	return (recovery) ? iscsit_recover_dataout_sequence(cmd,
		hdr->offset, payload_length) :
	       (dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL;
}
459
460static int iscsit_dataout_pre_datapduinorder_no(
461 struct iscsi_cmd *cmd,
462 unsigned char *buf)
463{
464 struct iscsi_pdu *pdu;
465 struct iscsi_data *hdr = (struct iscsi_data *) buf;
466 u32 payload_length = ntoh24(hdr->dlength);
467
468 pdu = iscsit_get_pdu_holder(cmd, hdr->offset, payload_length);
469 if (!pdu)
470 return DATAOUT_CANNOT_RECOVER;
471
472 cmd->pdu_ptr = pdu;
473
474 switch (pdu->status) {
475 case ISCSI_PDU_NOT_RECEIVED:
476 case ISCSI_PDU_CRC_FAILED:
477 case ISCSI_PDU_TIMED_OUT:
478 break;
479 case ISCSI_PDU_RECEIVED_OK:
480 pr_err("Command ITT: 0x%08x received already gotten"
481 " Offset: %u, Length: %u\n", cmd->init_task_tag,
482 hdr->offset, payload_length);
483 return iscsit_dump_data_payload(cmd->conn, payload_length, 1);
484 default:
485 return DATAOUT_CANNOT_RECOVER;
486 }
487
488 return DATAOUT_NORMAL;
489}
490
/*
 * Mark the R2T whose end-of-sequence matches offset+length as complete
 * and drop the outstanding R2T count.  Unsolicited data has no backing
 * R2T, so it is a no-op success.  Returns 0 on success, -1 if no
 * matching R2T is found.
 */
static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 length)
{
	struct iscsi_r2t *r2t;

	if (cmd->unsolicited_data)
		return 0;

	r2t = iscsit_get_r2t_for_eos(cmd, offset, length);
	if (!r2t)
		return -1;

	/* r2t_lock protects the R2T list and outstanding count. */
	spin_lock_bh(&cmd->r2t_lock);
	r2t->seq_complete = 1;
	cmd->outstanding_r2ts--;
	spin_unlock_bh(&cmd->r2t_lock);

	return 0;
}
509
510static int iscsit_dataout_update_datapduinorder_no(
511 struct iscsi_cmd *cmd,
512 u32 data_sn,
513 int f_bit)
514{
515 int ret = 0;
516 struct iscsi_pdu *pdu = cmd->pdu_ptr;
517
518 pdu->data_sn = data_sn;
519
520 switch (pdu->status) {
521 case ISCSI_PDU_NOT_RECEIVED:
522 pdu->status = ISCSI_PDU_RECEIVED_OK;
523 break;
524 case ISCSI_PDU_CRC_FAILED:
525 pdu->status = ISCSI_PDU_RECEIVED_OK;
526 break;
527 case ISCSI_PDU_TIMED_OUT:
528 pdu->status = ISCSI_PDU_RECEIVED_OK;
529 break;
530 default:
531 return DATAOUT_CANNOT_RECOVER;
532 }
533
534 if (f_bit) {
535 ret = iscsit_dataout_datapduinorder_no_fbit(cmd, pdu);
536 if (ret == DATAOUT_CANNOT_RECOVER)
537 return ret;
538 }
539
540 return DATAOUT_NORMAL;
541}
542
/*
 * Post-receive bookkeeping for a DataOUT PDU whose digest passed:
 * advance burst/offset/DataSN counters for the applicable mode
 * (unsolicited, in-order solicited, or per-sequence solicited) and
 * decide whether to send an R2T, hand off to the transport, or wait
 * for more data.
 */
static int iscsit_dataout_post_crc_passed(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	int ret, send_r2t = 0;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_seq *seq = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	if (cmd->unsolicited_data) {
		/* Completing FirstBurstLength ends the unsolicited phase. */
		if ((cmd->first_burst_len + payload_length) ==
		     conn->sess->sess_ops->FirstBurstLength) {
			if (iscsit_dataout_update_r2t(cmd, hdr->offset,
					payload_length) < 0)
				return DATAOUT_CANNOT_RECOVER;
			send_r2t = 1;
		}

		if (!conn->sess->sess_ops->DataPDUInOrder) {
			ret = iscsit_dataout_update_datapduinorder_no(cmd,
				hdr->datasn, (hdr->flags & ISCSI_FLAG_CMD_FINAL));
			if (ret == DATAOUT_CANNOT_RECOVER)
				return ret;
		}

		cmd->first_burst_len += payload_length;

		if (conn->sess->sess_ops->DataSequenceInOrder)
			cmd->data_sn++;
		else {
			seq = cmd->seq_ptr;
			seq->data_sn++;
			seq->offset += payload_length;
		}

		if (send_r2t) {
			if (seq)
				seq->status = DATAOUT_SEQUENCE_COMPLETE;
			cmd->first_burst_len = 0;
			cmd->unsolicited_data = 0;
		}
	} else {
		if (conn->sess->sess_ops->DataSequenceInOrder) {
			/* Burst complete at MaxBurstLength: close the R2T. */
			if ((cmd->next_burst_len + payload_length) ==
			     conn->sess->sess_ops->MaxBurstLength) {
				if (iscsit_dataout_update_r2t(cmd, hdr->offset,
						payload_length) < 0)
					return DATAOUT_CANNOT_RECOVER;
				send_r2t = 1;
			}

			if (!conn->sess->sess_ops->DataPDUInOrder) {
				ret = iscsit_dataout_update_datapduinorder_no(
						cmd, hdr->datasn,
						(hdr->flags & ISCSI_FLAG_CMD_FINAL));
				if (ret == DATAOUT_CANNOT_RECOVER)
					return ret;
			}

			cmd->next_burst_len += payload_length;
			cmd->data_sn++;

			if (send_r2t)
				cmd->next_burst_len = 0;
		} else {
			seq = cmd->seq_ptr;

			/* Sequence complete at its own transfer length. */
			if ((seq->next_burst_len + payload_length) ==
			     seq->xfer_len) {
				if (iscsit_dataout_update_r2t(cmd, hdr->offset,
						payload_length) < 0)
					return DATAOUT_CANNOT_RECOVER;
				send_r2t = 1;
			}

			if (!conn->sess->sess_ops->DataPDUInOrder) {
				ret = iscsit_dataout_update_datapduinorder_no(
						cmd, hdr->datasn,
						(hdr->flags & ISCSI_FLAG_CMD_FINAL));
				if (ret == DATAOUT_CANNOT_RECOVER)
					return ret;
			}

			seq->data_sn++;
			seq->offset += payload_length;
			seq->next_burst_len += payload_length;

			if (send_r2t) {
				seq->next_burst_len = 0;
				seq->status = DATAOUT_SEQUENCE_COMPLETE;
			}
		}
	}

	/* In-order DataSN restarts per burst when an R2T goes out. */
	if (send_r2t && conn->sess->sess_ops->DataSequenceInOrder)
		cmd->data_sn = 0;

	cmd->write_data_done += payload_length;

	return (cmd->write_data_done == cmd->data_length) ?
		DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ?
		DATAOUT_SEND_R2T : DATAOUT_NORMAL;
}
647
648static int iscsit_dataout_post_crc_failed(
649 struct iscsi_cmd *cmd,
650 unsigned char *buf)
651{
652 struct iscsi_conn *conn = cmd->conn;
653 struct iscsi_pdu *pdu;
654 struct iscsi_data *hdr = (struct iscsi_data *) buf;
655 u32 payload_length = ntoh24(hdr->dlength);
656
657 if (conn->sess->sess_ops->DataPDUInOrder)
658 goto recover;
659 /*
660 * The rest of this function is only called when DataPDUInOrder=No.
661 */
662 pdu = cmd->pdu_ptr;
663
664 switch (pdu->status) {
665 case ISCSI_PDU_NOT_RECEIVED:
666 pdu->status = ISCSI_PDU_CRC_FAILED;
667 break;
668 case ISCSI_PDU_CRC_FAILED:
669 break;
670 case ISCSI_PDU_TIMED_OUT:
671 pdu->status = ISCSI_PDU_CRC_FAILED;
672 break;
673 default:
674 return DATAOUT_CANNOT_RECOVER;
675 }
676
677recover:
678 return iscsit_recover_dataout_sequence(cmd, hdr->offset, payload_length);
679}
680
681/*
682 * Called from iscsit_handle_data_out() before DataOUT Payload is received
683 * and CRC computed.
684 */
685extern int iscsit_check_pre_dataout(
686 struct iscsi_cmd *cmd,
687 unsigned char *buf)
688{
689 int ret;
690 struct iscsi_conn *conn = cmd->conn;
691
692 ret = iscsit_dataout_within_command_recovery_check(cmd, buf);
693 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
694 (ret == DATAOUT_CANNOT_RECOVER))
695 return ret;
696
697 ret = iscsit_dataout_check_datasn(cmd, buf);
698 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
699 (ret == DATAOUT_CANNOT_RECOVER))
700 return ret;
701
702 if (cmd->unsolicited_data) {
703 ret = iscsit_dataout_check_unsolicited_sequence(cmd, buf);
704 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
705 (ret == DATAOUT_CANNOT_RECOVER))
706 return ret;
707 } else {
708 ret = iscsit_dataout_check_sequence(cmd, buf);
709 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
710 (ret == DATAOUT_CANNOT_RECOVER))
711 return ret;
712 }
713
714 return (conn->sess->sess_ops->DataPDUInOrder) ?
715 iscsit_dataout_pre_datapduinorder_yes(cmd, buf) :
716 iscsit_dataout_pre_datapduinorder_no(cmd, buf);
717}
718
719/*
720 * Called from iscsit_handle_data_out() after DataOUT Payload is received
721 * and CRC computed.
722 */
723int iscsit_check_post_dataout(
724 struct iscsi_cmd *cmd,
725 unsigned char *buf,
726 u8 data_crc_failed)
727{
728 struct iscsi_conn *conn = cmd->conn;
729
730 cmd->dataout_timeout_retries = 0;
731
732 if (!data_crc_failed)
733 return iscsit_dataout_post_crc_passed(cmd, buf);
734 else {
735 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
736 pr_err("Unable to recover from DataOUT CRC"
737 " failure while ERL=0, closing session.\n");
738 iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
739 1, 0, buf, cmd);
740 return DATAOUT_CANNOT_RECOVER;
741 }
742
743 iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
744 0, 0, buf, cmd);
745 return iscsit_dataout_post_crc_failed(cmd, buf);
746 }
747}
748
/*
 * Timer callback invoked when a session's Time2Retain window expires
 * without the initiator reinstating the session: account the failure in
 * the tiqn error statistics and close the session.
 */
static void iscsit_handle_time2retain_timeout(unsigned long data)
{
	struct iscsi_session *sess = (struct iscsi_session *) data;
	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;

	spin_lock_bh(&se_tpg->session_lock);
	/* A racing iscsit_stop_time2retain_timer() set STOP: bail out. */
	if (sess->time2retain_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&se_tpg->session_lock);
		return;
	}
	/* Session reinstatement is already tearing this session down. */
	if (atomic_read(&sess->session_reinstatement)) {
		pr_err("Exiting Time2Retain handler because"
			" session_reinstatement=1\n");
		spin_unlock_bh(&se_tpg->session_lock);
		return;
	}
	sess->time2retain_timer_flags |= ISCSI_TF_EXPIRED;

	pr_err("Time2Retain timer expired for SID: %u, cleaning up"
		" iSCSI session.\n", sess->sid);
	{
		struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

		if (tiqn) {
			/* Record the expiry as a connection-timeout error. */
			spin_lock(&tiqn->sess_err_stats.lock);
			strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
			(void *)sess->sess_ops->InitiatorName);
			tiqn->sess_err_stats.last_sess_failure_type =
					ISCSI_SESS_ERR_CXN_TIMEOUT;
			tiqn->sess_err_stats.cxn_timeout_errors++;
			sess->conn_timeout_errors++;
			spin_unlock(&tiqn->sess_err_stats.lock);
		}
	}

	/* Drop session_lock before closing; teardown takes its own locks. */
	spin_unlock_bh(&se_tpg->session_lock);
	iscsit_close_session(sess);
}
788
/*
 * Arm the session's Time2Retain timer for DefaultTime2Retain seconds.
 * A no-op when the TPG is not ACTIVE or the timer is already running.
 */
extern void iscsit_start_time2retain_handler(struct iscsi_session *sess)
{
	int tpg_active;
	/*
	 * Only start Time2Retain timer when the associated TPG is still in
	 * an ACTIVE (eg: not disabled or shutdown) state.
	 */
	spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock);
	tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE);
	spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock);

	if (!tpg_active)
		return;

	/* Timer already armed; nothing to do. */
	if (sess->time2retain_timer_flags & ISCSI_TF_RUNNING)
		return;

	pr_debug("Starting Time2Retain timer for %u seconds on"
		" SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid);

	init_timer(&sess->time2retain_timer);
	sess->time2retain_timer.expires =
		(get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ);
	sess->time2retain_timer.data = (unsigned long)sess;
	sess->time2retain_timer.function = iscsit_handle_time2retain_timeout;
	/* Clear STOP and mark RUNNING before the timer can fire. */
	sess->time2retain_timer_flags &= ~ISCSI_TF_STOP;
	sess->time2retain_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&sess->time2retain_timer);
}
818
/*
 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
 *
 * Returns -1 when the timer has already expired (teardown under way),
 * 0 otherwise.  session_lock is dropped across del_timer_sync() because
 * the timer handler takes the same lock — holding it would deadlock.
 */
extern int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
{
	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;

	if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
		return -1;

	if (!(sess->time2retain_timer_flags & ISCSI_TF_RUNNING))
		return 0;

	/* Tell a concurrently-firing handler to bail out. */
	sess->time2retain_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&se_tpg->session_lock);

	del_timer_sync(&sess->time2retain_timer);

	spin_lock_bh(&se_tpg->session_lock);
	sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
	pr_debug("Stopped Time2Retain Timer for SID: %u\n",
			sess->sid);
	return 0;
}
844
/*
 * Force connection reinstatement unless the connection is already
 * exiting or its transport has failed, then block until the teardown
 * path signals conn_wait_rcfr_comp and release the post-wait waiter.
 */
void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->state_lock);
	if (atomic_read(&conn->connection_exit)) {
		spin_unlock_bh(&conn->state_lock);
		goto sleep;
	}

	if (atomic_read(&conn->transport_failed)) {
		spin_unlock_bh(&conn->state_lock);
		goto sleep;
	}
	spin_unlock_bh(&conn->state_lock);

	iscsi_thread_set_force_reinstatement(conn);

sleep:
	wait_for_completion(&conn->conn_wait_rcfr_comp);
	complete(&conn->conn_post_wait_comp);
}
865
/*
 * Trigger connection reinstatement for a connection, unless it is
 * already exiting, transport-failed, or reinstatement is in progress.
 * When @sleep is non-zero, block until teardown signals conn_wait_comp;
 * otherwise just flag the reinstatement and return.
 */
void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
{
	spin_lock_bh(&conn->state_lock);
	if (atomic_read(&conn->connection_exit)) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}

	if (atomic_read(&conn->transport_failed)) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}

	if (atomic_read(&conn->connection_reinstatement)) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}

	if (iscsi_thread_set_force_reinstatement(conn) < 0) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}

	/* Set the flags under state_lock before any waiter can observe them. */
	atomic_set(&conn->connection_reinstatement, 1);
	if (!sleep) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}

	atomic_set(&conn->sleep_on_conn_wait_comp, 1);
	spin_unlock_bh(&conn->state_lock);

	wait_for_completion(&conn->conn_wait_comp);
	complete(&conn->conn_post_wait_comp);
}
901
/*
 * Flag the session to fall back to ErrorRecoveryLevel=0 semantics,
 * abandoning connection recovery for this session.
 */
void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
{
	pr_debug("Falling back to ErrorRecoveryLevel=0 for SID:"
		" %u\n", sess->sid);

	atomic_set(&sess->session_fall_back_to_erl0, 1);
}
909
910static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
911{
912 struct iscsi_session *sess = conn->sess;
913
914 if ((sess->sess_ops->ErrorRecoveryLevel == 2) &&
915 !atomic_read(&sess->session_reinstatement) &&
916 !atomic_read(&sess->session_fall_back_to_erl0))
917 iscsit_connection_recovery_transport_reset(conn);
918 else {
919 pr_debug("Performing cleanup for failed iSCSI"
920 " Connection ID: %hu from %s\n", conn->cid,
921 sess->sess_ops->InitiatorName);
922 iscsit_close_connection(conn);
923 }
924}
925
/*
 * Transition an exiting connection toward cleanup.  Idempotent: the
 * connection_exit flag ensures only the first caller acts.  A connection
 * in IN_LOGOUT is closed directly; one already in CLEANUP_WAIT is left
 * alone; anything else is moved to CLEANUP_WAIT and handed to
 * iscsit_handle_connection_cleanup().
 */
extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->state_lock);
	if (atomic_read(&conn->connection_exit)) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}
	atomic_set(&conn->connection_exit, 1);

	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
		spin_unlock_bh(&conn->state_lock);
		iscsit_close_connection(conn);
		return;
	}

	if (conn->conn_state == TARG_CONN_STATE_CLEANUP_WAIT) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}

	pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
	conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
	spin_unlock_bh(&conn->state_lock);

	iscsit_handle_connection_cleanup(conn);
}
952
/*
 * This is the simple function that makes the magic of
 * sync and steering happen in the follow paradoxical order:
 *
 * 0) Receive conn->of_marker (bytes left until next OFMarker)
 *    bytes into an offload buffer.  When we pass the exact number
 *    of bytes in conn->of_marker, iscsit_dump_data_payload() and hence
 *    rx_data() will automatically receive the identical u32 marker
 *    values and store it in conn->of_marker_offset;
 * 1) Now conn->of_marker_offset will contain the offset to the start
 *    of the next iSCSI PDU.  Dump these remaining bytes into another
 *    offload buffer.
 * 2) We are done!
 *    Next byte in the TCP stream will contain the next iSCSI PDU!
 *    Cool Huh?!
 */
int iscsit_recover_from_unknown_opcode(struct iscsi_conn *conn)
{
	/*
	 * Make sure the remaining bytes to next marker is a sane value.
	 */
	if (conn->of_marker > (conn->conn_ops->OFMarkInt * 4)) {
		pr_err("Remaining bytes to OFMarker: %u exceeds"
			" OFMarkInt bytes: %u.\n", conn->of_marker,
				conn->conn_ops->OFMarkInt * 4);
		return -1;
	}

	pr_debug("Advancing %u bytes in TCP stream to get to the"
			" next OFMarker.\n", conn->of_marker);

	if (iscsit_dump_data_payload(conn, conn->of_marker, 0) < 0)
		return -1;

	/*
	 * Make sure the offset marker we retrieved is a valid value.
	 */
	if (conn->of_marker_offset > (ISCSI_HDR_LEN + (ISCSI_CRC_LEN * 2) +
	    conn->conn_ops->MaxRecvDataSegmentLength)) {
		pr_err("OfMarker offset value: %u exceeds limit.\n",
			conn->of_marker_offset);
		return -1;
	}

	pr_debug("Discarding %u bytes of TCP stream to get to the"
			" next iSCSI Opcode.\n", conn->of_marker_offset);

	if (iscsit_dump_data_payload(conn, conn->of_marker_offset, 0) < 0)
		return -1;

	return 0;
}
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
new file mode 100644
index 000000000000..21acc9a06376
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -0,0 +1,15 @@
1#ifndef ISCSI_TARGET_ERL0_H
2#define ISCSI_TARGET_ERL0_H
3
4extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
5extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
6extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
7extern void iscsit_start_time2retain_handler(struct iscsi_session *);
8extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
9extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
10extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
11extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
12extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
13extern int iscsit_recover_from_unknown_opcode(struct iscsi_conn *);
14
15#endif /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
new file mode 100644
index 000000000000..980650792cf6
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -0,0 +1,1299 @@
1/*******************************************************************************
2 * This file contains error recovery level one used by the iSCSI Target driver.
3 *
 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/list.h>
22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h>
24#include <target/target_core_transport.h>
25
26#include "iscsi_target_core.h"
27#include "iscsi_target_seq_pdu_list.h"
28#include "iscsi_target_datain_values.h"
29#include "iscsi_target_device.h"
30#include "iscsi_target_tpg.h"
31#include "iscsi_target_util.h"
32#include "iscsi_target_erl0.h"
33#include "iscsi_target_erl1.h"
34#include "iscsi_target_erl2.h"
35#include "iscsi_target.h"
36
37#define OFFLOAD_BUF_SIZE 32768
38
39/*
40 * Used to dump excess datain payload for certain error recovery
41 * situations. Receive in OFFLOAD_BUF_SIZE max of datain per rx_data().
42 *
43 * dump_padding_digest denotes if padding and data digests need
44 * to be dumped.
45 */
46int iscsit_dump_data_payload(
47 struct iscsi_conn *conn,
48 u32 buf_len,
49 int dump_padding_digest)
50{
51 char *buf, pad_bytes[4];
52 int ret = DATAOUT_WITHIN_COMMAND_RECOVERY, rx_got;
53 u32 length, padding, offset = 0, size;
54 struct kvec iov;
55
56 length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
57
58 buf = kzalloc(length, GFP_ATOMIC);
59 if (!buf) {
60 pr_err("Unable to allocate %u bytes for offload"
61 " buffer.\n", length);
62 return -1;
63 }
64 memset(&iov, 0, sizeof(struct kvec));
65
66 while (offset < buf_len) {
67 size = ((offset + length) > buf_len) ?
68 (buf_len - offset) : length;
69
70 iov.iov_len = size;
71 iov.iov_base = buf;
72
73 rx_got = rx_data(conn, &iov, 1, size);
74 if (rx_got != size) {
75 ret = DATAOUT_CANNOT_RECOVER;
76 goto out;
77 }
78
79 offset += size;
80 }
81
82 if (!dump_padding_digest)
83 goto out;
84
85 padding = ((-buf_len) & 3);
86 if (padding != 0) {
87 iov.iov_len = padding;
88 iov.iov_base = pad_bytes;
89
90 rx_got = rx_data(conn, &iov, 1, padding);
91 if (rx_got != padding) {
92 ret = DATAOUT_CANNOT_RECOVER;
93 goto out;
94 }
95 }
96
97 if (conn->conn_ops->DataDigest) {
98 u32 data_crc;
99
100 iov.iov_len = ISCSI_CRC_LEN;
101 iov.iov_base = &data_crc;
102
103 rx_got = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
104 if (rx_got != ISCSI_CRC_LEN) {
105 ret = DATAOUT_CANNOT_RECOVER;
106 goto out;
107 }
108 }
109
110out:
111 kfree(buf);
112 return ret;
113}
114
115/*
116 * Used for retransmitting R2Ts from a R2T SNACK request.
117 */
118static int iscsit_send_recovery_r2t_for_snack(
119 struct iscsi_cmd *cmd,
120 struct iscsi_r2t *r2t)
121{
122 /*
123 * If the struct iscsi_r2t has not been sent yet, we can safely
124 * ignore retransmission
125 * of the R2TSN in question.
126 */
127 spin_lock_bh(&cmd->r2t_lock);
128 if (!r2t->sent_r2t) {
129 spin_unlock_bh(&cmd->r2t_lock);
130 return 0;
131 }
132 r2t->sent_r2t = 0;
133 spin_unlock_bh(&cmd->r2t_lock);
134
135 iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
136
137 return 0;
138}
139
/*
 * Handle an R2T SNACK: validate BegRun/RunLength against already-acked
 * and currently-issued R2TSNs, then queue each requested R2T for
 * retransmission.
 */
static int iscsit_handle_r2t_snack(
	struct iscsi_cmd *cmd,
	unsigned char *buf,
	u32 begrun,
	u32 runlength)
{
	u32 last_r2tsn;
	struct iscsi_r2t *r2t;

	/*
	 * Make sure the initiator is not requesting retransmission
	 * of R2TSNs already acknowledged by a TMR TASK_REASSIGN.
	 */
	if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
	    (begrun <= cmd->acked_data_sn)) {
		pr_err("ITT: 0x%08x, R2T SNACK requesting"
			" retransmission of R2TSN: 0x%08x to 0x%08x but already"
			" acked to  R2TSN: 0x%08x by TMR TASK_REASSIGN,"
			" protocol error.\n", cmd->init_task_tag, begrun,
			(begrun + runlength), cmd->acked_data_sn);

			return iscsit_add_reject_from_cmd(
					ISCSI_REASON_PROTOCOL_ERROR,
					1, 0, buf, cmd);
	}

	if (runlength) {
		/* The requested range must not exceed the current R2TSN. */
		if ((begrun + runlength) > cmd->r2t_sn) {
			pr_err("Command ITT: 0x%08x received R2T SNACK"
			" with BegRun: 0x%08x, RunLength: 0x%08x, exceeds"
			" current R2TSN: 0x%08x, protocol error.\n",
			cmd->init_task_tag, begrun, runlength, cmd->r2t_sn);
			return iscsit_add_reject_from_cmd(
				ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd);
		}
		last_r2tsn = (begrun + runlength);
	} else
		/* RunLength of zero means "everything from BegRun on". */
		last_r2tsn = cmd->r2t_sn;

	while (begrun < last_r2tsn) {
		r2t = iscsit_get_holder_for_r2tsn(cmd, begrun);
		if (!r2t)
			return -1;
		if (iscsit_send_recovery_r2t_for_snack(cmd, r2t) < 0)
			return -1;

		begrun++;
	}

	return 0;
}
191
/*
 * Generates Offsets and NextBurstLength based on Begrun and Runlength
 * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
 *
 * For DataSequenceInOrder=Yes and DataPDUInOrder=[Yes,No] only.
 *
 * FIXME: How is this handled for a RData SNACK?
 */
int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
	struct iscsi_cmd *cmd,
	struct iscsi_datain_req *dr)
{
	u32 data_sn = 0, data_sn_count = 0;
	u32 pdu_start = 0, seq_no = 0;
	u32 begrun = dr->begrun;
	struct iscsi_conn *conn = cmd->conn;

	/*
	 * Replay the DataIN progression up to BegRun, accumulating
	 * read_data_done and next_burst_len one MRDSL-sized PDU at a
	 * time, rolling over to a new burst at MaxBurstLength.
	 */
	while (begrun > data_sn++) {
		data_sn_count++;
		if ((dr->next_burst_len +
		     conn->conn_ops->MaxRecvDataSegmentLength) <
		     conn->sess->sess_ops->MaxBurstLength) {
			dr->read_data_done +=
				conn->conn_ops->MaxRecvDataSegmentLength;
			dr->next_burst_len +=
				conn->conn_ops->MaxRecvDataSegmentLength;
		} else {
			dr->read_data_done +=
				(conn->sess->sess_ops->MaxBurstLength -
				 dr->next_burst_len);
			dr->next_burst_len = 0;
			pdu_start += data_sn_count;
			data_sn_count = 0;
			seq_no++;
		}
	}

	/* Out-of-order PDUs need the send-order cursors restored too. */
	if (!conn->sess->sess_ops->DataPDUInOrder) {
		cmd->seq_no = seq_no;
		cmd->pdu_start = pdu_start;
		cmd->pdu_send_order = data_sn_count;
	}

	return 0;
}
237
/*
 * Generates Offsets and NextBurstLength based on Begrun and Runlength
 * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
 *
 * For DataSequenceInOrder=No and DataPDUInOrder=[Yes,No] only.
 *
 * FIXME: How is this handled for a RData SNACK?
 */
int iscsit_create_recovery_datain_values_datasequenceinorder_no(
	struct iscsi_cmd *cmd,
	struct iscsi_datain_req *dr)
{
	int found_seq = 0, i;
	u32 data_sn, read_data_done = 0, seq_send_order = 0;
	u32 begrun = dr->begrun;
	u32 runlength = dr->runlength;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_seq *first_seq = NULL, *seq = NULL;

	if (!cmd->seq_list) {
		pr_err("struct iscsi_cmd->seq_list is NULL!\n");
		return -1;
	}

	/*
	 * Calculate read_data_done for all sequences containing a
	 * first_datasn and last_datasn less than the BegRun.
	 *
	 * Locate the struct iscsi_seq the BegRun lies within and calculate
	 * NextBurstLength up to the DataSN based on MaxRecvDataSegmentLength.
	 *
	 * Also use struct iscsi_seq->seq_send_order to determine where to start.
	 */
	for (i = 0; i < cmd->seq_count; i++) {
		seq = &cmd->seq_list[i];

		if (!seq->seq_send_order)
			first_seq = seq;

		/*
		 * No data has been transferred for this DataIN sequence, so the
		 * seq->first_datasn and seq->last_datasn have not been set.
		 */
		if (!seq->sent) {
#if 0
			pr_err("Ignoring non-sent sequence 0x%08x ->"
				" 0x%08x\n\n", seq->first_datasn,
				seq->last_datasn);
#endif
			continue;
		}

		/*
		 * This DataIN sequence precedes the received BegRun, add the
		 * total xfer_len of the sequence to read_data_done and reset
		 * seq->pdu_send_order.
		 */
		if ((seq->first_datasn < begrun) &&
		    (seq->last_datasn < begrun)) {
#if 0
			pr_err("Pre BegRun sequence 0x%08x ->"
				" 0x%08x\n", seq->first_datasn,
				seq->last_datasn);
#endif
			read_data_done += cmd->seq_list[i].xfer_len;
			seq->next_burst_len = seq->pdu_send_order = 0;
			continue;
		}

		/*
		 * The BegRun lies within this DataIN sequence.
		 */
		if ((seq->first_datasn <= begrun) &&
		    (seq->last_datasn >= begrun)) {
#if 0
			pr_err("Found sequence begrun: 0x%08x in"
				" 0x%08x -> 0x%08x\n", begrun,
				seq->first_datasn, seq->last_datasn);
#endif
			seq_send_order = seq->seq_send_order;
			data_sn = seq->first_datasn;
			seq->next_burst_len = seq->pdu_send_order = 0;
			found_seq = 1;

			/*
			 * For DataPDUInOrder=Yes, while the first DataSN of
			 * the sequence is less than the received BegRun, add
			 * the MaxRecvDataSegmentLength to read_data_done and
			 * to the sequence's next_burst_len;
			 *
			 * For DataPDUInOrder=No, while the first DataSN of the
			 * sequence is less than the received BegRun, find the
			 * struct iscsi_pdu of the DataSN in question and add the
			 * MaxRecvDataSegmentLength to read_data_done and to the
			 * sequence's next_burst_len;
			 */
			if (conn->sess->sess_ops->DataPDUInOrder) {
				while (data_sn < begrun) {
					seq->pdu_send_order++;
					read_data_done +=
						conn->conn_ops->MaxRecvDataSegmentLength;
					seq->next_burst_len +=
						conn->conn_ops->MaxRecvDataSegmentLength;
					data_sn++;
				}
			} else {
				int j;
				struct iscsi_pdu *pdu;

				while (data_sn < begrun) {
					seq->pdu_send_order++;

					for (j = 0; j < seq->pdu_count; j++) {
						pdu = &cmd->pdu_list[
							seq->pdu_start + j];
						if (pdu->data_sn == data_sn) {
							read_data_done +=
								pdu->length;
							seq->next_burst_len +=
								pdu->length;
						}
					}
					data_sn++;
				}
			}
			continue;
		}

		/*
		 * This DataIN sequence is larger than the received BegRun,
		 * reset seq->pdu_send_order and continue.
		 */
		if ((seq->first_datasn > begrun) ||
		    (seq->last_datasn > begrun)) {
#if 0
			pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n",
					seq->first_datasn, seq->last_datasn);
#endif
			seq->next_burst_len = seq->pdu_send_order = 0;
			continue;
		}
	}

	if (!found_seq) {
		if (!begrun) {
			if (!first_seq) {
				pr_err("ITT: 0x%08x, Begrun: 0x%08x"
					" but first_seq is NULL\n",
					cmd->init_task_tag, begrun);
				return -1;
			}
			seq_send_order = first_seq->seq_send_order;
			/*
			 * NOTE(review): 'seq' here still points at the last
			 * sequence visited by the loop above, not at
			 * 'first_seq' — looks suspicious; confirm whether
			 * first_seq's cursors were meant to be reset instead.
			 */
			seq->next_burst_len = seq->pdu_send_order = 0;
			goto done;
		}

		pr_err("Unable to locate struct iscsi_seq for ITT: 0x%08x,"
			" BegRun: 0x%08x, RunLength: 0x%08x while"
			" DataSequenceInOrder=No and DataPDUInOrder=%s.\n",
				cmd->init_task_tag, begrun, runlength,
			(conn->sess->sess_ops->DataPDUInOrder) ? "Yes" : "No");
		return -1;
	}

done:
	dr->read_data_done = read_data_done;
	dr->seq_send_order = seq_send_order;

	return 0;
}
408
/*
 * Handle a Data SNACK for a DataIN-bearing command: validate the
 * requested BegRun/RunLength window, then attach a recovery
 * iscsi_datain_req and queue the command for DataIN retransmission.
 */
static int iscsit_handle_recovery_datain(
	struct iscsi_cmd *cmd,
	unsigned char *buf,
	u32 begrun,
	u32 runlength)
{
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_datain_req *dr;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	/* Nothing to retransmit until the backend has completed the I/O. */
	if (!atomic_read(&se_cmd->t_transport_complete)) {
		pr_err("Ignoring ITT: 0x%08x Data SNACK\n",
				cmd->init_task_tag);
		return 0;
	}

	/*
	 * Make sure the initiator is not requesting retransmission
	 * of DataSNs already acknowledged by a Data ACK SNACK.
	 */
	if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
	    (begrun <= cmd->acked_data_sn)) {
		pr_err("ITT: 0x%08x, Data SNACK requesting"
			" retransmission of DataSN: 0x%08x to 0x%08x but"
			" already acked to  DataSN: 0x%08x by Data ACK SNACK,"
			" protocol error.\n", cmd->init_task_tag, begrun,
			(begrun + runlength), cmd->acked_data_sn);

		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
				1, 0, buf, cmd);
	}

	/*
	 * Make sure BegRun and RunLength in the Data SNACK are sane.
	 * Note: (cmd->data_sn - 1) will carry the maximum DataSN sent.
	 */
	if ((begrun + runlength) > (cmd->data_sn - 1)) {
		pr_err("Initiator requesting BegRun: 0x%08x, RunLength"
			": 0x%08x greater than maximum DataSN: 0x%08x.\n",
				begrun, runlength, (cmd->data_sn - 1));
		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
				1, 0, buf, cmd);
	}

	dr = iscsit_allocate_datain_req();
	if (!dr)
		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
				1, 0, buf, cmd);

	dr->data_sn = dr->begrun = begrun;
	dr->runlength = runlength;
	dr->generate_recovery_values = 1;
	dr->recovery = DATAIN_WITHIN_COMMAND_RECOVERY;

	iscsit_attach_datain_req(cmd, dr);

	cmd->i_state = ISTATE_SEND_DATAIN;
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);

	return 0;
}
470
471int iscsit_handle_recovery_datain_or_r2t(
472 struct iscsi_conn *conn,
473 unsigned char *buf,
474 u32 init_task_tag,
475 u32 targ_xfer_tag,
476 u32 begrun,
477 u32 runlength)
478{
479 struct iscsi_cmd *cmd;
480
481 cmd = iscsit_find_cmd_from_itt(conn, init_task_tag);
482 if (!cmd)
483 return 0;
484
485 /*
486 * FIXME: This will not work for bidi commands.
487 */
488 switch (cmd->data_direction) {
489 case DMA_TO_DEVICE:
490 return iscsit_handle_r2t_snack(cmd, buf, begrun, runlength);
491 case DMA_FROM_DEVICE:
492 return iscsit_handle_recovery_datain(cmd, buf, begrun,
493 runlength);
494 default:
495 pr_err("Unknown cmd->data_direction: 0x%02x\n",
496 cmd->data_direction);
497 return -1;
498 }
499
500 return 0;
501}
502
/* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */
/*
 * Handle a Status SNACK: requeue every command whose StatSN falls in
 * [begrun, last_statsn) for status retransmission.  SNACKs for StatSNs
 * the target has not transmitted yet are treated as proactive and
 * ignored.
 */
int iscsit_handle_status_snack(
	struct iscsi_conn *conn,
	u32 init_task_tag,
	u32 targ_xfer_tag,
	u32 begrun,
	u32 runlength)
{
	struct iscsi_cmd *cmd = NULL;
	u32 last_statsn;
	int found_cmd;

	/* Already-acknowledged StatSNs cannot be retransmitted. */
	if (conn->exp_statsn > begrun) {
		pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
			" 0x%08x but already got ExpStatSN: 0x%08x on CID:"
			" %hu.\n", begrun, runlength, conn->exp_statsn,
			conn->cid);
		return 0;
	}

	/* RunLength of zero means everything up to the current StatSN. */
	last_statsn = (!runlength) ? conn->stat_sn : (begrun + runlength);

	while (begrun < last_statsn) {
		found_cmd = 0;

		spin_lock_bh(&conn->cmd_lock);
		list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
			if (cmd->stat_sn == begrun) {
				found_cmd = 1;
				break;
			}
		}
		spin_unlock_bh(&conn->cmd_lock);

		if (!found_cmd) {
			pr_err("Unable to find StatSN: 0x%08x for"
				" a Status SNACK, assuming this was a"
				" protactic SNACK for an untransmitted"
				" StatSN, ignoring.\n", begrun);
			begrun++;
			continue;
		}

		/* Command still sending DataIN: status not yet transmitted. */
		spin_lock_bh(&cmd->istate_lock);
		if (cmd->i_state == ISTATE_SEND_DATAIN) {
			spin_unlock_bh(&cmd->istate_lock);
			pr_err("Ignoring Status SNACK for BegRun:"
				" 0x%08x, RunLength: 0x%08x, assuming this was"
				" a protactic SNACK for an untransmitted"
				" StatSN\n", begrun, runlength);
			begrun++;
			continue;
		}
		spin_unlock_bh(&cmd->istate_lock);

		cmd->i_state = ISTATE_SEND_STATUS_RECOVERY;
		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
		begrun++;
	}

	return 0;
}
565
/*
 * Handle a Data ACK SNACK, which acknowledges all DataSNs below BegRun
 * so the target may release resources for them.
 */
int iscsit_handle_data_ack(
	struct iscsi_conn *conn,
	u32 targ_xfer_tag,
	u32 begrun,
	u32 runlength)
{
	struct iscsi_cmd *cmd = NULL;

	cmd = iscsit_find_cmd_from_ttt(conn, targ_xfer_tag);
	if (!cmd) {
		pr_err("Data ACK SNACK for TTT: 0x%08x is"
			" invalid.\n", targ_xfer_tag);
		return -1;
	}

	/* The acked DataSN must advance monotonically. */
	if (begrun <= cmd->acked_data_sn) {
		pr_err("ITT: 0x%08x Data ACK SNACK BegRUN: 0x%08x is"
			" less than the already acked DataSN: 0x%08x.\n",
			cmd->init_task_tag, begrun, cmd->acked_data_sn);
		return -1;
	}

	/*
	 * For Data ACK SNACK, BegRun is the next expected DataSN.
	 * (see iSCSI v19: 10.16.6)
	 */
	cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
	cmd->acked_data_sn = (begrun - 1);

	pr_debug("Received Data ACK SNACK for ITT: 0x%08x,"
		" updated acked DataSN to 0x%08x.\n",
			cmd->init_task_tag, cmd->acked_data_sn);

	return 0;
}
601
602static int iscsit_send_recovery_r2t(
603 struct iscsi_cmd *cmd,
604 u32 offset,
605 u32 xfer_len)
606{
607 int ret;
608
609 spin_lock_bh(&cmd->r2t_lock);
610 ret = iscsit_add_r2t_to_list(cmd, offset, xfer_len, 1, 0);
611 spin_unlock_bh(&cmd->r2t_lock);
612
613 return ret;
614}
615
/*
 * Called when the final (F-bit) DataOUT PDU of a sequence arrives with
 * DataPDUInOrder=No: walk the sequence's PDU descriptors, batching runs
 * of not-received PDUs into recovery R2Ts.
 */
int iscsit_dataout_datapduinorder_no_fbit(
	struct iscsi_cmd *cmd,
	struct iscsi_pdu *pdu)
{
	int i, send_recovery_r2t = 0, recovery = 0;
	u32 length = 0, offset = 0, pdu_count = 0, xfer_len = 0;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_pdu *first_pdu = NULL;

	/*
	 * Get a struct iscsi_pdu pointer to the first PDU, and total PDU
	 * count of the DataOUT sequence.
	 */
	if (conn->sess->sess_ops->DataSequenceInOrder) {
		for (i = 0; i < cmd->pdu_count; i++) {
			if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
				if (!first_pdu)
					first_pdu = &cmd->pdu_list[i];
				 xfer_len += cmd->pdu_list[i].length;
				 pdu_count++;
			} else if (pdu_count)
				break;
		}
	} else {
		struct iscsi_seq *seq = cmd->seq_ptr;

		first_pdu = &cmd->pdu_list[seq->pdu_start];
		pdu_count = seq->pdu_count;
	}

	if (!first_pdu || !pdu_count)
		return DATAOUT_CANNOT_RECOVER;

	/*
	 * Loop through the ending DataOUT Sequence checking each struct iscsi_pdu.
	 * The following ugly logic does batching of not received PDUs.
	 */
	for (i = 0; i < pdu_count; i++) {
		if (first_pdu[i].status == ISCSI_PDU_RECEIVED_OK) {
			if (!send_recovery_r2t)
				continue;

			/* A batch of missing PDUs just ended; flush an R2T. */
			if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
				return DATAOUT_CANNOT_RECOVER;

			send_recovery_r2t = length = offset = 0;
			continue;
		}
		/*
		 * Set recovery = 1 for any missing, CRC failed, or timed
		 * out PDUs to let the DataOUT logic know that this sequence
		 * has not been completed yet.
		 *
		 * Also, only send a Recovery R2T for ISCSI_PDU_NOT_RECEIVED.
		 * We assume if the PDU either failed CRC or timed out
		 * that a Recovery R2T has already been sent.
		 */
		recovery = 1;

		if (first_pdu[i].status != ISCSI_PDU_NOT_RECEIVED)
			continue;

		if (!offset)
			offset = first_pdu[i].offset;
		length += first_pdu[i].length;

		send_recovery_r2t = 1;
	}

	/* Flush a trailing batch that ran to the end of the sequence. */
	if (send_recovery_r2t)
		if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
			return DATAOUT_CANNOT_RECOVER;

	return (!recovery) ? DATAOUT_NORMAL : DATAOUT_WITHIN_COMMAND_RECOVERY;
}
691
/*
 * Rewind the command's DataOUT accounting so the affected range can be
 * retransmitted, computing the recovery R2T's offset and length.
 */
static int iscsit_recalculate_dataout_values(
	struct iscsi_cmd *cmd,
	u32 pdu_offset,
	u32 pdu_length,
	u32 *r2t_offset,
	u32 *r2t_length)
{
	int i;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_pdu *pdu = NULL;

	if (conn->sess->sess_ops->DataSequenceInOrder) {
		cmd->data_sn = 0;

		/* In-order PDUs: re-request everything not yet written. */
		if (conn->sess->sess_ops->DataPDUInOrder) {
			*r2t_offset = cmd->write_data_done;
			*r2t_length = (cmd->seq_end_offset -
					cmd->write_data_done);
			return 0;
		}

		/* Out-of-order PDUs: re-request the whole current sequence. */
		*r2t_offset = cmd->seq_start_offset;
		*r2t_length = (cmd->seq_end_offset - cmd->seq_start_offset);

		/* Roll back accounting for PDUs inside the sequence window. */
		for (i = 0; i < cmd->pdu_count; i++) {
			pdu = &cmd->pdu_list[i];

			if (pdu->status != ISCSI_PDU_RECEIVED_OK)
				continue;

			if ((pdu->offset >= cmd->seq_start_offset) &&
			   ((pdu->offset + pdu->length) <=
			     cmd->seq_end_offset)) {
				if (!cmd->unsolicited_data)
					cmd->next_burst_len -= pdu->length;
				else
					cmd->first_burst_len -= pdu->length;

				cmd->write_data_done -= pdu->length;
				pdu->status = ISCSI_PDU_NOT_RECEIVED;
			}
		}
	} else {
		struct iscsi_seq *seq = NULL;

		seq = iscsit_get_seq_holder(cmd, pdu_offset, pdu_length);
		if (!seq)
			return -1;

		*r2t_offset = seq->orig_offset;
		*r2t_length = seq->xfer_len;

		/* Reset the sequence back to its original starting point. */
		cmd->write_data_done -= (seq->offset - seq->orig_offset);
		if (cmd->immediate_data)
			cmd->first_burst_len = cmd->write_data_done;

		seq->data_sn = 0;
		seq->offset = seq->orig_offset;
		seq->next_burst_len = 0;
		seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;

		if (conn->sess->sess_ops->DataPDUInOrder)
			return 0;

		for (i = 0; i < seq->pdu_count; i++) {
			pdu = &cmd->pdu_list[i+seq->pdu_start];

			if (pdu->status != ISCSI_PDU_RECEIVED_OK)
				continue;

			pdu->status = ISCSI_PDU_NOT_RECEIVED;
		}
	}

	return 0;
}
768
769int iscsit_recover_dataout_sequence(
770 struct iscsi_cmd *cmd,
771 u32 pdu_offset,
772 u32 pdu_length)
773{
774 u32 r2t_length = 0, r2t_offset = 0;
775
776 spin_lock_bh(&cmd->istate_lock);
777 cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
778 spin_unlock_bh(&cmd->istate_lock);
779
780 if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
781 &r2t_offset, &r2t_length) < 0)
782 return DATAOUT_CANNOT_RECOVER;
783
784 iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length);
785
786 return DATAOUT_WITHIN_COMMAND_RECOVERY;
787}
788
789static struct iscsi_ooo_cmdsn *iscsit_allocate_ooo_cmdsn(void)
790{
791 struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL;
792
793 ooo_cmdsn = kmem_cache_zalloc(lio_ooo_cache, GFP_ATOMIC);
794 if (!ooo_cmdsn) {
795 pr_err("Unable to allocate memory for"
796 " struct iscsi_ooo_cmdsn.\n");
797 return NULL;
798 }
799 INIT_LIST_HEAD(&ooo_cmdsn->ooo_list);
800
801 return ooo_cmdsn;
802}
803
804/*
805 * Called with sess->cmdsn_mutex held.
806 */
807static int iscsit_attach_ooo_cmdsn(
808 struct iscsi_session *sess,
809 struct iscsi_ooo_cmdsn *ooo_cmdsn)
810{
811 struct iscsi_ooo_cmdsn *ooo_tail, *ooo_tmp;
812 /*
813 * We attach the struct iscsi_ooo_cmdsn entry to the out of order
814 * list in increasing CmdSN order.
815 * This allows iscsi_execute_ooo_cmdsns() to detect any
816 * additional CmdSN holes while performing delayed execution.
817 */
818 if (list_empty(&sess->sess_ooo_cmdsn_list))
819 list_add_tail(&ooo_cmdsn->ooo_list,
820 &sess->sess_ooo_cmdsn_list);
821 else {
822 ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
823 typeof(*ooo_tail), ooo_list);
824 /*
825 * CmdSN is greater than the tail of the list.
826 */
827 if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
828 list_add_tail(&ooo_cmdsn->ooo_list,
829 &sess->sess_ooo_cmdsn_list);
830 else {
831 /*
832 * CmdSN is either lower than the head, or somewhere
833 * in the middle.
834 */
835 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
836 ooo_list) {
837 while (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
838 continue;
839
840 list_add(&ooo_cmdsn->ooo_list,
841 &ooo_tmp->ooo_list);
842 break;
843 }
844 }
845 }
846
847 return 0;
848}
849
/*
 * Removes an struct iscsi_ooo_cmdsn from a session's list,
 * called with struct iscsi_session->cmdsn_mutex held.
 *
 * Unlinks the descriptor from sess->sess_ooo_cmdsn_list and returns
 * it to the lio_ooo_cache slab.  The deferred command the descriptor
 * pointed at (ooo_cmdsn->cmd) is NOT released here.
 */
void iscsit_remove_ooo_cmdsn(
	struct iscsi_session *sess,
	struct iscsi_ooo_cmdsn *ooo_cmdsn)
{
	list_del(&ooo_cmdsn->ooo_list);
	kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
}
861
862void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
863{
864 struct iscsi_ooo_cmdsn *ooo_cmdsn;
865 struct iscsi_session *sess = conn->sess;
866
867 mutex_lock(&sess->cmdsn_mutex);
868 list_for_each_entry(ooo_cmdsn, &sess->sess_ooo_cmdsn_list, ooo_list) {
869 if (ooo_cmdsn->cid != conn->cid)
870 continue;
871
872 ooo_cmdsn->cmd = NULL;
873 }
874 mutex_unlock(&sess->cmdsn_mutex);
875}
876
877/*
878 * Called with sess->cmdsn_mutex held.
879 */
880int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
881{
882 int ooo_count = 0;
883 struct iscsi_cmd *cmd = NULL;
884 struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
885
886 list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
887 &sess->sess_ooo_cmdsn_list, ooo_list) {
888 if (ooo_cmdsn->cmdsn != sess->exp_cmd_sn)
889 continue;
890
891 if (!ooo_cmdsn->cmd) {
892 sess->exp_cmd_sn++;
893 iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
894 continue;
895 }
896
897 cmd = ooo_cmdsn->cmd;
898 cmd->i_state = cmd->deferred_i_state;
899 ooo_count++;
900 sess->exp_cmd_sn++;
901 pr_debug("Executing out of order CmdSN: 0x%08x,"
902 " incremented ExpCmdSN to 0x%08x.\n",
903 cmd->cmd_sn, sess->exp_cmd_sn);
904
905 iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
906
907 if (iscsit_execute_cmd(cmd, 1) < 0)
908 return -1;
909
910 continue;
911 }
912
913 return ooo_count;
914}
915
/*
 * Execute a received iSCSI command, dispatching on its opcode.
 *
 * Called either:
 *
 * 1. With sess->cmdsn_mutex held from iscsi_execute_ooo_cmdsns()
 *    or iscsi_check_received_cmdsn().
 * 2. With no locks held directly from iscsi_handle_XXX_pdu() functions
 *    for immediate commands.
 *
 * Takes cmd->istate_lock on entry; every path releases it before
 * calling out to the transport or queueing a response.  Returns 0 on
 * success, -1 on unrecoverable error, or the return value of the
 * transport/logout handler invoked.
 */
int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	int lr = 0;

	spin_lock_bh(&cmd->istate_lock);
	/* This command no longer fills a CmdSN hole once it executes. */
	if (ooo)
		cmd->cmd_flags &= ~ICF_OOO_CMDSN;

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		/*
		 * Go ahead and send the CHECK_CONDITION status for
		 * any SCSI CDB exceptions that may have occurred, also
		 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
		 */
		if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
			if (se_cmd->se_cmd_flags &
					SCF_SCSI_RESERVATION_CONFLICT) {
				cmd->i_state = ISTATE_SEND_STATUS;
				spin_unlock_bh(&cmd->istate_lock);
				iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
						cmd->i_state);
				return 0;
			}
			spin_unlock_bh(&cmd->istate_lock);
			/*
			 * Determine if delayed TASK_ABORTED status for WRITEs
			 * should be sent now if no unsolicited data out
			 * payloads are expected, or if the delayed status
			 * should be sent after unsolicited data out with
			 * ISCSI_FLAG_CMD_FINAL set in iscsi_handle_data_out()
			 */
			if (transport_check_aborted_status(se_cmd,
					(cmd->unsolicited_data == 0)) != 0)
				return 0;
			/*
			 * Otherwise send CHECK_CONDITION and sense for
			 * exception
			 */
			return transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		}
		/*
		 * Special case for delayed CmdSN with Immediate
		 * Data and/or Unsolicited Data Out attached.
		 */
		if (cmd->immediate_data) {
			if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
				spin_unlock_bh(&cmd->istate_lock);
				return transport_generic_handle_data(
						&cmd->se_cmd);
			}
			spin_unlock_bh(&cmd->istate_lock);

			if (!(cmd->cmd_flags &
					ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
				/*
				 * Send the delayed TASK_ABORTED status for
				 * WRITEs if no more unsolicited data is
				 * expected.
				 */
				if (transport_check_aborted_status(se_cmd, 1)
						!= 0)
					return 0;

				iscsit_set_dataout_sequence_values(cmd);
				iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 0);
			}
			return 0;
		}
		/*
		 * The default handler.
		 */
		spin_unlock_bh(&cmd->istate_lock);

		if ((cmd->data_direction == DMA_TO_DEVICE) &&
		    !(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
			/*
			 * Send the delayed TASK_ABORTED status for WRITEs if
			 * no more unsolicited data is expected.
			 */
			if (transport_check_aborted_status(se_cmd, 1) != 0)
				return 0;

			iscsit_set_dataout_sequence_values(cmd);
			spin_lock_bh(&cmd->dataout_timeout_lock);
			iscsit_start_dataout_timer(cmd, cmd->conn);
			spin_unlock_bh(&cmd->dataout_timeout_lock);
		}
		return transport_handle_cdb_direct(&cmd->se_cmd);

	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		spin_unlock_bh(&cmd->istate_lock);
		iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		/* A TMR that already failed gets its response queued directly. */
		if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
			spin_unlock_bh(&cmd->istate_lock);
			iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
					cmd->i_state);
			return 0;
		}
		spin_unlock_bh(&cmd->istate_lock);

		return transport_generic_handle_tmr(&cmd->se_cmd);
	case ISCSI_OP_LOGOUT:
		spin_unlock_bh(&cmd->istate_lock);
		switch (cmd->logout_reason) {
		case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
			lr = iscsit_logout_closesession(cmd, cmd->conn);
			break;
		case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
			lr = iscsit_logout_closeconnection(cmd, cmd->conn);
			break;
		case ISCSI_LOGOUT_REASON_RECOVERY:
			lr = iscsit_logout_removeconnforrecovery(cmd, cmd->conn);
			break;
		default:
			pr_err("Unknown iSCSI Logout Request Code:"
				" 0x%02x\n", cmd->logout_reason);
			return -1;
		}

		return lr;
	default:
		spin_unlock_bh(&cmd->istate_lock);
		pr_err("Cannot perform out of order execution for"
			" unknown iSCSI Opcode: 0x%02x\n", cmd->iscsi_opcode);
		return -1;
	}

	return 0;
}
1059
1060void iscsit_free_all_ooo_cmdsns(struct iscsi_session *sess)
1061{
1062 struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
1063
1064 mutex_lock(&sess->cmdsn_mutex);
1065 list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
1066 &sess->sess_ooo_cmdsn_list, ooo_list) {
1067
1068 list_del(&ooo_cmdsn->ooo_list);
1069 kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
1070 }
1071 mutex_unlock(&sess->cmdsn_mutex);
1072}
1073
1074int iscsit_handle_ooo_cmdsn(
1075 struct iscsi_session *sess,
1076 struct iscsi_cmd *cmd,
1077 u32 cmdsn)
1078{
1079 int batch = 0;
1080 struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL, *ooo_tail = NULL;
1081
1082 cmd->deferred_i_state = cmd->i_state;
1083 cmd->i_state = ISTATE_DEFERRED_CMD;
1084 cmd->cmd_flags |= ICF_OOO_CMDSN;
1085
1086 if (list_empty(&sess->sess_ooo_cmdsn_list))
1087 batch = 1;
1088 else {
1089 ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
1090 typeof(*ooo_tail), ooo_list);
1091 if (ooo_tail->cmdsn != (cmdsn - 1))
1092 batch = 1;
1093 }
1094
1095 ooo_cmdsn = iscsit_allocate_ooo_cmdsn();
1096 if (!ooo_cmdsn)
1097 return CMDSN_ERROR_CANNOT_RECOVER;
1098
1099 ooo_cmdsn->cmd = cmd;
1100 ooo_cmdsn->batch_count = (batch) ?
1101 (cmdsn - sess->exp_cmd_sn) : 1;
1102 ooo_cmdsn->cid = cmd->conn->cid;
1103 ooo_cmdsn->exp_cmdsn = sess->exp_cmd_sn;
1104 ooo_cmdsn->cmdsn = cmdsn;
1105
1106 if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) {
1107 kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
1108 return CMDSN_ERROR_CANNOT_RECOVER;
1109 }
1110
1111 return CMDSN_HIGHER_THAN_EXP;
1112}
1113
1114static int iscsit_set_dataout_timeout_values(
1115 struct iscsi_cmd *cmd,
1116 u32 *offset,
1117 u32 *length)
1118{
1119 struct iscsi_conn *conn = cmd->conn;
1120 struct iscsi_r2t *r2t;
1121
1122 if (cmd->unsolicited_data) {
1123 *offset = 0;
1124 *length = (conn->sess->sess_ops->FirstBurstLength >
1125 cmd->data_length) ?
1126 cmd->data_length :
1127 conn->sess->sess_ops->FirstBurstLength;
1128 return 0;
1129 }
1130
1131 spin_lock_bh(&cmd->r2t_lock);
1132 if (list_empty(&cmd->cmd_r2t_list)) {
1133 pr_err("cmd->cmd_r2t_list is empty!\n");
1134 spin_unlock_bh(&cmd->r2t_lock);
1135 return -1;
1136 }
1137
1138 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
1139 if (r2t->sent_r2t && !r2t->recovery_r2t && !r2t->seq_complete) {
1140 *offset = r2t->offset;
1141 *length = r2t->xfer_len;
1142 spin_unlock_bh(&cmd->r2t_lock);
1143 return 0;
1144 }
1145 }
1146 spin_unlock_bh(&cmd->r2t_lock);
1147
1148 pr_err("Unable to locate any incomplete DataOUT"
1149 " sequences for ITT: 0x%08x.\n", cmd->init_task_tag);
1150
1151 return -1;
1152}
1153
/*
 * NOTE: Called from interrupt (timer) context.
 *
 * DataOUT timeout handler: when the initiator fails to send expected
 * DataOUT within the timeout, attempt ERL>=1 recovery by recomputing
 * the missing span and re-issuing a recovery R2T.  On any failure (or
 * at ERL=0, or after too many retries) the connection is reinstated.
 */
static void iscsit_handle_dataout_timeout(unsigned long data)
{
	u32 pdu_length = 0, pdu_offset = 0;
	u32 r2t_length = 0, r2t_offset = 0;
	struct iscsi_cmd *cmd = (struct iscsi_cmd *) data;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_session *sess = NULL;
	struct iscsi_node_attrib *na;

	/* Keep the connection pinned while the handler runs. */
	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&cmd->dataout_timeout_lock);
	/* iscsit_stop_dataout_timer() raced with us; bail out quietly. */
	if (cmd->dataout_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&cmd->dataout_timeout_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}
	cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
	sess = conn->sess;
	na = iscsit_tpg_get_node_attrib(sess);

	if (!sess->sess_ops->ErrorRecoveryLevel) {
		pr_debug("Unable to recover from DataOut timeout while"
			" in ERL=0.\n");
		goto failure;
	}

	if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
		pr_debug("Command ITT: 0x%08x exceeded max retries"
			" for DataOUT timeout %u, closing iSCSI connection.\n",
			cmd->init_task_tag, na->dataout_timeout_retries);
		goto failure;
	}

	cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;

	/*
	 * Work out which PDU span timed out: for in-order sequences the
	 * current burst, otherwise the incomplete sequence/R2T located by
	 * iscsit_set_dataout_timeout_values().
	 */
	if (conn->sess->sess_ops->DataSequenceInOrder) {
		if (conn->sess->sess_ops->DataPDUInOrder) {
			pdu_offset = cmd->write_data_done;
			if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength -
			     cmd->next_burst_len)) > cmd->data_length)
				pdu_length = (cmd->data_length -
					cmd->write_data_done);
			else
				pdu_length = (conn->sess->sess_ops->MaxBurstLength -
						cmd->next_burst_len);
		} else {
			pdu_offset = cmd->seq_start_offset;
			pdu_length = (cmd->seq_end_offset -
				cmd->seq_start_offset);
		}
	} else {
		if (iscsit_set_dataout_timeout_values(cmd, &pdu_offset,
				&pdu_length) < 0)
			goto failure;
	}

	if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
			&r2t_offset, &r2t_length) < 0)
		goto failure;

	pr_debug("Command ITT: 0x%08x timed out waiting for"
		" completion of %sDataOUT Sequence Offset: %u, Length: %u\n",
		cmd->init_task_tag, (cmd->unsolicited_data) ? "Unsolicited " :
		"", r2t_offset, r2t_length);

	if (iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length) < 0)
		goto failure;

	/* Re-arm for the retransmitted span (lock still held here). */
	iscsit_start_dataout_timer(cmd, conn);
	spin_unlock_bh(&cmd->dataout_timeout_lock);
	iscsit_dec_conn_usage_count(conn);

	return;

failure:
	spin_unlock_bh(&cmd->dataout_timeout_lock);
	iscsit_cause_connection_reinstatement(conn, 0);
	iscsit_dec_conn_usage_count(conn);
}
1237
1238void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
1239{
1240 struct iscsi_conn *conn = cmd->conn;
1241 struct iscsi_session *sess = conn->sess;
1242 struct iscsi_node_attrib *na = na = iscsit_tpg_get_node_attrib(sess);
1243
1244 spin_lock_bh(&cmd->dataout_timeout_lock);
1245 if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
1246 spin_unlock_bh(&cmd->dataout_timeout_lock);
1247 return;
1248 }
1249
1250 mod_timer(&cmd->dataout_timer,
1251 (get_jiffies_64() + na->dataout_timeout * HZ));
1252 pr_debug("Updated DataOUT timer for ITT: 0x%08x",
1253 cmd->init_task_tag);
1254 spin_unlock_bh(&cmd->dataout_timeout_lock);
1255}
1256
1257/*
1258 * Called with cmd->dataout_timeout_lock held.
1259 */
1260void iscsit_start_dataout_timer(
1261 struct iscsi_cmd *cmd,
1262 struct iscsi_conn *conn)
1263{
1264 struct iscsi_session *sess = conn->sess;
1265 struct iscsi_node_attrib *na = na = iscsit_tpg_get_node_attrib(sess);
1266
1267 if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING)
1268 return;
1269
1270 pr_debug("Starting DataOUT timer for ITT: 0x%08x on"
1271 " CID: %hu.\n", cmd->init_task_tag, conn->cid);
1272
1273 init_timer(&cmd->dataout_timer);
1274 cmd->dataout_timer.expires = (get_jiffies_64() + na->dataout_timeout * HZ);
1275 cmd->dataout_timer.data = (unsigned long)cmd;
1276 cmd->dataout_timer.function = iscsit_handle_dataout_timeout;
1277 cmd->dataout_timer_flags &= ~ISCSI_TF_STOP;
1278 cmd->dataout_timer_flags |= ISCSI_TF_RUNNING;
1279 add_timer(&cmd->dataout_timer);
1280}
1281
/*
 * Stop the DataOUT timeout timer for @cmd, synchronizing with a
 * concurrently executing timer handler.
 */
void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd)
{
	spin_lock_bh(&cmd->dataout_timeout_lock);
	if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&cmd->dataout_timeout_lock);
		return;
	}
	/* Tell a racing handler (which checks ISCSI_TF_STOP) to bail out. */
	cmd->dataout_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&cmd->dataout_timeout_lock);

	/* Must drop the lock first: the handler takes it itself. */
	del_timer_sync(&cmd->dataout_timer);

	spin_lock_bh(&cmd->dataout_timeout_lock);
	cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
	pr_debug("Stopped DataOUT Timer for ITT: 0x%08x\n",
		cmd->init_task_tag);
	spin_unlock_bh(&cmd->dataout_timeout_lock);
}
diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h
new file mode 100644
index 000000000000..85e67e29de6b
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl1.h
@@ -0,0 +1,26 @@
#ifndef ISCSI_TARGET_ERL1_H
#define ISCSI_TARGET_ERL1_H

/*
 * Prototypes for iSCSI target Error Recovery Level 1 support:
 * DataOUT/DataIN within-command recovery, SNACK handling, out-of-order
 * CmdSN tracking, and the DataOUT timeout timer.
 */
extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int);
extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
			struct iscsi_cmd *, struct iscsi_datain_req *);
extern int iscsit_create_recovery_datain_values_datasequenceinorder_no(
			struct iscsi_cmd *, struct iscsi_datain_req *);
extern int iscsit_handle_recovery_datain_or_r2t(struct iscsi_conn *, unsigned char *,
			u32, u32, u32, u32);
extern int iscsit_handle_status_snack(struct iscsi_conn *, u32, u32,
			u32, u32);
extern int iscsit_handle_data_ack(struct iscsi_conn *, u32, u32, u32);
extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsi_cmd *, struct iscsi_pdu *);
extern int iscsit_recover_dataout_sequence(struct iscsi_cmd *, u32, u32);
extern void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *);
extern void iscsit_free_all_ooo_cmdsns(struct iscsi_session *);
extern int iscsit_execute_ooo_cmdsns(struct iscsi_session *);
extern int iscsit_execute_cmd(struct iscsi_cmd *, int);
extern int iscsit_handle_ooo_cmdsn(struct iscsi_session *, struct iscsi_cmd *, u32);
extern void iscsit_remove_ooo_cmdsn(struct iscsi_session *, struct iscsi_ooo_cmdsn *);
extern void iscsit_mod_dataout_timer(struct iscsi_cmd *);
extern void iscsit_start_dataout_timer(struct iscsi_cmd *, struct iscsi_conn *);
extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);

#endif /* ISCSI_TARGET_ERL1_H */
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
new file mode 100644
index 000000000000..91a4d170bda4
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -0,0 +1,474 @@
1/*******************************************************************************
2 * This file contains error recovery level two functions used by
3 * the iSCSI Target driver.
4 *
 * © Copyright 2007-2011 RisingTide Systems LLC.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 ******************************************************************************/
21
22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h>
24#include <target/target_core_transport.h>
25
26#include "iscsi_target_core.h"
27#include "iscsi_target_datain_values.h"
28#include "iscsi_target_util.h"
29#include "iscsi_target_erl0.h"
30#include "iscsi_target_erl1.h"
31#include "iscsi_target_erl2.h"
32#include "iscsi_target.h"
33
34/*
35 * FIXME: Does RData SNACK apply here as well?
36 */
37void iscsit_create_conn_recovery_datain_values(
38 struct iscsi_cmd *cmd,
39 u32 exp_data_sn)
40{
41 u32 data_sn = 0;
42 struct iscsi_conn *conn = cmd->conn;
43
44 cmd->next_burst_len = 0;
45 cmd->read_data_done = 0;
46
47 while (exp_data_sn > data_sn) {
48 if ((cmd->next_burst_len +
49 conn->conn_ops->MaxRecvDataSegmentLength) <
50 conn->sess->sess_ops->MaxBurstLength) {
51 cmd->read_data_done +=
52 conn->conn_ops->MaxRecvDataSegmentLength;
53 cmd->next_burst_len +=
54 conn->conn_ops->MaxRecvDataSegmentLength;
55 } else {
56 cmd->read_data_done +=
57 (conn->sess->sess_ops->MaxBurstLength -
58 cmd->next_burst_len);
59 cmd->next_burst_len = 0;
60 }
61 data_sn++;
62 }
63}
64
65void iscsit_create_conn_recovery_dataout_values(
66 struct iscsi_cmd *cmd)
67{
68 u32 write_data_done = 0;
69 struct iscsi_conn *conn = cmd->conn;
70
71 cmd->data_sn = 0;
72 cmd->next_burst_len = 0;
73
74 while (cmd->write_data_done > write_data_done) {
75 if ((write_data_done + conn->sess->sess_ops->MaxBurstLength) <=
76 cmd->write_data_done)
77 write_data_done += conn->sess->sess_ops->MaxBurstLength;
78 else
79 break;
80 }
81
82 cmd->write_data_done = write_data_done;
83}
84
/*
 * Link a connection recovery entry onto the session's active list.
 * Always returns 0.  Note the recovery count is only bumped on the
 * inactive attach; see iscsit_attach_inactive_connection_recovery_entry().
 */
static int iscsit_attach_active_connection_recovery_entry(
	struct iscsi_session *sess,
	struct iscsi_conn_recovery *cr)
{
	spin_lock(&sess->cr_a_lock);
	list_add_tail(&cr->cr_list, &sess->cr_active_list);
	spin_unlock(&sess->cr_a_lock);

	return 0;
}
95
/*
 * Link a connection recovery entry onto the session's inactive list
 * and account for it in sess->conn_recovery_count.  Always returns 0.
 */
static int iscsit_attach_inactive_connection_recovery_entry(
	struct iscsi_session *sess,
	struct iscsi_conn_recovery *cr)
{
	spin_lock(&sess->cr_i_lock);
	list_add_tail(&cr->cr_list, &sess->cr_inactive_list);

	sess->conn_recovery_count++;
	pr_debug("Incremented connection recovery count to %u for"
		" SID: %u\n", sess->conn_recovery_count, sess->sid);
	spin_unlock(&sess->cr_i_lock);

	return 0;
}
110
111struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
112 struct iscsi_session *sess,
113 u16 cid)
114{
115 struct iscsi_conn_recovery *cr;
116
117 spin_lock(&sess->cr_i_lock);
118 list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
119 if (cr->cid == cid) {
120 spin_unlock(&sess->cr_i_lock);
121 return cr;
122 }
123 }
124 spin_unlock(&sess->cr_i_lock);
125
126 return NULL;
127}
128
129void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
130{
131 struct iscsi_cmd *cmd, *cmd_tmp;
132 struct iscsi_conn_recovery *cr, *cr_tmp;
133
134 spin_lock(&sess->cr_a_lock);
135 list_for_each_entry_safe(cr, cr_tmp, &sess->cr_active_list, cr_list) {
136 list_del(&cr->cr_list);
137 spin_unlock(&sess->cr_a_lock);
138
139 spin_lock(&cr->conn_recovery_cmd_lock);
140 list_for_each_entry_safe(cmd, cmd_tmp,
141 &cr->conn_recovery_cmd_list, i_list) {
142
143 list_del(&cmd->i_list);
144 cmd->conn = NULL;
145 spin_unlock(&cr->conn_recovery_cmd_lock);
146 if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
147 !(cmd->se_cmd.transport_wait_for_tasks))
148 iscsit_release_cmd(cmd);
149 else
150 cmd->se_cmd.transport_wait_for_tasks(
151 &cmd->se_cmd, 1, 1);
152 spin_lock(&cr->conn_recovery_cmd_lock);
153 }
154 spin_unlock(&cr->conn_recovery_cmd_lock);
155 spin_lock(&sess->cr_a_lock);
156
157 kfree(cr);
158 }
159 spin_unlock(&sess->cr_a_lock);
160
161 spin_lock(&sess->cr_i_lock);
162 list_for_each_entry_safe(cr, cr_tmp, &sess->cr_inactive_list, cr_list) {
163 list_del(&cr->cr_list);
164 spin_unlock(&sess->cr_i_lock);
165
166 spin_lock(&cr->conn_recovery_cmd_lock);
167 list_for_each_entry_safe(cmd, cmd_tmp,
168 &cr->conn_recovery_cmd_list, i_list) {
169
170 list_del(&cmd->i_list);
171 cmd->conn = NULL;
172 spin_unlock(&cr->conn_recovery_cmd_lock);
173 if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
174 !(cmd->se_cmd.transport_wait_for_tasks))
175 iscsit_release_cmd(cmd);
176 else
177 cmd->se_cmd.transport_wait_for_tasks(
178 &cmd->se_cmd, 1, 1);
179 spin_lock(&cr->conn_recovery_cmd_lock);
180 }
181 spin_unlock(&cr->conn_recovery_cmd_lock);
182 spin_lock(&sess->cr_i_lock);
183
184 kfree(cr);
185 }
186 spin_unlock(&sess->cr_i_lock);
187}
188
/*
 * Unlink a connection recovery entry from the session's active list,
 * decrement the session recovery count, and free the entry.  @cr is
 * invalid after this call.  Always returns 0.
 */
int iscsit_remove_active_connection_recovery_entry(
	struct iscsi_conn_recovery *cr,
	struct iscsi_session *sess)
{
	spin_lock(&sess->cr_a_lock);
	list_del(&cr->cr_list);

	sess->conn_recovery_count--;
	pr_debug("Decremented connection recovery count to %u for"
		" SID: %u\n", sess->conn_recovery_count, sess->sid);
	spin_unlock(&sess->cr_a_lock);

	kfree(cr);

	return 0;
}
205
/*
 * Unlink a connection recovery entry from the session's inactive list.
 * Unlike the active-list variant, this does NOT free @cr or touch the
 * recovery count — the entry is expected to be reattached or freed by
 * the caller.  Always returns 0.
 */
int iscsit_remove_inactive_connection_recovery_entry(
	struct iscsi_conn_recovery *cr,
	struct iscsi_session *sess)
{
	spin_lock(&sess->cr_i_lock);
	list_del(&cr->cr_list);
	spin_unlock(&sess->cr_i_lock);

	return 0;
}
216
/*
 * Called with cr->conn_recovery_cmd_lock held.
 *
 * Unlink @cmd from its connection recovery entry's command list and
 * return the entry's remaining command count.  BUG()s if the command
 * is not associated with a recovery entry.
 */
int iscsit_remove_cmd_from_connection_recovery(
	struct iscsi_cmd *cmd,
	struct iscsi_session *sess)
{
	struct iscsi_conn_recovery *cr;

	if (!cmd->cr) {
		pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
			" is NULL!\n", cmd->init_task_tag);
		BUG();
	}
	cr = cmd->cr;

	list_del(&cmd->i_list);
	return --cr->cmd_count;
}
236
/*
 * Drop every command parked on @cr that the initiator has already
 * acknowledged: commands whose deferred state was SENT_STATUS/REMOVE
 * and whose StatSN is older than the reconnecting initiator's
 * ExpStatSN.  If nothing is left afterwards, recovery for this CID is
 * complete and @cr is torn down (and freed); otherwise the entry is
 * moved to the active list for command reassignment.
 */
void iscsit_discard_cr_cmds_by_expstatsn(
	struct iscsi_conn_recovery *cr,
	u32 exp_statsn)
{
	u32 dropped_count = 0;
	struct iscsi_cmd *cmd, *cmd_tmp;
	struct iscsi_session *sess = cr->sess;

	spin_lock(&cr->conn_recovery_cmd_lock);
	list_for_each_entry_safe(cmd, cmd_tmp,
			&cr->conn_recovery_cmd_list, i_list) {

		/* Keep unacknowledged / still-pending commands. */
		if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) &&
		     (cmd->deferred_i_state != ISTATE_REMOVE)) ||
		     (cmd->stat_sn >= exp_statsn)) {
			continue;
		}

		dropped_count++;
		pr_debug("Dropping Acknowledged ITT: 0x%08x, StatSN:"
			" 0x%08x, CID: %hu.\n", cmd->init_task_tag,
				cmd->stat_sn, cr->cid);

		iscsit_remove_cmd_from_connection_recovery(cmd, sess);

		/* The release path may block; drop the lock around it. */
		spin_unlock(&cr->conn_recovery_cmd_lock);
		if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
		    !(cmd->se_cmd.transport_wait_for_tasks))
			iscsit_release_cmd(cmd);
		else
			cmd->se_cmd.transport_wait_for_tasks(
					&cmd->se_cmd, 1, 0);
		spin_lock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&cr->conn_recovery_cmd_lock);

	pr_debug("Dropped %u total acknowledged commands on"
		" CID: %hu less than old ExpStatSN: 0x%08x\n",
			dropped_count, cr->cid, exp_statsn);

	if (!cr->cmd_count) {
		pr_debug("No commands to be reassigned for failed"
			" connection CID: %hu on SID: %u\n",
			cr->cid, sess->sid);
		iscsit_remove_inactive_connection_recovery_entry(cr, sess);
		iscsit_attach_active_connection_recovery_entry(sess, cr);
		pr_debug("iSCSI connection recovery successful for CID:"
			" %hu on SID: %u\n", cr->cid, sess->sid);
		/* Note: this call frees cr; it must not be touched after. */
		iscsit_remove_active_connection_recovery_entry(cr, sess);
	} else {
		iscsit_remove_inactive_connection_recovery_entry(cr, sess);
		iscsit_attach_active_connection_recovery_entry(sess, cr);
	}
}
291
/*
 * Drop all out-of-order CmdSN state belonging to a failed connection:
 * first the session's OOO descriptors for this CID, then every
 * deferred (ICF_OOO_CMDSN) command still on the connection's command
 * list.  Always returns 0.
 */
int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
{
	u32 dropped_count = 0;
	struct iscsi_cmd *cmd, *cmd_tmp;
	struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
	struct iscsi_session *sess = conn->sess;

	mutex_lock(&sess->cmdsn_mutex);
	list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
			&sess->sess_ooo_cmdsn_list, ooo_list) {

		if (ooo_cmdsn->cid != conn->cid)
			continue;

		dropped_count++;
		pr_debug("Dropping unacknowledged CmdSN:"
		" 0x%08x during connection recovery on CID: %hu\n",
			ooo_cmdsn->cmdsn, conn->cid);
		iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
	}
	mutex_unlock(&sess->cmdsn_mutex);

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
		if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
			continue;

		list_del(&cmd->i_list);

		/* The release path may block; drop the lock around it. */
		spin_unlock_bh(&conn->cmd_lock);
		if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
		    !(cmd->se_cmd.transport_wait_for_tasks))
			iscsit_release_cmd(cmd);
		else
			cmd->se_cmd.transport_wait_for_tasks(
					&cmd->se_cmd, 1, 1);
		spin_lock_bh(&conn->cmd_lock);
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_debug("Dropped %u total unacknowledged commands on CID:"
		" %hu for ExpCmdSN: 0x%08x.\n", dropped_count, conn->cid,
			sess->exp_cmd_sn);
	return 0;
}
337
/*
 * Move a failed connection's recoverable commands onto a freshly
 * allocated struct iscsi_conn_recovery, so a reinstated connection can
 * later reassign them (ERL=2 "realligence" — the spelling is part of
 * the established interface name).
 *
 * Returns 0 on success, -1 if the recovery entry could not be
 * allocated.
 */
int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
{
	u32 cmd_count = 0;
	struct iscsi_cmd *cmd, *cmd_tmp;
	struct iscsi_conn_recovery *cr;

	/*
	 * Allocate an struct iscsi_conn_recovery for this connection.
	 * Each struct iscsi_cmd contains an struct iscsi_conn_recovery pointer
	 * (struct iscsi_cmd->cr) so we need to allocate this before preparing the
	 * connection's command list for connection recovery.
	 */
	cr = kzalloc(sizeof(struct iscsi_conn_recovery), GFP_KERNEL);
	if (!cr) {
		pr_err("Unable to allocate memory for"
			" struct iscsi_conn_recovery.\n");
		return -1;
	}
	INIT_LIST_HEAD(&cr->cr_list);
	INIT_LIST_HEAD(&cr->conn_recovery_cmd_list);
	spin_lock_init(&cr->conn_recovery_cmd_lock);
	/*
	 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
	 * ISCSI_OP_NOOP_OUT opcodes.  For all other opcodes call
	 * list_del(&cmd->i_list); to release the command to the
	 * session pool and remove it from the connection's list.
	 *
	 * Also stop the DataOUT timer, which will be restarted after
	 * sending the TMR response.
	 */
	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {

		if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
		    (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
			pr_debug("Not performing realligence on"
				" Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x,"
				" CID: %hu\n", cmd->iscsi_opcode,
				cmd->init_task_tag, cmd->cmd_sn, conn->cid);

			list_del(&cmd->i_list);
			/* Release may block; drop the lock around it. */
			spin_unlock_bh(&conn->cmd_lock);

			if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
			    !(cmd->se_cmd.transport_wait_for_tasks))
				iscsit_release_cmd(cmd);
			else
				cmd->se_cmd.transport_wait_for_tasks(
						&cmd->se_cmd, 1, 0);
			spin_lock_bh(&conn->cmd_lock);
			continue;
		}

		/*
		 * Special case where commands greater than or equal to
		 * the session's ExpCmdSN are attached to the connection
		 * list but not to the out of order CmdSN list.  The one
		 * obvious case is when a command with immediate data
		 * attached must only check the CmdSN against ExpCmdSN
		 * after the data is received.  The special case below
		 * is when the connection fails before data is received,
		 * but also may apply to other PDUs, so it has been
		 * made generic here.
		 */
		if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
		     (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
			list_del(&cmd->i_list);
			spin_unlock_bh(&conn->cmd_lock);

			if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
			    !(cmd->se_cmd.transport_wait_for_tasks))
				iscsit_release_cmd(cmd);
			else
				cmd->se_cmd.transport_wait_for_tasks(
						&cmd->se_cmd, 1, 1);
			spin_lock_bh(&conn->cmd_lock);
			continue;
		}

		cmd_count++;
		pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x,"
			" CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for"
			" realligence.\n", cmd->iscsi_opcode,
			cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn,
			conn->cid);

		/* Park state until the command is reassigned. */
		cmd->deferred_i_state = cmd->i_state;
		cmd->i_state = ISTATE_IN_CONNECTION_RECOVERY;

		if (cmd->data_direction == DMA_TO_DEVICE)
			iscsit_stop_dataout_timer(cmd);

		cmd->sess = conn->sess;

		list_del(&cmd->i_list);
		spin_unlock_bh(&conn->cmd_lock);

		iscsit_free_all_datain_reqs(cmd);

		if ((cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
		     cmd->se_cmd.transport_wait_for_tasks)
			cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd,
					0, 0);
		/*
		 * Add the struct iscsi_cmd to the connection recovery cmd list
		 */
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_add_tail(&cmd->i_list, &cr->conn_recovery_cmd_list);
		spin_unlock(&cr->conn_recovery_cmd_lock);

		spin_lock_bh(&conn->cmd_lock);
		cmd->cr = cr;
		cmd->conn = NULL;
	}
	spin_unlock_bh(&conn->cmd_lock);
	/*
	 * Fill in the various values in the preallocated struct iscsi_conn_recovery.
	 */
	cr->cid = conn->cid;
	cr->cmd_count = cmd_count;
	cr->maxrecvdatasegmentlength = conn->conn_ops->MaxRecvDataSegmentLength;
	cr->sess = conn->sess;

	iscsit_attach_inactive_connection_recovery_entry(conn->sess, cr);

	return 0;
}
465
466int iscsit_connection_recovery_transport_reset(struct iscsi_conn *conn)
467{
468 atomic_set(&conn->connection_recovery, 1);
469
470 if (iscsit_close_connection(conn) < 0)
471 return -1;
472
473 return 0;
474}
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
new file mode 100644
index 000000000000..22f8d24780a6
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -0,0 +1,18 @@
#ifndef ISCSI_TARGET_ERL2_H
#define ISCSI_TARGET_ERL2_H

/*
 * Prototypes for iSCSI target Error Recovery Level 2 support:
 * connection recovery entries and command reassignment ("realligence").
 */
extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, u32);
extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
			struct iscsi_session *, u16);
extern void iscsit_free_connection_recovery_entires(struct iscsi_session *);
extern int iscsit_remove_active_connection_recovery_entry(
			struct iscsi_conn_recovery *, struct iscsi_session *);
extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
			struct iscsi_session *);
extern void iscsit_discard_cr_cmds_by_expstatsn(struct iscsi_conn_recovery *, u32);
extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *);
extern int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *);
extern int iscsit_connection_recovery_transport_reset(struct iscsi_conn *);

#endif /*** ISCSI_TARGET_ERL2_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
new file mode 100644
index 000000000000..bcaf82f47037
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -0,0 +1,1232 @@
1/*******************************************************************************
2 * This file contains the login functions used by the iSCSI Target driver.
3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/string.h>
22#include <linux/kthread.h>
23#include <linux/crypto.h>
24#include <scsi/iscsi_proto.h>
25#include <target/target_core_base.h>
26#include <target/target_core_transport.h>
27
28#include "iscsi_target_core.h"
29#include "iscsi_target_tq.h"
30#include "iscsi_target_device.h"
31#include "iscsi_target_nego.h"
32#include "iscsi_target_erl0.h"
33#include "iscsi_target_erl2.h"
34#include "iscsi_target_login.h"
35#include "iscsi_target_stat.h"
36#include "iscsi_target_tpg.h"
37#include "iscsi_target_util.h"
38#include "iscsi_target.h"
39#include "iscsi_target_parameters.h"
40
41extern struct idr sess_idr;
42extern struct mutex auth_id_lock;
43extern spinlock_t sess_idr_lock;
44
/*
 * Initialize the list heads, completions and spinlocks embedded in a
 * freshly kzalloc'd struct iscsi_conn, and allocate its CPU mask.
 *
 * Returns 0 on success, -ENOMEM if the cpumask allocation fails.
 */
45static int iscsi_login_init_conn(struct iscsi_conn *conn)
46{
47	INIT_LIST_HEAD(&conn->conn_list);
48	INIT_LIST_HEAD(&conn->conn_cmd_list);
49	INIT_LIST_HEAD(&conn->immed_queue_list);
50	INIT_LIST_HEAD(&conn->response_queue_list);
51	init_completion(&conn->conn_post_wait_comp);
52	init_completion(&conn->conn_wait_comp);
53	init_completion(&conn->conn_wait_rcfr_comp);
54	init_completion(&conn->conn_waiting_on_uc_comp);
55	init_completion(&conn->conn_logout_comp);
56	init_completion(&conn->rx_half_close_comp);
57	init_completion(&conn->tx_half_close_comp);
58	spin_lock_init(&conn->cmd_lock);
59	spin_lock_init(&conn->conn_usage_lock);
60	spin_lock_init(&conn->immed_queue_lock);
61	spin_lock_init(&conn->nopin_timer_lock);
62	spin_lock_init(&conn->response_queue_lock);
63	spin_lock_init(&conn->state_lock);
64
	/* Mask used later to pin the RX/TX kthreads to the same CPU. */
65	if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
66		pr_err("Unable to allocate conn->conn_cpumask\n");
67		return -ENOMEM;
68	}
69
70	return 0;
71}
72
73/*
74 * Used by iscsi_target_nego.c:iscsi_target_locate_portal() to setup
75 * per struct iscsi_conn libcrypto contexts for crc32c and crc32-intel
76 */
/*
 * Allocate the per-connection crc32c hash transforms used for header and
 * data digests on the RX and TX paths.
 *
 * Returns 0 on success.  On failure the RX tfm (if already allocated) is
 * freed and -ENOMEM is returned.
 * NOTE(review): -ENOMEM is returned even though crypto_alloc_hash() may
 * fail with other codes (e.g. missing algorithm) — confirm intended.
 */
77int iscsi_login_setup_crypto(struct iscsi_conn *conn)
78{
79	/*
80	 * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
81	 * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fallback
82	 * to software 1x8 byte slicing from crc32c.ko
83	 */
84	conn->conn_rx_hash.flags = 0;
85	conn->conn_rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
86				CRYPTO_ALG_ASYNC);
87	if (IS_ERR(conn->conn_rx_hash.tfm)) {
88		pr_err("crypto_alloc_hash() failed for conn_rx_tfm\n");
89		return -ENOMEM;
90	}
91
92	conn->conn_tx_hash.flags = 0;
93	conn->conn_tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
94				CRYPTO_ALG_ASYNC);
95	if (IS_ERR(conn->conn_tx_hash.tfm)) {
96		pr_err("crypto_alloc_hash() failed for conn_tx_tfm\n");
		/* Roll back the RX tfm so we don't leak it. */
97		crypto_free_hash(conn->conn_rx_hash.tfm);
98		return -ENOMEM;
99	}
100
101	return 0;
102}
103
/*
 * Reject initiators advertising any iSCSI version other than 0x00/0x00
 * (the RFC 3720 version).  On mismatch a login response with status class
 * Initiator Error / "unsupported version" is sent and -1 is returned;
 * otherwise returns 0.
 */
104static int iscsi_login_check_initiator_version(
105	struct iscsi_conn *conn,
106	u8 version_max,
107	u8 version_min)
108{
109	if ((version_max != 0x00) || (version_min != 0x00)) {
110		pr_err("Unsupported iSCSI IETF Pre-RFC Revision,"
111			" version Min/Max 0x%02x/0x%02x, rejecting login.\n",
112			version_min, version_max);
113		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
114				ISCSI_LOGIN_STATUS_NO_VERSION);
115		return -1;
116	}
117
118	return 0;
119}
120
/*
 * Look for an existing session with the same ISID, InitiatorName and
 * SessionType as the logging-in connection; if found, perform session
 * reinstatement by tearing the old session down.
 *
 * Returns -1 if the InitiatorName/SessionType keys are missing from
 * conn->param_list, 0 if no matching session exists, otherwise the
 * result of iscsit_close_session() on the old session.
 *
 * Locking: takes se_tpg->session_lock (bh) and nests each candidate's
 * sess_p->conn_lock inside it; the matched session's usage count is
 * bumped under the lock so it cannot vanish after the lock is dropped.
 */
121int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
122{
123	int sessiontype;
124	struct iscsi_param *initiatorname_param = NULL, *sessiontype_param = NULL;
125	struct iscsi_portal_group *tpg = conn->tpg;
126	struct iscsi_session *sess = NULL, *sess_p = NULL;
127	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
128	struct se_session *se_sess, *se_sess_tmp;
129
130	initiatorname_param = iscsi_find_param_from_key(
131			INITIATORNAME, conn->param_list);
132	if (!initiatorname_param)
133		return -1;
134
135	sessiontype_param = iscsi_find_param_from_key(
136			SESSIONTYPE, conn->param_list);
137	if (!sessiontype_param)
138		return -1;
139
	/* 1 == Discovery session, 0 == Normal session. */
140	sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
141
142	spin_lock_bh(&se_tpg->session_lock);
143	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
144			sess_list) {
145
146		sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
147		spin_lock(&sess_p->conn_lock);
		/* Skip sessions already logging out, failing back or expired. */
148		if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
149		    atomic_read(&sess_p->session_logout) ||
150		   (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
151			spin_unlock(&sess_p->conn_lock);
152			continue;
153		}
154		if (!memcmp((void *)sess_p->isid, (void *)conn->sess->isid, 6) &&
155		   (!strcmp((void *)sess_p->sess_ops->InitiatorName,
156			    (void *)initiatorname_param->value) &&
157		   (sess_p->sess_ops->SessionType == sessiontype))) {
158			atomic_set(&sess_p->session_reinstatement, 1);
159			spin_unlock(&sess_p->conn_lock);
160			iscsit_inc_session_usage_count(sess_p);
161			iscsit_stop_time2retain_timer(sess_p);
162			sess = sess_p;
163			break;
164		}
165		spin_unlock(&sess_p->conn_lock);
166	}
167	spin_unlock_bh(&se_tpg->session_lock);
168	/*
169	 * If the Time2Retain handler has expired, the session is already gone.
170	 */
171	if (!sess)
172		return 0;
173
	/* NOTE(review): "preforming" is a typo in the log text (runtime string,
	 * intentionally left untouched here). */
174	pr_debug("%s iSCSI Session SID %u is still active for %s,"
175		" preforming session reinstatement.\n", (sessiontype) ?
176		"Discovery" : "Normal", sess->sid,
177		sess->sess_ops->InitiatorName);
178
179	spin_lock_bh(&sess->conn_lock);
180	if (sess->session_state == TARG_SESS_STATE_FAILED) {
181		spin_unlock_bh(&sess->conn_lock);
182		iscsit_dec_session_usage_count(sess);
183		return iscsit_close_session(sess);
184	}
185	spin_unlock_bh(&sess->conn_lock);
186
187	iscsit_stop_session(sess, 1, 1);
188	iscsit_dec_session_usage_count(sess);
189
190	return iscsit_close_session(sess);
191}
192
/*
 * Bind @conn to @sess, record its CID, seed a random initial StatSN and
 * assign a unique auth_id from the global counter (under auth_id_lock).
 * @sess may be NULL for the non-zero-TSIH path, where the session is
 * resolved later.
 */
193static void iscsi_login_set_conn_values(
194	struct iscsi_session *sess,
195	struct iscsi_conn *conn,
196	u16 cid)
197{
198	conn->sess = sess;
199	conn->cid = cid;
200	/*
201	 * Generate a random Status sequence number (statsn) for the new
202	 * iSCSI connection.
203	 */
204	get_random_bytes(&conn->stat_sn, sizeof(u32));
205
206	mutex_lock(&auth_id_lock);
207	conn->auth_id = iscsit_global->auth_id++;
208	mutex_unlock(&auth_id_lock);
209}
210
211/*
212 * This is the leading connection of a new session,
213 * or session reinstatement.
214 */
/*
 * Stage 1 of leading-connection (TSIH == 0) login: allocate and initialize
 * a new struct iscsi_session from the first Login Request PDU in @buf,
 * allocate its idr index, sess_ops and se_sess, and attach it to @conn.
 *
 * Returns 0 on success; on failure sends a "no resources" login response
 * and returns -1.
 *
 * NOTE(review): the error paths after the kzalloc of sess do not kfree()
 * sess / sess->sess_ops here; conn->sess has been set by
 * iscsi_login_set_conn_values(), so presumably the caller's cleanup path
 * releases them — confirm against the login error handling.
 */
215static int iscsi_login_zero_tsih_s1(
216	struct iscsi_conn *conn,
217	unsigned char *buf)
218{
219	struct iscsi_session *sess = NULL;
220	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
221
222	sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
223	if (!sess) {
224		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
225				ISCSI_LOGIN_STATUS_NO_RESOURCES);
226		pr_err("Could not allocate memory for session\n");
227		return -1;
228	}
229
230	iscsi_login_set_conn_values(sess, conn, pdu->cid);
231	sess->init_task_tag = pdu->itt;
232	memcpy((void *)&sess->isid, (void *)pdu->isid, 6);
233	sess->exp_cmd_sn = pdu->cmdsn;
234	INIT_LIST_HEAD(&sess->sess_conn_list);
235	INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
236	INIT_LIST_HEAD(&sess->cr_active_list);
237	INIT_LIST_HEAD(&sess->cr_inactive_list);
238	init_completion(&sess->async_msg_comp);
239	init_completion(&sess->reinstatement_comp);
240	init_completion(&sess->session_wait_comp);
241	init_completion(&sess->session_waiting_on_uc_comp);
242	mutex_init(&sess->cmdsn_mutex);
243	spin_lock_init(&sess->conn_lock);
244	spin_lock_init(&sess->cr_a_lock);
245	spin_lock_init(&sess->cr_i_lock);
246	spin_lock_init(&sess->session_usage_lock);
247	spin_lock_init(&sess->ttt_lock);
248
	/* Reserve an id in the global session idr (old two-step idr API). */
249	if (!idr_pre_get(&sess_idr, GFP_KERNEL)) {
250		pr_err("idr_pre_get() for sess_idr failed\n");
251		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
252				ISCSI_LOGIN_STATUS_NO_RESOURCES);
253		return -1;
254	}
255	spin_lock(&sess_idr_lock);
256	idr_get_new(&sess_idr, NULL, &sess->session_index);
257	spin_unlock(&sess_idr_lock);
258
259	sess->creation_time = get_jiffies_64();
260	spin_lock_init(&sess->session_stats_lock);
261	/*
262	 * The FFP CmdSN window values will be allocated from the TPG's
263	 * Initiator Node's ACL once the login has been successfully completed.
264	 */
265	sess->max_cmd_sn = pdu->cmdsn;
266
267	sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL);
268	if (!sess->sess_ops) {
269		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
270				ISCSI_LOGIN_STATUS_NO_RESOURCES);
271		pr_err("Unable to allocate memory for"
272				" struct iscsi_sess_ops.\n");
273		return -1;
274	}
275
276	sess->se_sess = transport_init_session();
277	if (!sess->se_sess) {
278		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
279				ISCSI_LOGIN_STATUS_NO_RESOURCES);
280		return -1;
281	}
282
283	return 0;
284}
285
/*
 * Stage 2 of leading-connection login: assign a non-zero TSIH, build the
 * connection's negotiable parameter list from the TPG defaults, and force
 * TargetPortalGroupTag and ErrorRecoveryLevel into it.  For Discovery
 * sessions, only the discovery-relevant keys are kept.
 *
 * Returns 0 on success; on failure sends a "no resources" login response
 * and returns -1.
 */
286static int iscsi_login_zero_tsih_s2(
287	struct iscsi_conn *conn)
288{
289	struct iscsi_node_attrib *na;
290	struct iscsi_session *sess = conn->sess;
291	unsigned char buf[32];
292
293	sess->tpg = conn->tpg;
294
295	/*
296	 * Assign a new TPG Session Handle. Note this is protected with
297	 * struct iscsi_portal_group->np_login_sem from iscsit_access_np().
298	 */
	/* Skip the reserved TSIH value 0 on counter wrap. */
299	sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
300	if (!sess->tsih)
301		sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
302
303	/*
304	 * Create the default params from user defined values..
305	 */
306	if (iscsi_copy_param_list(&conn->param_list,
307			ISCSI_TPG_C(conn)->param_list, 1) < 0) {
308		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
309				ISCSI_LOGIN_STATUS_NO_RESOURCES);
310		return -1;
311	}
312
313	iscsi_set_keys_to_negotiate(0, conn->param_list);
314
315	if (sess->sess_ops->SessionType)
316		return iscsi_set_keys_irrelevant_for_discovery(
317				conn->param_list);
318
319	na = iscsit_tpg_get_node_attrib(sess);
320
321	/*
322	 * Need to send TargetPortalGroupTag back in first login response
323	 * on any iSCSI connection where the Initiator provides TargetName.
324	 * See 5.3.1. Login Phase Start
325	 *
326	 * In our case, we have already located the struct iscsi_tiqn at this point.
327	 */
328	memset(buf, 0, 32);
329	sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
330	if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
331		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
332				ISCSI_LOGIN_STATUS_NO_RESOURCES);
333		return -1;
334	}
335
336	/*
337	 * Workaround for Initiators that have broken connection recovery logic.
338	 *
339	 * "We would really like to get rid of this." Linux-iSCSI.org team
340	 */
341	memset(buf, 0, 32);
342	sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl);
343	if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
344		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
345				ISCSI_LOGIN_STATUS_NO_RESOURCES);
346		return -1;
347	}
348
349	if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
350		return -1;
351
352	return 0;
353}
354
355/*
356 * Remove PSTATE_NEGOTIATE for the four FIM related keys.
357 * The Initiator node will be able to enable FIM by proposing them itself.
358 */
/*
 * Clear PSTATE_NEGOTIATE on the four Fixed Interval Marker keys
 * (OFMarker, OFMarkInt, IFMarker, IFMarkInt) so the target does not
 * propose them; the initiator may still propose them itself.
 *
 * Returns 0 on success.  If any key is missing from @param_list, a
 * "no resources" login response is sent on @conn and -1 is returned.
 *
 * Fix: the error messages for the OFMarkInt and IFMarkInt lookups both
 * previously reported "IFMarker failed", hiding which key was missing;
 * each message now names the key actually looked up.
 */
359int iscsi_login_disable_FIM_keys(
360	struct iscsi_param_list *param_list,
361	struct iscsi_conn *conn)
362{
363	struct iscsi_param *param;
364
365	param = iscsi_find_param_from_key("OFMarker", param_list);
366	if (!param) {
367		pr_err("iscsi_find_param_from_key() for"
368			" OFMarker failed\n");
369		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
370				ISCSI_LOGIN_STATUS_NO_RESOURCES);
371		return -1;
372	}
373	param->state &= ~PSTATE_NEGOTIATE;
374
375	param = iscsi_find_param_from_key("OFMarkInt", param_list);
376	if (!param) {
377		pr_err("iscsi_find_param_from_key() for"
378			" OFMarkInt failed\n");
379		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
380				ISCSI_LOGIN_STATUS_NO_RESOURCES);
381		return -1;
382	}
383	param->state &= ~PSTATE_NEGOTIATE;
384
385	param = iscsi_find_param_from_key("IFMarker", param_list);
386	if (!param) {
387		pr_err("iscsi_find_param_from_key() for"
388			" IFMarker failed\n");
389		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
390				ISCSI_LOGIN_STATUS_NO_RESOURCES);
391		return -1;
392	}
393	param->state &= ~PSTATE_NEGOTIATE;
394
395	param = iscsi_find_param_from_key("IFMarkInt", param_list);
396	if (!param) {
397		pr_err("iscsi_find_param_from_key() for"
398			" IFMarkInt failed\n");
399		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
400				ISCSI_LOGIN_STATUS_NO_RESOURCES);
401		return -1;
402	}
403	param->state &= ~PSTATE_NEGOTIATE;
404
405	return 0;
406}
407
/*
 * Stage 1 of non-zero-TSIH login (adding a connection to an existing
 * session): record the CID and connection defaults.  The session pointer
 * is deliberately NULL here — it is resolved from ISID/TSIH later in
 * iscsi_login_non_zero_tsih_s2().  Always returns 0.
 */
408static int iscsi_login_non_zero_tsih_s1(
409	struct iscsi_conn *conn,
410	unsigned char *buf)
411{
412	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
413
414	iscsi_login_set_conn_values(NULL, conn, pdu->cid);
415	return 0;
416}
417
418/*
419 * Add a new connection to an existing session.
420 */
/*
 * Stage 2 of non-zero-TSIH login: find the existing session matching the
 * PDU's ISID/TSIH, bind this connection to it, build the parameter list
 * from TPG defaults and force TargetPortalGroupTag into it.
 *
 * Returns 0 on success; -1 (after sending the appropriate login reject)
 * if no matching session exists or parameter setup fails.
 *
 * NOTE(review): the memset/sprintf below reuse @buf — the caller's login
 * PDU buffer — as scratch space for the key=value string; assumes @buf is
 * at least 32 bytes (the PDU header size) — confirm against the caller.
 */
421static int iscsi_login_non_zero_tsih_s2(
422	struct iscsi_conn *conn,
423	unsigned char *buf)
424{
425	struct iscsi_portal_group *tpg = conn->tpg;
426	struct iscsi_session *sess = NULL, *sess_p = NULL;
427	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
428	struct se_session *se_sess, *se_sess_tmp;
429	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
430
431	spin_lock_bh(&se_tpg->session_lock);
432	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
433			sess_list) {
434
435		sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
		/* Skip sessions already going away or expired. */
436		if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
437		    atomic_read(&sess_p->session_logout) ||
438		   (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
439			continue;
440		if (!memcmp((const void *)sess_p->isid,
441		    (const void *)pdu->isid, 6) &&
442		     (sess_p->tsih == pdu->tsih)) {
443			iscsit_inc_session_usage_count(sess_p);
444			iscsit_stop_time2retain_timer(sess_p);
445			sess = sess_p;
446			break;
447		}
448	}
449	spin_unlock_bh(&se_tpg->session_lock);
450
451	/*
452	 * If the Time2Retain handler has expired, the session is already gone.
453	 */
454	if (!sess) {
455		pr_err("Initiator attempting to add a connection to"
456			" a non-existent session, rejecting iSCSI Login.\n");
457		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
458				ISCSI_LOGIN_STATUS_NO_SESSION);
459		return -1;
460	}
461
462	/*
463	 * Stop the Time2Retain timer if this is a failed session, we restart
464	 * the timer if the login is not successful.
465	 */
466	spin_lock_bh(&sess->conn_lock);
467	if (sess->session_state == TARG_SESS_STATE_FAILED)
468		atomic_set(&sess->session_continuation, 1);
469	spin_unlock_bh(&sess->conn_lock);
470
471	iscsi_login_set_conn_values(sess, conn, pdu->cid);
472
473	if (iscsi_copy_param_list(&conn->param_list,
474			ISCSI_TPG_C(conn)->param_list, 0) < 0) {
475		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
476				ISCSI_LOGIN_STATUS_NO_RESOURCES);
477		return -1;
478	}
479
480	iscsi_set_keys_to_negotiate(0, conn->param_list);
481	/*
482	 * Need to send TargetPortalGroupTag back in first login response
483	 * on any iSCSI connection where the Initiator provides TargetName.
484	 * See 5.3.1. Login Phase Start
485	 *
486	 * In our case, we have already located the struct iscsi_tiqn at this point.
487	 */
488	memset(buf, 0, 32);
489	sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
490	if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
491		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
492				ISCSI_LOGIN_STATUS_NO_RESOURCES);
493		return -1;
494	}
495
496	return iscsi_login_disable_FIM_keys(conn->param_list, conn);
497}
498
/*
 * Post-authentication checks when adding a connection to an existing
 * session: handle connection reinstatement for a duplicate CID, acknowledge
 * outstanding commands from a failed connection when ERL=2, and enforce the
 * session's MaxConnections limit.
 *
 * Returns 0 when the new connection may be added, -1 (after sending a
 * login reject) when MaxConnections would be exceeded.
 */
499int iscsi_login_post_auth_non_zero_tsih(
500	struct iscsi_conn *conn,
501	u16 cid,
502	u32 exp_statsn)
503{
504	struct iscsi_conn *conn_ptr = NULL;
505	struct iscsi_conn_recovery *cr = NULL;
506	struct iscsi_session *sess = conn->sess;
507
508	/*
509	 * By following item 5 in the login table, if we have found
510	 * an existing ISID and a valid/existing TSIH and an existing
511	 * CID we do connection reinstatement. Currently we do not
512	 * support it so we send back a non-zero status class to the
513	 * initiator and release the new connection.
514	 */
515	conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid);
516	if ((conn_ptr)) {
517		pr_err("Connection exists with CID %hu for %s,"
518			" performing connection reinstatement.\n",
519			conn_ptr->cid, sess->sess_ops->InitiatorName);
520
521		iscsit_connection_reinstatement_rcfr(conn_ptr);
522		iscsit_dec_conn_usage_count(conn_ptr);
523	}
524
525	/*
526	 * Check for any connection recovery entries containing CID.
527	 * We use the original ExpStatSN sent in the first login request
528	 * to acknowledge commands for the failed connection.
529	 *
530	 * Also note that an explicit logout may have already been sent,
531	 * but the response may not be sent due to additional connection
532	 * loss.
533	 */
534	if (sess->sess_ops->ErrorRecoveryLevel == 2) {
535		cr = iscsit_get_inactive_connection_recovery_entry(
536				sess, cid);
537		if ((cr)) {
538			pr_debug("Performing implicit logout"
539				" for connection recovery on CID: %hu\n",
540				conn->cid);
541			iscsit_discard_cr_cmds_by_expstatsn(cr, exp_statsn);
542		}
543	}
544
545	/*
546	 * Else we follow item 4 from the login table in that we have
547	 * found an existing ISID and a valid/existing TSIH and a new
548	 * CID we go ahead and continue to add a new connection to the
549	 * session.
550	 */
551	pr_debug("Adding CID %hu to existing session for %s.\n",
552			cid, sess->sess_ops->InitiatorName);
553
554	if ((atomic_read(&sess->nconn) + 1) > sess->sess_ops->MaxConnections) {
555		pr_err("Adding additional connection to this session"
556			" would exceed MaxConnections %d, login failed.\n",
557			sess->sess_ops->MaxConnections);
558		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
559				ISCSI_LOGIN_STATUS_ISID_ERROR);
560		return -1;
561	}
562
563	return 0;
564}
565
/*
 * Start per-connection timers after a successful login.  The NopIn
 * keepalive timer is only started for Normal sessions (SessionType == 0),
 * not Discovery sessions.
 */
566static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
567{
568	struct iscsi_session *sess = conn->sess;
569
570	if (!sess->sess_ops->SessionType)
571		iscsit_start_nopin_timer(conn);
572}
573
/*
 * Finalize a successful login: apply negotiated parameters, move the
 * connection to LOGGED_IN, link it into the session, start timers and
 * kick off the RX/TX thread set.
 *
 * Two paths: @zero_tsih == 0 adds this connection to an existing session
 * (early-return branch); @zero_tsih != 0 completes a brand-new session,
 * registering it with the SE core and assigning a session id.
 * Always returns 0.
 */
574static int iscsi_post_login_handler(
575	struct iscsi_np *np,
576	struct iscsi_conn *conn,
577	u8 zero_tsih)
578{
579	int stop_timer = 0;
580	struct iscsi_session *sess = conn->sess;
581	struct se_session *se_sess = sess->se_sess;
582	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
583	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
584	struct iscsi_thread_set *ts;
585
586	iscsit_inc_conn_usage_count(conn);
587
588	iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_SUCCESS,
589			ISCSI_LOGIN_STATUS_ACCEPT);
590
591	pr_debug("Moving to TARG_CONN_STATE_LOGGED_IN.\n");
592	conn->conn_state = TARG_CONN_STATE_LOGGED_IN;
593
594	iscsi_set_connection_parameters(conn->conn_ops, conn->param_list);
595	iscsit_set_sync_and_steering_values(conn);
596	/*
597	 * SCSI Initiator -> SCSI Target Port Mapping
598	 */
599	ts = iscsi_get_thread_set();
	/* Existing session: just attach the new connection. */
600	if (!zero_tsih) {
601		iscsi_set_session_parameters(sess->sess_ops,
602				conn->param_list, 0);
603		iscsi_release_param_list(conn->param_list);
604		conn->param_list = NULL;
605
606		spin_lock_bh(&sess->conn_lock);
607		atomic_set(&sess->session_continuation, 0);
608		if (sess->session_state == TARG_SESS_STATE_FAILED) {
609			pr_debug("Moving to"
610				" TARG_SESS_STATE_LOGGED_IN.\n");
611			sess->session_state = TARG_SESS_STATE_LOGGED_IN;
612			stop_timer = 1;
613		}
614
615		pr_debug("iSCSI Login successful on CID: %hu from %s to"
616			" %s:%hu,%hu\n", conn->cid, conn->login_ip, np->np_ip,
617				np->np_port, tpg->tpgt);
618
619		list_add_tail(&conn->conn_list, &sess->sess_conn_list);
620		atomic_inc(&sess->nconn);
621		pr_debug("Incremented iSCSI Connection count to %hu"
622			" from node: %s\n", atomic_read(&sess->nconn),
623			sess->sess_ops->InitiatorName);
624		spin_unlock_bh(&sess->conn_lock);
625
626		iscsi_post_login_start_timers(conn);
627		iscsi_activate_thread_set(conn, ts);
628		/*
629		 * Determine CPU mask to ensure connection's RX and TX kthreads
630		 * are scheduled on the same CPU.
631		 */
632		iscsit_thread_get_cpumask(conn);
633		conn->conn_rx_reset_cpumask = 1;
634		conn->conn_tx_reset_cpumask = 1;
635
636		iscsit_dec_conn_usage_count(conn);
		/* Time2Retain was running while the session was FAILED. */
637		if (stop_timer) {
638			spin_lock_bh(&se_tpg->session_lock);
639			iscsit_stop_time2retain_timer(sess);
640			spin_unlock_bh(&se_tpg->session_lock);
641		}
642		iscsit_dec_session_usage_count(sess);
643		return 0;
644	}
645
	/* New session: register it with the SE core and the TPG. */
646	iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
647	iscsi_release_param_list(conn->param_list);
648	conn->param_list = NULL;
649
650	iscsit_determine_maxcmdsn(sess);
651
652	spin_lock_bh(&se_tpg->session_lock);
653	__transport_register_session(&sess->tpg->tpg_se_tpg,
654			se_sess->se_node_acl, se_sess, (void *)sess);
655	pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
656	sess->session_state = TARG_SESS_STATE_LOGGED_IN;
657
658	pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n",
659		conn->cid, conn->login_ip, np->np_ip, np->np_port, tpg->tpgt);
660
661	spin_lock_bh(&sess->conn_lock);
662	list_add_tail(&conn->conn_list, &sess->sess_conn_list);
663	atomic_inc(&sess->nconn);
664	pr_debug("Incremented iSCSI Connection count to %hu from node:"
665		" %s\n", atomic_read(&sess->nconn),
666		sess->sess_ops->InitiatorName);
667	spin_unlock_bh(&sess->conn_lock);
668
	/* Skip the reserved SID value 0 on counter wrap. */
669	sess->sid = tpg->sid++;
670	if (!sess->sid)
671		sess->sid = tpg->sid++;
672	pr_debug("Established iSCSI session from node: %s\n",
673			sess->sess_ops->InitiatorName);
674
675	tpg->nsessions++;
676	if (tpg->tpg_tiqn)
677		tpg->tpg_tiqn->tiqn_nsessions++;
678
679	pr_debug("Incremented number of active iSCSI sessions to %u on"
680		" iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
681	spin_unlock_bh(&se_tpg->session_lock);
682
683	iscsi_post_login_start_timers(conn);
684	iscsi_activate_thread_set(conn, ts);
685	/*
686	 * Determine CPU mask to ensure connection's RX and TX kthreads
687	 * are scheduled on the same CPU.
688	 */
689	iscsit_thread_get_cpumask(conn);
690	conn->conn_rx_reset_cpumask = 1;
691	conn->conn_tx_reset_cpumask = 1;
692
693	iscsit_dec_conn_usage_count(conn);
694
695	return 0;
696}
697
/*
 * Timer callback fired when a login on network portal @data (cast from
 * struct iscsi_np *) takes longer than TA_LOGIN_TIMEOUT.  Interrupts the
 * login kthread with SIGINT unless a stop has already been requested.
 */
698static void iscsi_handle_login_thread_timeout(unsigned long data)
699{
700	struct iscsi_np *np = (struct iscsi_np *) data;
701
702	spin_lock_bh(&np->np_thread_lock);
703	pr_err("iSCSI Login timeout on Network Portal %s:%hu\n",
704			np->np_ip, np->np_port);
705
	/* iscsi_stop_login_thread_timer() already raced us — bail out. */
706	if (np->np_login_timer_flags & ISCSI_TF_STOP) {
707		spin_unlock_bh(&np->np_thread_lock);
708		return;
709	}
710
711	if (np->np_thread)
712		send_sig(SIGINT, np->np_thread, 1);
713
714	np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
715	spin_unlock_bh(&np->np_thread_lock);
716}
717
/*
 * Arm the per-portal login timeout timer for TA_LOGIN_TIMEOUT seconds.
 * Flag updates are done under np_thread_lock so they are consistent with
 * the timeout handler and iscsi_stop_login_thread_timer().
 *
 * NOTE(review): timer_list.expires is an unsigned long, but this assigns
 * get_jiffies_64(); on 32-bit the value is truncated — plain `jiffies`
 * would be the conventional base here.  Confirm intended.
 */
718static void iscsi_start_login_thread_timer(struct iscsi_np *np)
719{
720	/*
721	 * This used the TA_LOGIN_TIMEOUT constant because at this
722	 * point we do not have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout
723	 */
724	spin_lock_bh(&np->np_thread_lock);
725	init_timer(&np->np_login_timer);
726	np->np_login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ);
727	np->np_login_timer.data = (unsigned long)np;
728	np->np_login_timer.function = iscsi_handle_login_thread_timeout;
729	np->np_login_timer_flags &= ~ISCSI_TF_STOP;
730	np->np_login_timer_flags |= ISCSI_TF_RUNNING;
731	add_timer(&np->np_login_timer);
732
733	pr_debug("Added timeout timer to iSCSI login request for"
734			" %u seconds.\n", TA_LOGIN_TIMEOUT);
735	spin_unlock_bh(&np->np_thread_lock);
736}
737
/*
 * Cancel the login timeout timer.  Sets ISCSI_TF_STOP under the lock so a
 * concurrently-firing handler exits early, then del_timer_sync() outside
 * the lock (the handler takes np_thread_lock itself) before clearing
 * ISCSI_TF_RUNNING.  No-op if the timer is not running.
 */
738static void iscsi_stop_login_thread_timer(struct iscsi_np *np)
739{
740	spin_lock_bh(&np->np_thread_lock);
741	if (!(np->np_login_timer_flags & ISCSI_TF_RUNNING)) {
742		spin_unlock_bh(&np->np_thread_lock);
743		return;
744	}
745	np->np_login_timer_flags |= ISCSI_TF_STOP;
746	spin_unlock_bh(&np->np_thread_lock);
747
748	del_timer_sync(&np->np_login_timer);
749
750	spin_lock_bh(&np->np_thread_lock);
751	np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
752	spin_unlock_bh(&np->np_thread_lock);
753}
754
/*
 * Create, configure, bind and listen on the login socket for network
 * portal @np using the address in @sockaddr (IPv4 or IPv6, TCP or SCTP
 * depending on np->np_network_transport).
 *
 * Returns 0 on success or a negative errno; on failure the socket (and
 * any fake SCTP struct file) is released and np->np_socket reset to NULL.
 */
755int iscsi_target_setup_login_socket(
756	struct iscsi_np *np,
757	struct __kernel_sockaddr_storage *sockaddr)
758{
759	struct socket *sock;
760	int backlog = 5, ret, opt = 0, len;
761
	/* Map the configured transport onto protocol/socket type. */
762	switch (np->np_network_transport) {
763	case ISCSI_TCP:
764		np->np_ip_proto = IPPROTO_TCP;
765		np->np_sock_type = SOCK_STREAM;
766		break;
767	case ISCSI_SCTP_TCP:
768		np->np_ip_proto = IPPROTO_SCTP;
769		np->np_sock_type = SOCK_STREAM;
770		break;
771	case ISCSI_SCTP_UDP:
772		np->np_ip_proto = IPPROTO_SCTP;
773		np->np_sock_type = SOCK_SEQPACKET;
774		break;
775	case ISCSI_IWARP_TCP:
776	case ISCSI_IWARP_SCTP:
777	case ISCSI_INFINIBAND:
778	default:
779		pr_err("Unsupported network_transport: %d\n",
780				np->np_network_transport);
781		return -EINVAL;
782	}
783
784	ret = sock_create(sockaddr->ss_family, np->np_sock_type,
785			np->np_ip_proto, &sock);
786	if (ret < 0) {
787		pr_err("sock_create() failed.\n");
788		return ret;
789	}
790	np->np_socket = sock;
791	/*
792	 * The SCTP stack needs struct socket->file.
793	 */
	/*
	 * NOTE(review): this allocates a bare struct file (not via the VFS)
	 * purely to satisfy the SCTP stack; NPF_SCTP_STRUCT_FILE marks it
	 * so the fail path below kfree()s it instead of fput().
	 */
794	if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
795	    (np->np_network_transport == ISCSI_SCTP_UDP)) {
796		if (!sock->file) {
797			sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
798			if (!sock->file) {
799				pr_err("Unable to allocate struct"
800						" file for SCTP\n");
801				ret = -ENOMEM;
802				goto fail;
803			}
804			np->np_flags |= NPF_SCTP_STRUCT_FILE;
805		}
806	}
807	/*
808	 * Setup the np->np_sockaddr from the passed sockaddr setup
809	 * in iscsi_target_configfs.c code..
810	 */
811	memcpy((void *)&np->np_sockaddr, (void *)sockaddr,
812			sizeof(struct __kernel_sockaddr_storage));
813
814	if (sockaddr->ss_family == AF_INET6)
815		len = sizeof(struct sockaddr_in6);
816	else
817		len = sizeof(struct sockaddr_in);
818	/*
819	 * Set SO_REUSEADDR, and disable Nagle Algorithm with TCP_NODELAY.
820	 */
821	opt = 1;
822	if (np->np_network_transport == ISCSI_TCP) {
823		ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
824				(char *)&opt, sizeof(opt));
825		if (ret < 0) {
826			pr_err("kernel_setsockopt() for TCP_NODELAY"
827					" failed: %d\n", ret);
828			goto fail;
829		}
830	}
831
832	ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
833			(char *)&opt, sizeof(opt));
834	if (ret < 0) {
835		pr_err("kernel_setsockopt() for SO_REUSEADDR"
836			" failed\n");
837		goto fail;
838	}
839
840	ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len);
841	if (ret < 0) {
842		pr_err("kernel_bind() failed: %d\n", ret);
843		goto fail;
844	}
845
846	ret = kernel_listen(sock, backlog);
847	if (ret != 0) {
848		pr_err("kernel_listen() failed: %d\n", ret);
849		goto fail;
850	}
851
852	return 0;
853
854fail:
855	np->np_socket = NULL;
856	if (sock) {
857		if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
858			kfree(sock->file);
859			sock->file = NULL;
860		}
861
862		sock_release(sock);
863	}
864	return ret;
865}
866
867static int __iscsi_target_login_thread(struct iscsi_np *np)
868{
869 u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
870 int err, ret = 0, ip_proto, sock_type, set_sctp_conn_flag, stop;
871 struct iscsi_conn *conn = NULL;
872 struct iscsi_login *login;
873 struct iscsi_portal_group *tpg = NULL;
874 struct socket *new_sock, *sock;
875 struct kvec iov;
876 struct iscsi_login_req *pdu;
877 struct sockaddr_in sock_in;
878 struct sockaddr_in6 sock_in6;
879
880 flush_signals(current);
881 set_sctp_conn_flag = 0;
882 sock = np->np_socket;
883 ip_proto = np->np_ip_proto;
884 sock_type = np->np_sock_type;
885
886 spin_lock_bh(&np->np_thread_lock);
887 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
888 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
889 complete(&np->np_restart_comp);
890 } else {
891 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
892 }
893 spin_unlock_bh(&np->np_thread_lock);
894
895 if (kernel_accept(sock, &new_sock, 0) < 0) {
896 spin_lock_bh(&np->np_thread_lock);
897 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
898 spin_unlock_bh(&np->np_thread_lock);
899 complete(&np->np_restart_comp);
900 /* Get another socket */
901 return 1;
902 }
903 spin_unlock_bh(&np->np_thread_lock);
904 goto out;
905 }
906 /*
907 * The SCTP stack needs struct socket->file.
908 */
909 if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
910 (np->np_network_transport == ISCSI_SCTP_UDP)) {
911 if (!new_sock->file) {
912 new_sock->file = kzalloc(
913 sizeof(struct file), GFP_KERNEL);
914 if (!new_sock->file) {
915 pr_err("Unable to allocate struct"
916 " file for SCTP\n");
917 sock_release(new_sock);
918 /* Get another socket */
919 return 1;
920 }
921 set_sctp_conn_flag = 1;
922 }
923 }
924
925 iscsi_start_login_thread_timer(np);
926
927 conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
928 if (!conn) {
929 pr_err("Could not allocate memory for"
930 " new connection\n");
931 if (set_sctp_conn_flag) {
932 kfree(new_sock->file);
933 new_sock->file = NULL;
934 }
935 sock_release(new_sock);
936 /* Get another socket */
937 return 1;
938 }
939
940 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
941 conn->conn_state = TARG_CONN_STATE_FREE;
942 conn->sock = new_sock;
943
944 if (set_sctp_conn_flag)
945 conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
946
947 pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
948 conn->conn_state = TARG_CONN_STATE_XPT_UP;
949
950 /*
951 * Allocate conn->conn_ops early as a failure calling
952 * iscsit_tx_login_rsp() below will call tx_data().
953 */
954 conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
955 if (!conn->conn_ops) {
956 pr_err("Unable to allocate memory for"
957 " struct iscsi_conn_ops.\n");
958 goto new_sess_out;
959 }
960 /*
961 * Perform the remaining iSCSI connection initialization items..
962 */
963 if (iscsi_login_init_conn(conn) < 0)
964 goto new_sess_out;
965
966 memset(buffer, 0, ISCSI_HDR_LEN);
967 memset(&iov, 0, sizeof(struct kvec));
968 iov.iov_base = buffer;
969 iov.iov_len = ISCSI_HDR_LEN;
970
971 if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) <= 0) {
972 pr_err("rx_data() returned an error.\n");
973 goto new_sess_out;
974 }
975
976 iscsi_opcode = (buffer[0] & ISCSI_OPCODE_MASK);
977 if (!(iscsi_opcode & ISCSI_OP_LOGIN)) {
978 pr_err("First opcode is not login request,"
979 " failing login request.\n");
980 goto new_sess_out;
981 }
982
983 pdu = (struct iscsi_login_req *) buffer;
984 pdu->cid = be16_to_cpu(pdu->cid);
985 pdu->tsih = be16_to_cpu(pdu->tsih);
986 pdu->itt = be32_to_cpu(pdu->itt);
987 pdu->cmdsn = be32_to_cpu(pdu->cmdsn);
988 pdu->exp_statsn = be32_to_cpu(pdu->exp_statsn);
989 /*
990 * Used by iscsit_tx_login_rsp() for Login Resonses PDUs
991 * when Status-Class != 0.
992 */
993 conn->login_itt = pdu->itt;
994
995 spin_lock_bh(&np->np_thread_lock);
996 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
997 spin_unlock_bh(&np->np_thread_lock);
998 pr_err("iSCSI Network Portal on %s:%hu currently not"
999 " active.\n", np->np_ip, np->np_port);
1000 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1001 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
1002 goto new_sess_out;
1003 }
1004 spin_unlock_bh(&np->np_thread_lock);
1005
1006 if (np->np_sockaddr.ss_family == AF_INET6) {
1007 memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
1008
1009 if (conn->sock->ops->getname(conn->sock,
1010 (struct sockaddr *)&sock_in6, &err, 1) < 0) {
1011 pr_err("sock_ops->getname() failed.\n");
1012 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1013 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1014 goto new_sess_out;
1015 }
1016#if 0
1017 if (!iscsi_ntop6((const unsigned char *)
1018 &sock_in6.sin6_addr.in6_u,
1019 (char *)&conn->ipv6_login_ip[0],
1020 IPV6_ADDRESS_SPACE)) {
1021 pr_err("iscsi_ntop6() failed\n");
1022 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1023 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1024 goto new_sess_out;
1025 }
1026#else
1027 pr_debug("Skipping iscsi_ntop6()\n");
1028#endif
1029 } else {
1030 memset(&sock_in, 0, sizeof(struct sockaddr_in));
1031
1032 if (conn->sock->ops->getname(conn->sock,
1033 (struct sockaddr *)&sock_in, &err, 1) < 0) {
1034 pr_err("sock_ops->getname() failed.\n");
1035 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1036 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1037 goto new_sess_out;
1038 }
1039 sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr);
1040 conn->login_port = ntohs(sock_in.sin_port);
1041 }
1042
1043 conn->network_transport = np->np_network_transport;
1044
1045 pr_debug("Received iSCSI login request from %s on %s Network"
1046 " Portal %s:%hu\n", conn->login_ip,
1047 (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP",
1048 np->np_ip, np->np_port);
1049
1050 pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
1051 conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
1052
1053 if (iscsi_login_check_initiator_version(conn, pdu->max_version,
1054 pdu->min_version) < 0)
1055 goto new_sess_out;
1056
1057 zero_tsih = (pdu->tsih == 0x0000);
1058 if ((zero_tsih)) {
1059 /*
1060 * This is the leading connection of a new session.
1061 * We wait until after authentication to check for
1062 * session reinstatement.
1063 */
1064 if (iscsi_login_zero_tsih_s1(conn, buffer) < 0)
1065 goto new_sess_out;
1066 } else {
1067 /*
1068 * Add a new connection to an existing session.
		 * We check for a non-existent session in
1070 * iscsi_login_non_zero_tsih_s2() below based
1071 * on ISID/TSIH, but wait until after authentication
1072 * to check for connection reinstatement, etc.
1073 */
1074 if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0)
1075 goto new_sess_out;
1076 }
1077
1078 /*
1079 * This will process the first login request, and call
1080 * iscsi_target_locate_portal(), and return a valid struct iscsi_login.
1081 */
1082 login = iscsi_target_init_negotiation(np, conn, buffer);
1083 if (!login) {
1084 tpg = conn->tpg;
1085 goto new_sess_out;
1086 }
1087
1088 tpg = conn->tpg;
1089 if (!tpg) {
1090 pr_err("Unable to locate struct iscsi_conn->tpg\n");
1091 goto new_sess_out;
1092 }
1093
1094 if (zero_tsih) {
1095 if (iscsi_login_zero_tsih_s2(conn) < 0) {
1096 iscsi_target_nego_release(login, conn);
1097 goto new_sess_out;
1098 }
1099 } else {
1100 if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) {
1101 iscsi_target_nego_release(login, conn);
1102 goto old_sess_out;
1103 }
1104 }
1105
1106 if (iscsi_target_start_negotiation(login, conn) < 0)
1107 goto new_sess_out;
1108
1109 if (!conn->sess) {
1110 pr_err("struct iscsi_conn session pointer is NULL!\n");
1111 goto new_sess_out;
1112 }
1113
1114 iscsi_stop_login_thread_timer(np);
1115
1116 if (signal_pending(current))
1117 goto new_sess_out;
1118
1119 ret = iscsi_post_login_handler(np, conn, zero_tsih);
1120
1121 if (ret < 0)
1122 goto new_sess_out;
1123
1124 iscsit_deaccess_np(np, tpg);
1125 tpg = NULL;
1126 /* Get another socket */
1127 return 1;
1128
1129new_sess_out:
1130 pr_err("iSCSI Login negotiation failed.\n");
1131 iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
1132 ISCSI_LOGIN_STATUS_INIT_ERR);
1133 if (!zero_tsih || !conn->sess)
1134 goto old_sess_out;
1135 if (conn->sess->se_sess)
1136 transport_free_session(conn->sess->se_sess);
1137 if (conn->sess->session_index != 0) {
1138 spin_lock_bh(&sess_idr_lock);
1139 idr_remove(&sess_idr, conn->sess->session_index);
1140 spin_unlock_bh(&sess_idr_lock);
1141 }
1142 if (conn->sess->sess_ops)
1143 kfree(conn->sess->sess_ops);
1144 if (conn->sess)
1145 kfree(conn->sess);
1146old_sess_out:
1147 iscsi_stop_login_thread_timer(np);
1148 /*
1149 * If login negotiation fails check if the Time2Retain timer
1150 * needs to be restarted.
1151 */
1152 if (!zero_tsih && conn->sess) {
1153 spin_lock_bh(&conn->sess->conn_lock);
1154 if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
1155 struct se_portal_group *se_tpg =
1156 &ISCSI_TPG_C(conn)->tpg_se_tpg;
1157
1158 atomic_set(&conn->sess->session_continuation, 0);
1159 spin_unlock_bh(&conn->sess->conn_lock);
1160 spin_lock_bh(&se_tpg->session_lock);
1161 iscsit_start_time2retain_handler(conn->sess);
1162 spin_unlock_bh(&se_tpg->session_lock);
1163 } else
1164 spin_unlock_bh(&conn->sess->conn_lock);
1165 iscsit_dec_session_usage_count(conn->sess);
1166 }
1167
1168 if (!IS_ERR(conn->conn_rx_hash.tfm))
1169 crypto_free_hash(conn->conn_rx_hash.tfm);
1170 if (!IS_ERR(conn->conn_tx_hash.tfm))
1171 crypto_free_hash(conn->conn_tx_hash.tfm);
1172
1173 if (conn->conn_cpumask)
1174 free_cpumask_var(conn->conn_cpumask);
1175
1176 kfree(conn->conn_ops);
1177
1178 if (conn->param_list) {
1179 iscsi_release_param_list(conn->param_list);
1180 conn->param_list = NULL;
1181 }
1182 if (conn->sock) {
1183 if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
1184 kfree(conn->sock->file);
1185 conn->sock->file = NULL;
1186 }
1187 sock_release(conn->sock);
1188 }
1189 kfree(conn);
1190
1191 if (tpg) {
1192 iscsit_deaccess_np(np, tpg);
1193 tpg = NULL;
1194 }
1195
1196out:
1197 stop = kthread_should_stop();
1198 if (!stop && signal_pending(current)) {
1199 spin_lock_bh(&np->np_thread_lock);
1200 stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
1201 spin_unlock_bh(&np->np_thread_lock);
1202 }
1203 /* Wait for another socket.. */
1204 if (!stop)
1205 return 1;
1206
1207 iscsi_stop_login_thread_timer(np);
1208 spin_lock_bh(&np->np_thread_lock);
1209 np->np_thread_state = ISCSI_NP_THREAD_EXIT;
1210 spin_unlock_bh(&np->np_thread_lock);
1211 return 0;
1212}
1213
1214int iscsi_target_login_thread(void *arg)
1215{
1216 struct iscsi_np *np = (struct iscsi_np *)arg;
1217 int ret;
1218
1219 allow_signal(SIGINT);
1220
1221 while (!kthread_should_stop()) {
1222 ret = __iscsi_target_login_thread(np);
1223 /*
1224 * We break and exit here unless another sock_accept() call
1225 * is expected.
1226 */
1227 if (ret != 1)
1228 break;
1229 }
1230
1231 return 0;
1232}
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
new file mode 100644
index 000000000000..091dcae2532b
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -0,0 +1,12 @@
#ifndef ISCSI_TARGET_LOGIN_H
#define ISCSI_TARGET_LOGIN_H

/* Entry points exported by iscsi_target_login.c for login setup/teardown. */
extern int iscsi_login_setup_crypto(struct iscsi_conn *);
extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
extern int iscsi_target_setup_login_socket(struct iscsi_np *,
			struct __kernel_sockaddr_storage *);
extern int iscsi_target_login_thread(void *);
extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);

#endif   /*** ISCSI_TARGET_LOGIN_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
new file mode 100644
index 000000000000..713a4d23557a
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -0,0 +1,1067 @@
1/*******************************************************************************
2 * This file contains main functions related to iSCSI Parameter negotiation.
3 *
 * (c) Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/ctype.h>
22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h>
24#include <target/target_core_tpg.h>
25
26#include "iscsi_target_core.h"
27#include "iscsi_target_parameters.h"
28#include "iscsi_target_login.h"
29#include "iscsi_target_nego.h"
30#include "iscsi_target_tpg.h"
31#include "iscsi_target_util.h"
32#include "iscsi_target.h"
33#include "iscsi_target_auth.h"
34
35#define MAX_LOGIN_PDUS 7
36#define TEXT_LEN 4096
37
/*
 * Replace every NUL byte in the first @len bytes of @buf with ';'.
 *
 * Login key=value pairs arrive NUL-separated on the wire; rewriting the
 * separators lets the whole buffer be handled as one semicolon-delimited
 * C string by strlen_semi()/extract_param().
 */
void convert_null_to_semi(char *buf, int len)
{
	char *p = buf;
	char *end = buf + len;

	while (p < end) {
		if (!*p)
			*p = ';';
		p++;
	}
}
46
/*
 * Return the number of characters in @buf preceding the first ';'
 * separator, or -1 if the string ends (NUL) before any ';' is found.
 */
int strlen_semi(char *buf)
{
	const char *p;

	for (p = buf; *p != '\0'; p++) {
		if (*p == ';')
			return (int)(p - buf);
	}

	return -1;
}
59
60int extract_param(
61 const char *in_buf,
62 const char *pattern,
63 unsigned int max_length,
64 char *out_buf,
65 unsigned char *type)
66{
67 char *ptr;
68 int len;
69
70 if (!in_buf || !pattern || !out_buf || !type)
71 return -1;
72
73 ptr = strstr(in_buf, pattern);
74 if (!ptr)
75 return -1;
76
77 ptr = strstr(ptr, "=");
78 if (!ptr)
79 return -1;
80
81 ptr += 1;
82 if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) {
83 ptr += 2; /* skip 0x */
84 *type = HEX;
85 } else
86 *type = DECIMAL;
87
88 len = strlen_semi(ptr);
89 if (len < 0)
90 return -1;
91
92 if (len > max_length) {
93 pr_err("Length of input: %d exeeds max_length:"
94 " %d\n", len, max_length);
95 return -1;
96 }
97 memcpy(out_buf, ptr, len);
98 out_buf[len] = '\0';
99
100 return 0;
101}
102
/*
 * Dispatch a login security-negotiation payload to the configured
 * authentication backend for this connection's ACL (Normal sessions) or
 * the global discovery ACL (Discovery sessions).
 *
 * Return values (NOTE(review): declared u32 but -1 is returned for
 * internal setup failures — callers compare the unsigned result):
 *    0 - authentication in progress, more PDUs expected
 *    1 - authentication completed (AuthMethod=None short-circuits here)
 *    2 - authentication method unsupported or rejected
 *   -1 - no node ACL could be located
 */
static u32 iscsi_handle_authentication(
	struct iscsi_conn *conn,
	char *in_buf,
	char *out_buf,
	int in_length,
	int *out_length,
	unsigned char *authtype)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_auth *auth;
	struct iscsi_node_acl *iscsi_nacl;
	struct se_node_acl *se_nacl;

	if (!sess->sess_ops->SessionType) {
		/*
		 * For SessionType=Normal
		 */
		se_nacl = conn->sess->se_sess->se_node_acl;
		if (!se_nacl) {
			pr_err("Unable to locate struct se_node_acl for"
					" CHAP auth\n");
			return -1;
		}
		iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
				se_node_acl);
		if (!iscsi_nacl) {
			pr_err("Unable to locate struct iscsi_node_acl for"
					" CHAP auth\n");
			return -1;
		}

		auth = ISCSI_NODE_AUTH(iscsi_nacl);
	} else {
		/*
		 * For SessionType=Discovery
		 */
		auth = &iscsit_global->discovery_acl.node_auth;
	}

	/*
	 * NOTE(review): these strstr() calls search for @authtype *inside*
	 * the string literal (haystack/needle look swapped), so a prefix
	 * such as "CH" would also match "CHAP" — confirm this matching
	 * semantic is intended before changing it.
	 */
	if (strstr("CHAP", authtype))
		strcpy(conn->sess->auth_type, "CHAP");
	else
		strcpy(conn->sess->auth_type, NONE);

	if (strstr("None", authtype))
		return 1;
#ifdef CANSRP
	else if (strstr("SRP", authtype))
		return srp_main_loop(conn, auth, in_buf, out_buf,
				&in_length, out_length);
#endif
	else if (strstr("CHAP", authtype))
		return chap_main_loop(conn, auth, in_buf, out_buf,
				&in_length, out_length);
	else if (strstr("SPKM1", authtype))
		return 2;
	else if (strstr("SPKM2", authtype))
		return 2;
	else if (strstr("KRB5", authtype))
		return 2;
	else
		return 2;
}
166
/*
 * Release the per-connection authentication protocol state after a failed
 * security negotiation.  kfree(NULL) is a no-op, so this is safe even if
 * no auth state was ever allocated for the connection.
 */
static void iscsi_remove_failed_auth_entry(struct iscsi_conn *conn)
{
	kfree(conn->auth_protocol);
}
171
/*
 * Validate a received Login Request PDU against RFC-3720 rules and the
 * state recorded in @login from earlier PDUs of the same login exchange.
 *
 * Checks, in order: opcode is Login; CONTINUE and TRANSIT flags are not
 * both set; CSG matches the current negotiated stage; the CSG/NSG/TRANSIT
 * combination is legal; Version-Max/Min, ISID, and ITT have not changed
 * mid-login; payload fits within the default MaxRecvDataSegmentLength.
 *
 * Returns 0 if the PDU is acceptable; otherwise transmits an error Login
 * Response (except for the payload-size check) and returns -1.
 */
static int iscsi_target_check_login_request(
	struct iscsi_conn *conn,
	struct iscsi_login *login)
{
	int req_csg, req_nsg, rsp_csg, rsp_nsg;
	u32 payload_length;
	struct iscsi_login_req *login_req;
	struct iscsi_login_rsp *login_rsp;

	login_req = (struct iscsi_login_req *) login->req;
	login_rsp = (struct iscsi_login_rsp *) login->rsp;
	payload_length = ntoh24(login_req->dlength);

	switch (login_req->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		break;
	default:
		pr_err("Received unknown opcode 0x%02x.\n",
				login_req->opcode & ISCSI_OPCODE_MASK);
		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
				ISCSI_LOGIN_STATUS_INIT_ERR);
		return -1;
	}

	if ((login_req->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
	    (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
		pr_err("Login request has both ISCSI_FLAG_LOGIN_CONTINUE"
			" and ISCSI_FLAG_LOGIN_TRANSIT set, protocol error.\n");
		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
				ISCSI_LOGIN_STATUS_INIT_ERR);
		return -1;
	}

	/* CSG occupies bits 3:2 of the flags byte, NSG bits 1:0. */
	req_csg = (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
	rsp_csg = (login_rsp->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
	req_nsg = (login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
	rsp_nsg = (login_rsp->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);

	if (req_csg != login->current_stage) {
		pr_err("Initiator unexpectedly changed login stage"
			" from %d to %d, login failed.\n", login->current_stage,
			req_csg);
		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
				ISCSI_LOGIN_STATUS_INIT_ERR);
		return -1;
	}

	/*
	 * Stage 2 is reserved, and a TRANSIT request must move to a strictly
	 * later stage (NSG > CSG).
	 */
	if ((req_nsg == 2) || (req_csg >= 2) ||
	   ((login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
	    (req_nsg <= req_csg))) {
		pr_err("Illegal login_req->flags Combination, CSG: %d,"
			" NSG: %d, ISCSI_FLAG_LOGIN_TRANSIT: %d.\n", req_csg,
			req_nsg, (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT));
		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
				ISCSI_LOGIN_STATUS_INIT_ERR);
		return -1;
	}

	if ((login_req->max_version != login->version_max) ||
	    (login_req->min_version != login->version_min)) {
		pr_err("Login request changed Version Max/Nin"
			" unexpectedly to 0x%02x/0x%02x, protocol error\n",
			login_req->max_version, login_req->min_version);
		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
				ISCSI_LOGIN_STATUS_INIT_ERR);
		return -1;
	}

	if (memcmp(login_req->isid, login->isid, 6) != 0) {
		pr_err("Login request changed ISID unexpectedly,"
			" protocol error.\n");
		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
				ISCSI_LOGIN_STATUS_INIT_ERR);
		return -1;
	}

	if (login_req->itt != login->init_task_tag) {
		pr_err("Login request changed ITT unexpectedly to"
			" 0x%08x, protocol error.\n", login_req->itt);
		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
				ISCSI_LOGIN_STATUS_INIT_ERR);
		return -1;
	}

	if (payload_length > MAX_KEY_VALUE_PAIRS) {
		pr_err("Login request payload exceeds default"
			" MaxRecvDataSegmentLength: %u, protocol error.\n",
			MAX_KEY_VALUE_PAIRS);
		return -1;
	}

	return 0;
}
265
/*
 * Enforce the key requirements of the first Login Request (RFC-3720
 * 5.3): SessionType must be declared, and InitiatorName must be present
 * on a leading connection.  For non-leading connections the received
 * InitiatorName must match the existing session's node ACL.
 *
 * Clears login->first_request so this runs only once per login.
 * Returns 0 on success, -1 after sending an error Login Response.
 */
static int iscsi_target_check_first_request(
	struct iscsi_conn *conn,
	struct iscsi_login *login)
{
	struct iscsi_param *param = NULL;
	struct se_node_acl *se_nacl;

	login->first_request = 0;

	list_for_each_entry(param, &conn->param_list->param_list, p_list) {
		if (!strncmp(param->name, SESSIONTYPE, 11)) {
			/* ACCEPTOR state means the initiator sent the key. */
			if (!IS_PSTATE_ACCEPTOR(param)) {
				pr_err("SessionType key not received"
					" in first login request.\n");
				iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
					ISCSI_LOGIN_STATUS_MISSING_FIELDS);
				return -1;
			}
			/* Discovery sessions need no InitiatorName check. */
			if (!strncmp(param->value, DISCOVERY, 9))
				return 0;
		}

		if (!strncmp(param->name, INITIATORNAME, 13)) {
			if (!IS_PSTATE_ACCEPTOR(param)) {
				if (!login->leading_connection)
					continue;

				pr_err("InitiatorName key not received"
					" in first login request.\n");
				iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
					ISCSI_LOGIN_STATUS_MISSING_FIELDS);
				return -1;
			}

			/*
			 * For non-leading connections, double check that the
			 * received InitiatorName matches the existing session's
			 * struct iscsi_node_acl.
			 */
			if (!login->leading_connection) {
				se_nacl = conn->sess->se_sess->se_node_acl;
				if (!se_nacl) {
					pr_err("Unable to locate"
						" struct se_node_acl\n");
					iscsit_tx_login_rsp(conn,
							ISCSI_STATUS_CLS_INITIATOR_ERR,
							ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
					return -1;
				}

				if (strcmp(param->value,
						se_nacl->initiatorname)) {
					pr_err("Incorrect"
						" InitiatorName: %s for this"
						" iSCSI Initiator Node.\n",
						param->value);
					iscsit_tx_login_rsp(conn,
							ISCSI_STATUS_CLS_INITIATOR_ERR,
							ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
					return -1;
				}
			}
		}
	}

	return 0;
}
333
/*
 * Build and transmit one Login Response PDU from the state in @login.
 *
 * Header fields are converted to network byte order for the wire, the
 * text payload is padded to a 4-byte boundary, and after a successful
 * send the header fields are converted back to CPU order so later
 * comparisons (e.g. in iscsi_target_check_login_request()) see host
 * values.  Returns 0 on success, -1 on transmit failure.
 */
static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
{
	u32 padding = 0;
	struct iscsi_session *sess = conn->sess;
	struct iscsi_login_rsp *login_rsp;

	login_rsp = (struct iscsi_login_rsp *) login->rsp;

	login_rsp->opcode = ISCSI_OP_LOGIN_RSP;
	hton24(login_rsp->dlength, login->rsp_length);
	memcpy(login_rsp->isid, login->isid, 6);
	login_rsp->tsih = cpu_to_be16(login->tsih);
	login_rsp->itt = cpu_to_be32(login->init_task_tag);
	login_rsp->statsn = cpu_to_be32(conn->stat_sn++);
	login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	login_rsp->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);

	pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x,"
		" ExpCmdSN; 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:"
		" %u\n", login_rsp->flags, ntohl(login_rsp->itt),
		ntohl(login_rsp->exp_cmdsn), ntohl(login_rsp->max_cmdsn),
		ntohl(login_rsp->statsn), login->rsp_length);

	/* Pad the text payload up to the next 4-byte boundary. */
	padding = ((-login->rsp_length) & 3);

	if (iscsi_login_tx_data(
			conn,
			login->rsp,
			login->rsp_buf,
			login->rsp_length + padding) < 0)
		return -1;

	login->rsp_length = 0;
	/* Restore CPU byte order for subsequent per-PDU state checks. */
	login_rsp->tsih = be16_to_cpu(login_rsp->tsih);
	login_rsp->itt = be32_to_cpu(login_rsp->itt);
	login_rsp->statsn = be32_to_cpu(login_rsp->statsn);
	/* CmdSN window values are refreshed under the session's mutex. */
	mutex_lock(&sess->cmdsn_mutex);
	login_rsp->exp_cmdsn = be32_to_cpu(sess->exp_cmd_sn);
	login_rsp->max_cmdsn = be32_to_cpu(sess->max_cmd_sn);
	mutex_unlock(&sess->cmdsn_mutex);

	return 0;
}
377
/*
 * Receive the next Login Request PDU: read the 48-byte header, convert
 * its fields to CPU byte order, validate it via
 * iscsi_target_check_login_request(), then read the padded text payload
 * into login->req_buf.  Returns 0 on success, -1 on any failure.
 */
static int iscsi_target_do_rx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
{
	u32 padding = 0, payload_length;
	struct iscsi_login_req *login_req;

	if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
		return -1;

	login_req = (struct iscsi_login_req *) login->req;
	payload_length = ntoh24(login_req->dlength);
	/* Convert wire (big-endian) header fields to CPU byte order. */
	login_req->tsih = be16_to_cpu(login_req->tsih);
	login_req->itt = be32_to_cpu(login_req->itt);
	login_req->cid = be16_to_cpu(login_req->cid);
	login_req->cmdsn = be32_to_cpu(login_req->cmdsn);
	login_req->exp_statsn = be32_to_cpu(login_req->exp_statsn);

	pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
		login_req->flags, login_req->itt, login_req->cmdsn,
		login_req->exp_statsn, login_req->cid, payload_length);

	if (iscsi_target_check_login_request(conn, login) < 0)
		return -1;

	/* Payloads are padded to a 4-byte boundary on the wire. */
	padding = ((-payload_length) & 3);
	memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);

	if (iscsi_login_rx_data(
			conn,
			login->req_buf,
			payload_length + padding) < 0)
		return -1;

	return 0;
}
413
/*
 * Perform one full login exchange: transmit the pending Login Response,
 * then receive and validate the next Login Request.  Returns 0 on
 * success, -1 if either direction fails (rx is skipped after a tx
 * failure).
 */
static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
{
	int rc = iscsi_target_do_tx_login_io(conn, login);

	if (rc >= 0)
		rc = iscsi_target_do_rx_login_io(conn, login);

	return (rc < 0) ? -1 : 0;
}
424
/*
 * Read the text payload of the FIRST Login Request, whose header was
 * already received by the login thread.  Validates the header state,
 * then pulls the padded key=value payload into login->req_buf.
 * Returns 0 on success, -1 on failure.
 */
static int iscsi_target_get_initial_payload(
	struct iscsi_conn *conn,
	struct iscsi_login *login)
{
	u32 padding = 0, payload_length;
	struct iscsi_login_req *login_req;

	login_req = (struct iscsi_login_req *) login->req;
	payload_length = ntoh24(login_req->dlength);

	pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
		login_req->flags, login_req->itt, login_req->cmdsn,
		login_req->exp_statsn, payload_length);

	if (iscsi_target_check_login_request(conn, login) < 0)
		return -1;

	/* Payloads are padded to a 4-byte boundary on the wire. */
	padding = ((-payload_length) & 3);

	if (iscsi_login_rx_data(
			conn,
			login->req_buf,
			payload_length + padding) < 0)
		return -1;

	return 0;
}
453
454/*
455 * NOTE: We check for existing sessions or connections AFTER the initiator
456 * has been successfully authenticated in order to protect against faked
457 * ISID/TSIH combinations.
458 */
459static int iscsi_target_check_for_existing_instances(
460 struct iscsi_conn *conn,
461 struct iscsi_login *login)
462{
463 if (login->checked_for_existing)
464 return 0;
465
466 login->checked_for_existing = 1;
467
468 if (!login->tsih)
469 return iscsi_check_for_session_reinstatement(conn);
470 else
471 return iscsi_login_post_auth_non_zero_tsih(conn, login->cid,
472 login->initial_exp_statsn);
473}
474
/*
 * Run one round of login authentication via
 * iscsi_handle_authentication() and act on its status code:
 *   0 -> more auth PDUs expected, keep negotiating;
 *   1 -> auth complete; honor a requested transit to stage 1 and check
 *        for existing session/connection instances;
 *   2 -> security negotiation failed, reject the login;
 *   anything else -> internal error, reject with a target error.
 * Returns 0 to continue the login, -1 on failure.
 */
static int iscsi_target_do_authentication(
	struct iscsi_conn *conn,
	struct iscsi_login *login)
{
	int authret;
	u32 payload_length;
	struct iscsi_param *param;
	struct iscsi_login_req *login_req;
	struct iscsi_login_rsp *login_rsp;

	login_req = (struct iscsi_login_req *) login->req;
	login_rsp = (struct iscsi_login_rsp *) login->rsp;
	payload_length = ntoh24(login_req->dlength);

	param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
	if (!param)
		return -1;

	authret = iscsi_handle_authentication(
			conn,
			login->req_buf,
			login->rsp_buf,
			payload_length,
			&login->rsp_length,
			param->value);
	switch (authret) {
	case 0:
		pr_debug("Received OK response"
		" from LIO Authentication, continuing.\n");
		break;
	case 1:
		pr_debug("iSCSI security negotiation"
			" completed sucessfully.\n");
		login->auth_complete = 1;
		/* Honor a requested transit from CSG 0 to CSG 1. */
		if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
		    (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
			login_rsp->flags |= (ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
					     ISCSI_FLAG_LOGIN_TRANSIT);
			login->current_stage = 1;
		}
		return iscsi_target_check_for_existing_instances(
				conn, login);
	case 2:
		pr_err("Security negotiation"
			" failed.\n");
		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
				ISCSI_LOGIN_STATUS_AUTH_FAILED);
		return -1;
	default:
		pr_err("Received unknown error %d from LIO"
				" Authentication\n", authret);
		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
				ISCSI_LOGIN_STATUS_TARGET_ERROR);
		return -1;
	}

	return 0;
}
533
/*
 * Handle a Login Request in CSG 0 (SecurityNegotiation stage).
 *
 * Decodes the initiator's security-phase keys; if authentication keys
 * were present (decode returned > 0) runs the auth machinery, otherwise
 * encodes the target's response keys and decides whether a transit to
 * CSG 1 may be granted — authentication enforcement is checked here so
 * AuthMethod=None cannot bypass a target that requires CHAP.
 * Returns 0 to continue the login, -1 on failure.
 */
static int iscsi_target_handle_csg_zero(
	struct iscsi_conn *conn,
	struct iscsi_login *login)
{
	int ret;
	u32 payload_length;
	struct iscsi_param *param;
	struct iscsi_login_req *login_req;
	struct iscsi_login_rsp *login_rsp;

	login_req = (struct iscsi_login_req *) login->req;
	login_rsp = (struct iscsi_login_rsp *) login->rsp;
	payload_length = ntoh24(login_req->dlength);

	param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
	if (!param)
		return -1;

	ret = iscsi_decode_text_input(
			PHASE_SECURITY|PHASE_DECLARATIVE,
			SENDER_INITIATOR|SENDER_RECEIVER,
			login->req_buf,
			payload_length,
			conn->param_list);
	if (ret < 0)
		return -1;

	/* ret > 0 means authentication-specific keys were received. */
	if (ret > 0) {
		if (login->auth_complete) {
			pr_err("Initiator has already been"
				" successfully authenticated, but is still"
				" sending %s keys.\n", param->value);
			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
					ISCSI_LOGIN_STATUS_INIT_ERR);
			return -1;
		}

		goto do_auth;
	}

	if (login->first_request)
		if (iscsi_target_check_first_request(conn, login) < 0)
			return -1;

	ret = iscsi_encode_text_output(
			PHASE_SECURITY|PHASE_DECLARATIVE,
			SENDER_TARGET,
			login->rsp_buf,
			&login->rsp_length,
			conn->param_list);
	if (ret < 0)
		return -1;

	if (!iscsi_check_negotiated_keys(conn->param_list)) {
		/* Target enforces auth: reject an explicit AuthMethod=None. */
		if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
		    !strncmp(param->value, NONE, 4)) {
			pr_err("Initiator sent AuthMethod=None but"
				" Target is enforcing iSCSI Authentication,"
				" login failed.\n");
			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
					ISCSI_LOGIN_STATUS_AUTH_FAILED);
			return -1;
		}

		/* Authentication still pending: no stage transit yet. */
		if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
		    !login->auth_complete)
			return 0;

		if (strncmp(param->value, NONE, 4) && !login->auth_complete)
			return 0;

		/* Grant the requested transit from CSG 0 to CSG 1. */
		if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
		    (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
			login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
					    ISCSI_FLAG_LOGIN_TRANSIT;
			login->current_stage = 1;
		}
	}

	return 0;
do_auth:
	return iscsi_target_do_authentication(conn, login);
}
617
/*
 * Handle a Login Request in CSG 1 (LoginOperationalNegotiation stage).
 *
 * Decodes the operational keys, runs first-request checks and the
 * existing-instance check, encodes the target's response keys, and
 * rejects initiators that reached CSG 1 without authenticating on a
 * target that enforces authentication.  A transit to CSG 3 (full
 * feature phase) is granted only once all keys are negotiated.
 * Returns 0 to continue the login, -1 on failure.
 */
static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_login *login)
{
	int ret;
	u32 payload_length;
	struct iscsi_login_req *login_req;
	struct iscsi_login_rsp *login_rsp;

	login_req = (struct iscsi_login_req *) login->req;
	login_rsp = (struct iscsi_login_rsp *) login->rsp;
	payload_length = ntoh24(login_req->dlength);

	ret = iscsi_decode_text_input(
			PHASE_OPERATIONAL|PHASE_DECLARATIVE,
			SENDER_INITIATOR|SENDER_RECEIVER,
			login->req_buf,
			payload_length,
			conn->param_list);
	if (ret < 0)
		return -1;

	if (login->first_request)
		if (iscsi_target_check_first_request(conn, login) < 0)
			return -1;

	if (iscsi_target_check_for_existing_instances(conn, login) < 0)
		return -1;

	ret = iscsi_encode_text_output(
			PHASE_OPERATIONAL|PHASE_DECLARATIVE,
			SENDER_TARGET,
			login->rsp_buf,
			&login->rsp_length,
			conn->param_list);
	if (ret < 0)
		return -1;

	/* CSG 1 without completed auth is only legal if auth is optional. */
	if (!login->auth_complete &&
	     ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
		pr_err("Initiator is requesting CSG: 1, has not been"
			 " successfully authenticated, and the Target is"
			 " enforcing iSCSI Authentication, login failed.\n");
		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
				ISCSI_LOGIN_STATUS_AUTH_FAILED);
		return -1;
	}

	/* Grant transit to CSG 3 only once all keys are negotiated. */
	if (!iscsi_check_negotiated_keys(conn->param_list))
		if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE3) &&
		    (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT))
			login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE3 |
					    ISCSI_FLAG_LOGIN_TRANSIT;

	return 0;
}
672
/*
 * Main login negotiation loop: process up to MAX_LOGIN_PDUS Login
 * Request/Response exchanges, dispatching on the request's CSG.  The
 * loop exits successfully only when CSG 1 handling grants a TRANSIT to
 * full feature phase and the final Login Response is sent.
 * Returns 0 on successful completion, -1 on any failure.
 */
static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *login)
{
	int pdu_count = 0;
	struct iscsi_login_req *login_req;
	struct iscsi_login_rsp *login_rsp;

	login_req = (struct iscsi_login_req *) login->req;
	login_rsp = (struct iscsi_login_rsp *) login->rsp;

	while (1) {
		/* Cap the exchange to defend against endless negotiation. */
		if (++pdu_count > MAX_LOGIN_PDUS) {
			pr_err("MAX_LOGIN_PDUS count reached.\n");
			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
					ISCSI_LOGIN_STATUS_TARGET_ERROR);
			return -1;
		}

		/* Dispatch on the request's Current Stage (flags bits 3:2). */
		switch ((login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2) {
		case 0:
			login_rsp->flags |= (0 & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK);
			if (iscsi_target_handle_csg_zero(conn, login) < 0)
				return -1;
			break;
		case 1:
			login_rsp->flags |= ISCSI_FLAG_LOGIN_CURRENT_STAGE1;
			if (iscsi_target_handle_csg_one(conn, login) < 0)
				return -1;
			/* TRANSIT granted: send final response and finish. */
			if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
				login->tsih = conn->sess->tsih;
				if (iscsi_target_do_tx_login_io(conn,
						login) < 0)
					return -1;
				return 0;
			}
			break;
		default:
			/*
			 * NOTE(review): an illegal CSG is only logged here;
			 * the loop still proceeds to the login I/O below
			 * rather than failing the login — confirm this is
			 * intentional.
			 */
			pr_err("Illegal CSG: %d received from"
				" Initiator, protocol error.\n",
				(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
				>> 2);
			break;
		}

		if (iscsi_target_do_login_io(conn, login) < 0)
			return -1;

		/* Clear per-PDU transit state before the next iteration. */
		if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
			login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT;
			login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK;
		}
	}

	/* Unreachable: the loop exits only via the returns above. */
	return 0;
}
727
/*
 * Lowercase an InitiatorName IQN in place.  RFC-3720 3.2.6.1 c) states
 * that iSCSI qualified names are NOT case sensitive, so the name is
 * canonicalized to lowercase before any comparisons.
 */
static void iscsi_initiatorname_tolower(
	char *param_buf)
{
	char *p;

	for (p = param_buf; *p != '\0'; p++) {
		if (isupper(*p))
			*p = tolower(*p);
	}
}
742
743/*
744 * Processes the first Login Request..
745 */
746static int iscsi_target_locate_portal(
747 struct iscsi_np *np,
748 struct iscsi_conn *conn,
749 struct iscsi_login *login)
750{
751 char *i_buf = NULL, *s_buf = NULL, *t_buf = NULL;
752 char *tmpbuf, *start = NULL, *end = NULL, *key, *value;
753 struct iscsi_session *sess = conn->sess;
754 struct iscsi_tiqn *tiqn;
755 struct iscsi_login_req *login_req;
756 struct iscsi_targ_login_rsp *login_rsp;
757 u32 payload_length;
758 int sessiontype = 0, ret = 0;
759
760 login_req = (struct iscsi_login_req *) login->req;
761 login_rsp = (struct iscsi_targ_login_rsp *) login->rsp;
762 payload_length = ntoh24(login_req->dlength);
763
764 login->first_request = 1;
765 login->leading_connection = (!login_req->tsih) ? 1 : 0;
766 login->current_stage =
767 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
768 login->version_min = login_req->min_version;
769 login->version_max = login_req->max_version;
770 memcpy(login->isid, login_req->isid, 6);
771 login->cmd_sn = login_req->cmdsn;
772 login->init_task_tag = login_req->itt;
773 login->initial_exp_statsn = login_req->exp_statsn;
774 login->cid = login_req->cid;
775 login->tsih = login_req->tsih;
776
777 if (iscsi_target_get_initial_payload(conn, login) < 0)
778 return -1;
779
780 tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL);
781 if (!tmpbuf) {
782 pr_err("Unable to allocate memory for tmpbuf.\n");
783 return -1;
784 }
785
786 memcpy(tmpbuf, login->req_buf, payload_length);
787 tmpbuf[payload_length] = '\0';
788 start = tmpbuf;
789 end = (start + payload_length);
790
791 /*
792 * Locate the initial keys expected from the Initiator node in
793 * the first login request in order to progress with the login phase.
794 */
795 while (start < end) {
796 if (iscsi_extract_key_value(start, &key, &value) < 0) {
797 ret = -1;
798 goto out;
799 }
800
801 if (!strncmp(key, "InitiatorName", 13))
802 i_buf = value;
803 else if (!strncmp(key, "SessionType", 11))
804 s_buf = value;
805 else if (!strncmp(key, "TargetName", 10))
806 t_buf = value;
807
808 start += strlen(key) + strlen(value) + 2;
809 }
810
811 /*
812 * See 5.3. Login Phase.
813 */
814 if (!i_buf) {
815 pr_err("InitiatorName key not received"
816 " in first login request.\n");
817 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
818 ISCSI_LOGIN_STATUS_MISSING_FIELDS);
819 ret = -1;
820 goto out;
821 }
822 /*
823 * Convert the incoming InitiatorName to lowercase following
824 * RFC-3720 3.2.6.1. section c) that says that iSCSI IQNs
825 * are NOT case sensitive.
826 */
827 iscsi_initiatorname_tolower(i_buf);
828
829 if (!s_buf) {
830 if (!login->leading_connection)
831 goto get_target;
832
833 pr_err("SessionType key not received"
834 " in first login request.\n");
835 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
836 ISCSI_LOGIN_STATUS_MISSING_FIELDS);
837 ret = -1;
838 goto out;
839 }
840
841 /*
842 * Use default portal group for discovery sessions.
843 */
844 sessiontype = strncmp(s_buf, DISCOVERY, 9);
845 if (!sessiontype) {
846 conn->tpg = iscsit_global->discovery_tpg;
847 if (!login->leading_connection)
848 goto get_target;
849
850 sess->sess_ops->SessionType = 1;
851 /*
852 * Setup crc32c modules from libcrypto
853 */
854 if (iscsi_login_setup_crypto(conn) < 0) {
855 pr_err("iscsi_login_setup_crypto() failed\n");
856 ret = -1;
857 goto out;
858 }
859 /*
860 * Serialize access across the discovery struct iscsi_portal_group to
861 * process login attempt.
862 */
863 if (iscsit_access_np(np, conn->tpg) < 0) {
864 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
865 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
866 ret = -1;
867 goto out;
868 }
869 ret = 0;
870 goto out;
871 }
872
873get_target:
874 if (!t_buf) {
875 pr_err("TargetName key not received"
876 " in first login request while"
877 " SessionType=Normal.\n");
878 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
879 ISCSI_LOGIN_STATUS_MISSING_FIELDS);
880 ret = -1;
881 goto out;
882 }
883
884 /*
885 * Locate Target IQN from Storage Node.
886 */
887 tiqn = iscsit_get_tiqn_for_login(t_buf);
888 if (!tiqn) {
889 pr_err("Unable to locate Target IQN: %s in"
890 " Storage Node\n", t_buf);
891 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
892 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
893 ret = -1;
894 goto out;
895 }
896 pr_debug("Located Storage Object: %s\n", tiqn->tiqn);
897
898 /*
899 * Locate Target Portal Group from Storage Node.
900 */
901 conn->tpg = iscsit_get_tpg_from_np(tiqn, np);
902 if (!conn->tpg) {
903 pr_err("Unable to locate Target Portal Group"
904 " on %s\n", tiqn->tiqn);
905 iscsit_put_tiqn_for_login(tiqn);
906 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
907 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
908 ret = -1;
909 goto out;
910 }
911 pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
912 /*
913 * Setup crc32c modules from libcrypto
914 */
915 if (iscsi_login_setup_crypto(conn) < 0) {
916 pr_err("iscsi_login_setup_crypto() failed\n");
917 ret = -1;
918 goto out;
919 }
920 /*
921 * Serialize access across the struct iscsi_portal_group to
922 * process login attempt.
923 */
924 if (iscsit_access_np(np, conn->tpg) < 0) {
925 iscsit_put_tiqn_for_login(tiqn);
926 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
927 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
928 ret = -1;
929 conn->tpg = NULL;
930 goto out;
931 }
932
933 /*
934 * conn->sess->node_acl will be set when the referenced
935 * struct iscsi_session is located from received ISID+TSIH in
936 * iscsi_login_non_zero_tsih_s2().
937 */
938 if (!login->leading_connection) {
939 ret = 0;
940 goto out;
941 }
942
943 /*
944 * This value is required in iscsi_login_zero_tsih_s2()
945 */
946 sess->sess_ops->SessionType = 0;
947
948 /*
949 * Locate incoming Initiator IQN reference from Storage Node.
950 */
951 sess->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
952 &conn->tpg->tpg_se_tpg, i_buf);
953 if (!sess->se_sess->se_node_acl) {
954 pr_err("iSCSI Initiator Node: %s is not authorized to"
955 " access iSCSI target portal group: %hu.\n",
956 i_buf, conn->tpg->tpgt);
957 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
958 ISCSI_LOGIN_STATUS_TGT_FORBIDDEN);
959 ret = -1;
960 goto out;
961 }
962
963 ret = 0;
964out:
965 kfree(tmpbuf);
966 return ret;
967}
968
969struct iscsi_login *iscsi_target_init_negotiation(
970 struct iscsi_np *np,
971 struct iscsi_conn *conn,
972 char *login_pdu)
973{
974 struct iscsi_login *login;
975
976 login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
977 if (!login) {
978 pr_err("Unable to allocate memory for struct iscsi_login.\n");
979 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
980 ISCSI_LOGIN_STATUS_NO_RESOURCES);
981 goto out;
982 }
983
984 login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
985 if (!login->req) {
986 pr_err("Unable to allocate memory for Login Request.\n");
987 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
988 ISCSI_LOGIN_STATUS_NO_RESOURCES);
989 goto out;
990 }
991 memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
992
993 login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
994 if (!login->req_buf) {
995 pr_err("Unable to allocate memory for response buffer.\n");
996 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
997 ISCSI_LOGIN_STATUS_NO_RESOURCES);
998 goto out;
999 }
1000 /*
1001 * SessionType: Discovery
1002 *
1003 * Locates Default Portal
1004 *
1005 * SessionType: Normal
1006 *
1007 * Locates Target Portal from NP -> Target IQN
1008 */
1009 if (iscsi_target_locate_portal(np, conn, login) < 0) {
1010 pr_err("iSCSI Login negotiation failed.\n");
1011 goto out;
1012 }
1013
1014 return login;
1015out:
1016 kfree(login->req);
1017 kfree(login->req_buf);
1018 kfree(login);
1019
1020 return NULL;
1021}
1022
1023int iscsi_target_start_negotiation(
1024 struct iscsi_login *login,
1025 struct iscsi_conn *conn)
1026{
1027 int ret = -1;
1028
1029 login->rsp = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
1030 if (!login->rsp) {
1031 pr_err("Unable to allocate memory for"
1032 " Login Response.\n");
1033 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1034 ISCSI_LOGIN_STATUS_NO_RESOURCES);
1035 ret = -1;
1036 goto out;
1037 }
1038
1039 login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
1040 if (!login->rsp_buf) {
1041 pr_err("Unable to allocate memory for"
1042 " request buffer.\n");
1043 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1044 ISCSI_LOGIN_STATUS_NO_RESOURCES);
1045 ret = -1;
1046 goto out;
1047 }
1048
1049 ret = iscsi_target_do_login(conn, login);
1050out:
1051 if (ret != 0)
1052 iscsi_remove_failed_auth_entry(conn);
1053
1054 iscsi_target_nego_release(login, conn);
1055 return ret;
1056}
1057
1058void iscsi_target_nego_release(
1059 struct iscsi_login *login,
1060 struct iscsi_conn *conn)
1061{
1062 kfree(login->req);
1063 kfree(login->rsp);
1064 kfree(login->req_buf);
1065 kfree(login->rsp_buf);
1066 kfree(login);
1067}
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
new file mode 100644
index 000000000000..92e133a5158f
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -0,0 +1,17 @@
1#ifndef ISCSI_TARGET_NEGO_H
2#define ISCSI_TARGET_NEGO_H
3
4#define DECIMAL 0
5#define HEX 1
6
7extern void convert_null_to_semi(char *, int);
8extern int extract_param(const char *, const char *, unsigned int, char *,
9 unsigned char *);
10extern struct iscsi_login *iscsi_target_init_negotiation(
11 struct iscsi_np *, struct iscsi_conn *, char *);
12extern int iscsi_target_start_negotiation(
13 struct iscsi_login *, struct iscsi_conn *);
14extern void iscsi_target_nego_release(
15 struct iscsi_login *, struct iscsi_conn *);
16
17#endif /* ISCSI_TARGET_NEGO_H */
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
new file mode 100644
index 000000000000..aeafbe0cd7d1
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -0,0 +1,263 @@
1/*******************************************************************************
2 * This file contains the main functions related to Initiator Node Attributes.
3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <target/target_core_base.h>
22#include <target/target_core_transport.h>
23
24#include "iscsi_target_core.h"
25#include "iscsi_target_device.h"
26#include "iscsi_target_tpg.h"
27#include "iscsi_target_util.h"
28#include "iscsi_target_nodeattrib.h"
29
30static inline char *iscsit_na_get_initiatorname(
31 struct iscsi_node_acl *nacl)
32{
33 struct se_node_acl *se_nacl = &nacl->se_node_acl;
34
35 return &se_nacl->initiatorname[0];
36}
37
/*
 * Initialize every per-initiator-node attribute to its compiled-in
 * NA_* default.  Called when a new node ACL is set up, before any
 * per-node configuration is applied.
 *
 * NOTE(review): the function name carries a historical typo
 * ("attribues"); it is part of the exported interface declared in
 * iscsi_target_nodeattrib.h and is therefore left unchanged.
 */
void iscsit_set_default_node_attribues(
	struct iscsi_node_acl *acl)
{
	struct iscsi_node_attrib *a = &acl->node_attrib;

	a->dataout_timeout = NA_DATAOUT_TIMEOUT;
	a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES;
	a->nopin_timeout = NA_NOPIN_TIMEOUT;
	a->nopin_response_timeout = NA_NOPIN_RESPONSE_TIMEOUT;
	a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
	a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
	a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
	a->default_erl = NA_DEFAULT_ERL;
}
52
53extern int iscsit_na_dataout_timeout(
54 struct iscsi_node_acl *acl,
55 u32 dataout_timeout)
56{
57 struct iscsi_node_attrib *a = &acl->node_attrib;
58
59 if (dataout_timeout > NA_DATAOUT_TIMEOUT_MAX) {
60 pr_err("Requested DataOut Timeout %u larger than"
61 " maximum %u\n", dataout_timeout,
62 NA_DATAOUT_TIMEOUT_MAX);
63 return -EINVAL;
64 } else if (dataout_timeout < NA_DATAOUT_TIMEOUT_MIX) {
65 pr_err("Requested DataOut Timeout %u smaller than"
66 " minimum %u\n", dataout_timeout,
67 NA_DATAOUT_TIMEOUT_MIX);
68 return -EINVAL;
69 }
70
71 a->dataout_timeout = dataout_timeout;
72 pr_debug("Set DataOut Timeout to %u for Initiator Node"
73 " %s\n", a->dataout_timeout, iscsit_na_get_initiatorname(acl));
74
75 return 0;
76}
77
78extern int iscsit_na_dataout_timeout_retries(
79 struct iscsi_node_acl *acl,
80 u32 dataout_timeout_retries)
81{
82 struct iscsi_node_attrib *a = &acl->node_attrib;
83
84 if (dataout_timeout_retries > NA_DATAOUT_TIMEOUT_RETRIES_MAX) {
85 pr_err("Requested DataOut Timeout Retries %u larger"
86 " than maximum %u", dataout_timeout_retries,
87 NA_DATAOUT_TIMEOUT_RETRIES_MAX);
88 return -EINVAL;
89 } else if (dataout_timeout_retries < NA_DATAOUT_TIMEOUT_RETRIES_MIN) {
90 pr_err("Requested DataOut Timeout Retries %u smaller"
91 " than minimum %u", dataout_timeout_retries,
92 NA_DATAOUT_TIMEOUT_RETRIES_MIN);
93 return -EINVAL;
94 }
95
96 a->dataout_timeout_retries = dataout_timeout_retries;
97 pr_debug("Set DataOut Timeout Retries to %u for"
98 " Initiator Node %s\n", a->dataout_timeout_retries,
99 iscsit_na_get_initiatorname(acl));
100
101 return 0;
102}
103
104extern int iscsit_na_nopin_timeout(
105 struct iscsi_node_acl *acl,
106 u32 nopin_timeout)
107{
108 struct iscsi_node_attrib *a = &acl->node_attrib;
109 struct iscsi_session *sess;
110 struct iscsi_conn *conn;
111 struct se_node_acl *se_nacl = &a->nacl->se_node_acl;
112 struct se_session *se_sess;
113 u32 orig_nopin_timeout = a->nopin_timeout;
114
115 if (nopin_timeout > NA_NOPIN_TIMEOUT_MAX) {
116 pr_err("Requested NopIn Timeout %u larger than maximum"
117 " %u\n", nopin_timeout, NA_NOPIN_TIMEOUT_MAX);
118 return -EINVAL;
119 } else if ((nopin_timeout < NA_NOPIN_TIMEOUT_MIN) &&
120 (nopin_timeout != 0)) {
121 pr_err("Requested NopIn Timeout %u smaller than"
122 " minimum %u and not 0\n", nopin_timeout,
123 NA_NOPIN_TIMEOUT_MIN);
124 return -EINVAL;
125 }
126
127 a->nopin_timeout = nopin_timeout;
128 pr_debug("Set NopIn Timeout to %u for Initiator"
129 " Node %s\n", a->nopin_timeout,
130 iscsit_na_get_initiatorname(acl));
131 /*
132 * Reenable disabled nopin_timeout timer for all iSCSI connections.
133 */
134 if (!orig_nopin_timeout) {
135 spin_lock_bh(&se_nacl->nacl_sess_lock);
136 se_sess = se_nacl->nacl_sess;
137 if (se_sess) {
138 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
139
140 spin_lock(&sess->conn_lock);
141 list_for_each_entry(conn, &sess->sess_conn_list,
142 conn_list) {
143 if (conn->conn_state !=
144 TARG_CONN_STATE_LOGGED_IN)
145 continue;
146
147 spin_lock(&conn->nopin_timer_lock);
148 __iscsit_start_nopin_timer(conn);
149 spin_unlock(&conn->nopin_timer_lock);
150 }
151 spin_unlock(&sess->conn_lock);
152 }
153 spin_unlock_bh(&se_nacl->nacl_sess_lock);
154 }
155
156 return 0;
157}
158
159extern int iscsit_na_nopin_response_timeout(
160 struct iscsi_node_acl *acl,
161 u32 nopin_response_timeout)
162{
163 struct iscsi_node_attrib *a = &acl->node_attrib;
164
165 if (nopin_response_timeout > NA_NOPIN_RESPONSE_TIMEOUT_MAX) {
166 pr_err("Requested NopIn Response Timeout %u larger"
167 " than maximum %u\n", nopin_response_timeout,
168 NA_NOPIN_RESPONSE_TIMEOUT_MAX);
169 return -EINVAL;
170 } else if (nopin_response_timeout < NA_NOPIN_RESPONSE_TIMEOUT_MIN) {
171 pr_err("Requested NopIn Response Timeout %u smaller"
172 " than minimum %u\n", nopin_response_timeout,
173 NA_NOPIN_RESPONSE_TIMEOUT_MIN);
174 return -EINVAL;
175 }
176
177 a->nopin_response_timeout = nopin_response_timeout;
178 pr_debug("Set NopIn Response Timeout to %u for"
179 " Initiator Node %s\n", a->nopin_timeout,
180 iscsit_na_get_initiatorname(acl));
181
182 return 0;
183}
184
185extern int iscsit_na_random_datain_pdu_offsets(
186 struct iscsi_node_acl *acl,
187 u32 random_datain_pdu_offsets)
188{
189 struct iscsi_node_attrib *a = &acl->node_attrib;
190
191 if (random_datain_pdu_offsets != 0 && random_datain_pdu_offsets != 1) {
192 pr_err("Requested Random DataIN PDU Offsets: %u not"
193 " 0 or 1\n", random_datain_pdu_offsets);
194 return -EINVAL;
195 }
196
197 a->random_datain_pdu_offsets = random_datain_pdu_offsets;
198 pr_debug("Set Random DataIN PDU Offsets to %u for"
199 " Initiator Node %s\n", a->random_datain_pdu_offsets,
200 iscsit_na_get_initiatorname(acl));
201
202 return 0;
203}
204
205extern int iscsit_na_random_datain_seq_offsets(
206 struct iscsi_node_acl *acl,
207 u32 random_datain_seq_offsets)
208{
209 struct iscsi_node_attrib *a = &acl->node_attrib;
210
211 if (random_datain_seq_offsets != 0 && random_datain_seq_offsets != 1) {
212 pr_err("Requested Random DataIN Sequence Offsets: %u"
213 " not 0 or 1\n", random_datain_seq_offsets);
214 return -EINVAL;
215 }
216
217 a->random_datain_seq_offsets = random_datain_seq_offsets;
218 pr_debug("Set Random DataIN Sequence Offsets to %u for"
219 " Initiator Node %s\n", a->random_datain_seq_offsets,
220 iscsit_na_get_initiatorname(acl));
221
222 return 0;
223}
224
225extern int iscsit_na_random_r2t_offsets(
226 struct iscsi_node_acl *acl,
227 u32 random_r2t_offsets)
228{
229 struct iscsi_node_attrib *a = &acl->node_attrib;
230
231 if (random_r2t_offsets != 0 && random_r2t_offsets != 1) {
232 pr_err("Requested Random R2T Offsets: %u not"
233 " 0 or 1\n", random_r2t_offsets);
234 return -EINVAL;
235 }
236
237 a->random_r2t_offsets = random_r2t_offsets;
238 pr_debug("Set Random R2T Offsets to %u for"
239 " Initiator Node %s\n", a->random_r2t_offsets,
240 iscsit_na_get_initiatorname(acl));
241
242 return 0;
243}
244
245extern int iscsit_na_default_erl(
246 struct iscsi_node_acl *acl,
247 u32 default_erl)
248{
249 struct iscsi_node_attrib *a = &acl->node_attrib;
250
251 if (default_erl != 0 && default_erl != 1 && default_erl != 2) {
252 pr_err("Requested default ERL: %u not 0, 1, or 2\n",
253 default_erl);
254 return -EINVAL;
255 }
256
257 a->default_erl = default_erl;
258 pr_debug("Set use ERL0 flag to %u for Initiator"
259 " Node %s\n", a->default_erl,
260 iscsit_na_get_initiatorname(acl));
261
262 return 0;
263}
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
new file mode 100644
index 000000000000..c970b326ef23
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -0,0 +1,14 @@
1#ifndef ISCSI_TARGET_NODEATTRIB_H
2#define ISCSI_TARGET_NODEATTRIB_H
3
4extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *);
5extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
6extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
7extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
8extern int iscsit_na_nopin_response_timeout(struct iscsi_node_acl *, u32);
9extern int iscsit_na_random_datain_pdu_offsets(struct iscsi_node_acl *, u32);
10extern int iscsit_na_random_datain_seq_offsets(struct iscsi_node_acl *, u32);
11extern int iscsit_na_random_r2t_offsets(struct iscsi_node_acl *, u32);
12extern int iscsit_na_default_erl(struct iscsi_node_acl *, u32);
13
14#endif /* ISCSI_TARGET_NODEATTRIB_H */
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
new file mode 100644
index 000000000000..252e246cf51e
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -0,0 +1,1905 @@
1/*******************************************************************************
2 * This file contains main functions related to iSCSI Parameter negotiation.
3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/slab.h>
22
23#include "iscsi_target_core.h"
24#include "iscsi_target_util.h"
25#include "iscsi_target_parameters.h"
26
27int iscsi_login_rx_data(
28 struct iscsi_conn *conn,
29 char *buf,
30 int length)
31{
32 int rx_got;
33 struct kvec iov;
34
35 memset(&iov, 0, sizeof(struct kvec));
36 iov.iov_len = length;
37 iov.iov_base = buf;
38
39 /*
40 * Initial Marker-less Interval.
41 * Add the values regardless of IFMarker/OFMarker, considering
42 * it may not be negoitated yet.
43 */
44 conn->of_marker += length;
45
46 rx_got = rx_data(conn, &iov, 1, length);
47 if (rx_got != length) {
48 pr_err("rx_data returned %d, expecting %d.\n",
49 rx_got, length);
50 return -1;
51 }
52
53 return 0 ;
54}
55
56int iscsi_login_tx_data(
57 struct iscsi_conn *conn,
58 char *pdu_buf,
59 char *text_buf,
60 int text_length)
61{
62 int length, tx_sent;
63 struct kvec iov[2];
64
65 length = (ISCSI_HDR_LEN + text_length);
66
67 memset(&iov[0], 0, 2 * sizeof(struct kvec));
68 iov[0].iov_len = ISCSI_HDR_LEN;
69 iov[0].iov_base = pdu_buf;
70 iov[1].iov_len = text_length;
71 iov[1].iov_base = text_buf;
72
73 /*
74 * Initial Marker-less Interval.
75 * Add the values regardless of IFMarker/OFMarker, considering
76 * it may not be negoitated yet.
77 */
78 conn->if_marker += length;
79
80 tx_sent = tx_data(conn, &iov[0], 2, length);
81 if (tx_sent != length) {
82 pr_err("tx_data returned %d, expecting %d.\n",
83 tx_sent, length);
84 return -1;
85 }
86
87 return 0;
88}
89
90void iscsi_dump_conn_ops(struct iscsi_conn_ops *conn_ops)
91{
92 pr_debug("HeaderDigest: %s\n", (conn_ops->HeaderDigest) ?
93 "CRC32C" : "None");
94 pr_debug("DataDigest: %s\n", (conn_ops->DataDigest) ?
95 "CRC32C" : "None");
96 pr_debug("MaxRecvDataSegmentLength: %u\n",
97 conn_ops->MaxRecvDataSegmentLength);
98 pr_debug("OFMarker: %s\n", (conn_ops->OFMarker) ? "Yes" : "No");
99 pr_debug("IFMarker: %s\n", (conn_ops->IFMarker) ? "Yes" : "No");
100 if (conn_ops->OFMarker)
101 pr_debug("OFMarkInt: %u\n", conn_ops->OFMarkInt);
102 if (conn_ops->IFMarker)
103 pr_debug("IFMarkInt: %u\n", conn_ops->IFMarkInt);
104}
105
106void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops)
107{
108 pr_debug("InitiatorName: %s\n", sess_ops->InitiatorName);
109 pr_debug("InitiatorAlias: %s\n", sess_ops->InitiatorAlias);
110 pr_debug("TargetName: %s\n", sess_ops->TargetName);
111 pr_debug("TargetAlias: %s\n", sess_ops->TargetAlias);
112 pr_debug("TargetPortalGroupTag: %hu\n",
113 sess_ops->TargetPortalGroupTag);
114 pr_debug("MaxConnections: %hu\n", sess_ops->MaxConnections);
115 pr_debug("InitialR2T: %s\n",
116 (sess_ops->InitialR2T) ? "Yes" : "No");
117 pr_debug("ImmediateData: %s\n", (sess_ops->ImmediateData) ?
118 "Yes" : "No");
119 pr_debug("MaxBurstLength: %u\n", sess_ops->MaxBurstLength);
120 pr_debug("FirstBurstLength: %u\n", sess_ops->FirstBurstLength);
121 pr_debug("DefaultTime2Wait: %hu\n", sess_ops->DefaultTime2Wait);
122 pr_debug("DefaultTime2Retain: %hu\n",
123 sess_ops->DefaultTime2Retain);
124 pr_debug("MaxOutstandingR2T: %hu\n",
125 sess_ops->MaxOutstandingR2T);
126 pr_debug("DataPDUInOrder: %s\n",
127 (sess_ops->DataPDUInOrder) ? "Yes" : "No");
128 pr_debug("DataSequenceInOrder: %s\n",
129 (sess_ops->DataSequenceInOrder) ? "Yes" : "No");
130 pr_debug("ErrorRecoveryLevel: %hu\n",
131 sess_ops->ErrorRecoveryLevel);
132 pr_debug("SessionType: %s\n", (sess_ops->SessionType) ?
133 "Discovery" : "Normal");
134}
135
136void iscsi_print_params(struct iscsi_param_list *param_list)
137{
138 struct iscsi_param *param;
139
140 list_for_each_entry(param, &param_list->param_list, p_list)
141 pr_debug("%s: %s\n", param->name, param->value);
142}
143
144static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *param_list,
145 char *name, char *value, u8 phase, u8 scope, u8 sender,
146 u16 type_range, u8 use)
147{
148 struct iscsi_param *param = NULL;
149
150 param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
151 if (!param) {
152 pr_err("Unable to allocate memory for parameter.\n");
153 goto out;
154 }
155 INIT_LIST_HEAD(&param->p_list);
156
157 param->name = kzalloc(strlen(name) + 1, GFP_KERNEL);
158 if (!param->name) {
159 pr_err("Unable to allocate memory for parameter name.\n");
160 goto out;
161 }
162
163 param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
164 if (!param->value) {
165 pr_err("Unable to allocate memory for parameter value.\n");
166 goto out;
167 }
168
169 memcpy(param->name, name, strlen(name));
170 param->name[strlen(name)] = '\0';
171 memcpy(param->value, value, strlen(value));
172 param->value[strlen(value)] = '\0';
173 param->phase = phase;
174 param->scope = scope;
175 param->sender = sender;
176 param->use = use;
177 param->type_range = type_range;
178
179 switch (param->type_range) {
180 case TYPERANGE_BOOL_AND:
181 param->type = TYPE_BOOL_AND;
182 break;
183 case TYPERANGE_BOOL_OR:
184 param->type = TYPE_BOOL_OR;
185 break;
186 case TYPERANGE_0_TO_2:
187 case TYPERANGE_0_TO_3600:
188 case TYPERANGE_0_TO_32767:
189 case TYPERANGE_0_TO_65535:
190 case TYPERANGE_1_TO_65535:
191 case TYPERANGE_2_TO_3600:
192 case TYPERANGE_512_TO_16777215:
193 param->type = TYPE_NUMBER;
194 break;
195 case TYPERANGE_AUTH:
196 case TYPERANGE_DIGEST:
197 param->type = TYPE_VALUE_LIST | TYPE_STRING;
198 break;
199 case TYPERANGE_MARKINT:
200 param->type = TYPE_NUMBER_RANGE;
201 param->type_range |= TYPERANGE_1_TO_65535;
202 break;
203 case TYPERANGE_ISCSINAME:
204 case TYPERANGE_SESSIONTYPE:
205 case TYPERANGE_TARGETADDRESS:
206 case TYPERANGE_UTF8:
207 param->type = TYPE_STRING;
208 break;
209 default:
210 pr_err("Unknown type_range 0x%02x\n",
211 param->type_range);
212 goto out;
213 }
214 list_add_tail(&param->p_list, &param_list->param_list);
215
216 return param;
217out:
218 if (param) {
219 kfree(param->value);
220 kfree(param->name);
221 kfree(param);
222 }
223
224 return NULL;
225}
226
227/* #warning Add extension keys */
228int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
229{
230 struct iscsi_param *param = NULL;
231 struct iscsi_param_list *pl;
232
233 pl = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
234 if (!pl) {
235 pr_err("Unable to allocate memory for"
236 " struct iscsi_param_list.\n");
237 return -1 ;
238 }
239 INIT_LIST_HEAD(&pl->param_list);
240 INIT_LIST_HEAD(&pl->extra_response_list);
241
242 /*
243 * The format for setting the initial parameter definitions are:
244 *
245 * Parameter name:
246 * Initial value:
247 * Allowable phase:
248 * Scope:
249 * Allowable senders:
250 * Typerange:
251 * Use:
252 */
253 param = iscsi_set_default_param(pl, AUTHMETHOD, INITIAL_AUTHMETHOD,
254 PHASE_SECURITY, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
255 TYPERANGE_AUTH, USE_INITIAL_ONLY);
256 if (!param)
257 goto out;
258
259 param = iscsi_set_default_param(pl, HEADERDIGEST, INITIAL_HEADERDIGEST,
260 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
261 TYPERANGE_DIGEST, USE_INITIAL_ONLY);
262 if (!param)
263 goto out;
264
265 param = iscsi_set_default_param(pl, DATADIGEST, INITIAL_DATADIGEST,
266 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
267 TYPERANGE_DIGEST, USE_INITIAL_ONLY);
268 if (!param)
269 goto out;
270
271 param = iscsi_set_default_param(pl, MAXCONNECTIONS,
272 INITIAL_MAXCONNECTIONS, PHASE_OPERATIONAL,
273 SCOPE_SESSION_WIDE, SENDER_BOTH,
274 TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
275 if (!param)
276 goto out;
277
278 param = iscsi_set_default_param(pl, SENDTARGETS, INITIAL_SENDTARGETS,
279 PHASE_FFP0, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
280 TYPERANGE_UTF8, 0);
281 if (!param)
282 goto out;
283
284 param = iscsi_set_default_param(pl, TARGETNAME, INITIAL_TARGETNAME,
285 PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_BOTH,
286 TYPERANGE_ISCSINAME, USE_ALL);
287 if (!param)
288 goto out;
289
290 param = iscsi_set_default_param(pl, INITIATORNAME,
291 INITIAL_INITIATORNAME, PHASE_DECLARATIVE,
292 SCOPE_SESSION_WIDE, SENDER_INITIATOR,
293 TYPERANGE_ISCSINAME, USE_INITIAL_ONLY);
294 if (!param)
295 goto out;
296
297 param = iscsi_set_default_param(pl, TARGETALIAS, INITIAL_TARGETALIAS,
298 PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
299 TYPERANGE_UTF8, USE_ALL);
300 if (!param)
301 goto out;
302
303 param = iscsi_set_default_param(pl, INITIATORALIAS,
304 INITIAL_INITIATORALIAS, PHASE_DECLARATIVE,
305 SCOPE_SESSION_WIDE, SENDER_INITIATOR, TYPERANGE_UTF8,
306 USE_ALL);
307 if (!param)
308 goto out;
309
310 param = iscsi_set_default_param(pl, TARGETADDRESS,
311 INITIAL_TARGETADDRESS, PHASE_DECLARATIVE,
312 SCOPE_SESSION_WIDE, SENDER_TARGET,
313 TYPERANGE_TARGETADDRESS, USE_ALL);
314 if (!param)
315 goto out;
316
317 param = iscsi_set_default_param(pl, TARGETPORTALGROUPTAG,
318 INITIAL_TARGETPORTALGROUPTAG,
319 PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
320 TYPERANGE_0_TO_65535, USE_INITIAL_ONLY);
321 if (!param)
322 goto out;
323
324 param = iscsi_set_default_param(pl, INITIALR2T, INITIAL_INITIALR2T,
325 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
326 TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
327 if (!param)
328 goto out;
329
330 param = iscsi_set_default_param(pl, IMMEDIATEDATA,
331 INITIAL_IMMEDIATEDATA, PHASE_OPERATIONAL,
332 SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_AND,
333 USE_LEADING_ONLY);
334 if (!param)
335 goto out;
336
337 param = iscsi_set_default_param(pl, MAXRECVDATASEGMENTLENGTH,
338 INITIAL_MAXRECVDATASEGMENTLENGTH,
339 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
340 TYPERANGE_512_TO_16777215, USE_ALL);
341 if (!param)
342 goto out;
343
344 param = iscsi_set_default_param(pl, MAXBURSTLENGTH,
345 INITIAL_MAXBURSTLENGTH, PHASE_OPERATIONAL,
346 SCOPE_SESSION_WIDE, SENDER_BOTH,
347 TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
348 if (!param)
349 goto out;
350
351 param = iscsi_set_default_param(pl, FIRSTBURSTLENGTH,
352 INITIAL_FIRSTBURSTLENGTH,
353 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
354 TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
355 if (!param)
356 goto out;
357
358 param = iscsi_set_default_param(pl, DEFAULTTIME2WAIT,
359 INITIAL_DEFAULTTIME2WAIT,
360 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
361 TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
362 if (!param)
363 goto out;
364
365 param = iscsi_set_default_param(pl, DEFAULTTIME2RETAIN,
366 INITIAL_DEFAULTTIME2RETAIN,
367 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
368 TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
369 if (!param)
370 goto out;
371
372 param = iscsi_set_default_param(pl, MAXOUTSTANDINGR2T,
373 INITIAL_MAXOUTSTANDINGR2T,
374 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
375 TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
376 if (!param)
377 goto out;
378
379 param = iscsi_set_default_param(pl, DATAPDUINORDER,
380 INITIAL_DATAPDUINORDER, PHASE_OPERATIONAL,
381 SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_OR,
382 USE_LEADING_ONLY);
383 if (!param)
384 goto out;
385
386 param = iscsi_set_default_param(pl, DATASEQUENCEINORDER,
387 INITIAL_DATASEQUENCEINORDER,
388 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
389 TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
390 if (!param)
391 goto out;
392
393 param = iscsi_set_default_param(pl, ERRORRECOVERYLEVEL,
394 INITIAL_ERRORRECOVERYLEVEL,
395 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
396 TYPERANGE_0_TO_2, USE_LEADING_ONLY);
397 if (!param)
398 goto out;
399
400 param = iscsi_set_default_param(pl, SESSIONTYPE, INITIAL_SESSIONTYPE,
401 PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
402 TYPERANGE_SESSIONTYPE, USE_LEADING_ONLY);
403 if (!param)
404 goto out;
405
406 param = iscsi_set_default_param(pl, IFMARKER, INITIAL_IFMARKER,
407 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
408 TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
409 if (!param)
410 goto out;
411
412 param = iscsi_set_default_param(pl, OFMARKER, INITIAL_OFMARKER,
413 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
414 TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
415 if (!param)
416 goto out;
417
418 param = iscsi_set_default_param(pl, IFMARKINT, INITIAL_IFMARKINT,
419 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
420 TYPERANGE_MARKINT, USE_INITIAL_ONLY);
421 if (!param)
422 goto out;
423
424 param = iscsi_set_default_param(pl, OFMARKINT, INITIAL_OFMARKINT,
425 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
426 TYPERANGE_MARKINT, USE_INITIAL_ONLY);
427 if (!param)
428 goto out;
429
430 *param_list_ptr = pl;
431 return 0;
432out:
433 iscsi_release_param_list(pl);
434 return -1;
435}
436
437int iscsi_set_keys_to_negotiate(
438 int sessiontype,
439 struct iscsi_param_list *param_list)
440{
441 struct iscsi_param *param;
442
443 list_for_each_entry(param, &param_list->param_list, p_list) {
444 param->state = 0;
445 if (!strcmp(param->name, AUTHMETHOD)) {
446 SET_PSTATE_NEGOTIATE(param);
447 } else if (!strcmp(param->name, HEADERDIGEST)) {
448 SET_PSTATE_NEGOTIATE(param);
449 } else if (!strcmp(param->name, DATADIGEST)) {
450 SET_PSTATE_NEGOTIATE(param);
451 } else if (!strcmp(param->name, MAXCONNECTIONS)) {
452 SET_PSTATE_NEGOTIATE(param);
453 } else if (!strcmp(param->name, TARGETNAME)) {
454 continue;
455 } else if (!strcmp(param->name, INITIATORNAME)) {
456 continue;
457 } else if (!strcmp(param->name, TARGETALIAS)) {
458 if (param->value)
459 SET_PSTATE_NEGOTIATE(param);
460 } else if (!strcmp(param->name, INITIATORALIAS)) {
461 continue;
462 } else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
463 SET_PSTATE_NEGOTIATE(param);
464 } else if (!strcmp(param->name, INITIALR2T)) {
465 SET_PSTATE_NEGOTIATE(param);
466 } else if (!strcmp(param->name, IMMEDIATEDATA)) {
467 SET_PSTATE_NEGOTIATE(param);
468 } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
469 SET_PSTATE_NEGOTIATE(param);
470 } else if (!strcmp(param->name, MAXBURSTLENGTH)) {
471 SET_PSTATE_NEGOTIATE(param);
472 } else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
473 SET_PSTATE_NEGOTIATE(param);
474 } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
475 SET_PSTATE_NEGOTIATE(param);
476 } else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
477 SET_PSTATE_NEGOTIATE(param);
478 } else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
479 SET_PSTATE_NEGOTIATE(param);
480 } else if (!strcmp(param->name, DATAPDUINORDER)) {
481 SET_PSTATE_NEGOTIATE(param);
482 } else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
483 SET_PSTATE_NEGOTIATE(param);
484 } else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
485 SET_PSTATE_NEGOTIATE(param);
486 } else if (!strcmp(param->name, SESSIONTYPE)) {
487 SET_PSTATE_NEGOTIATE(param);
488 } else if (!strcmp(param->name, IFMARKER)) {
489 SET_PSTATE_NEGOTIATE(param);
490 } else if (!strcmp(param->name, OFMARKER)) {
491 SET_PSTATE_NEGOTIATE(param);
492 } else if (!strcmp(param->name, IFMARKINT)) {
493 SET_PSTATE_NEGOTIATE(param);
494 } else if (!strcmp(param->name, OFMARKINT)) {
495 SET_PSTATE_NEGOTIATE(param);
496 }
497 }
498
499 return 0;
500}
501
502int iscsi_set_keys_irrelevant_for_discovery(
503 struct iscsi_param_list *param_list)
504{
505 struct iscsi_param *param;
506
507 list_for_each_entry(param, &param_list->param_list, p_list) {
508 if (!strcmp(param->name, MAXCONNECTIONS))
509 param->state &= ~PSTATE_NEGOTIATE;
510 else if (!strcmp(param->name, INITIALR2T))
511 param->state &= ~PSTATE_NEGOTIATE;
512 else if (!strcmp(param->name, IMMEDIATEDATA))
513 param->state &= ~PSTATE_NEGOTIATE;
514 else if (!strcmp(param->name, MAXBURSTLENGTH))
515 param->state &= ~PSTATE_NEGOTIATE;
516 else if (!strcmp(param->name, FIRSTBURSTLENGTH))
517 param->state &= ~PSTATE_NEGOTIATE;
518 else if (!strcmp(param->name, MAXOUTSTANDINGR2T))
519 param->state &= ~PSTATE_NEGOTIATE;
520 else if (!strcmp(param->name, DATAPDUINORDER))
521 param->state &= ~PSTATE_NEGOTIATE;
522 else if (!strcmp(param->name, DATASEQUENCEINORDER))
523 param->state &= ~PSTATE_NEGOTIATE;
524 else if (!strcmp(param->name, ERRORRECOVERYLEVEL))
525 param->state &= ~PSTATE_NEGOTIATE;
526 else if (!strcmp(param->name, DEFAULTTIME2WAIT))
527 param->state &= ~PSTATE_NEGOTIATE;
528 else if (!strcmp(param->name, DEFAULTTIME2RETAIN))
529 param->state &= ~PSTATE_NEGOTIATE;
530 else if (!strcmp(param->name, IFMARKER))
531 param->state &= ~PSTATE_NEGOTIATE;
532 else if (!strcmp(param->name, OFMARKER))
533 param->state &= ~PSTATE_NEGOTIATE;
534 else if (!strcmp(param->name, IFMARKINT))
535 param->state &= ~PSTATE_NEGOTIATE;
536 else if (!strcmp(param->name, OFMARKINT))
537 param->state &= ~PSTATE_NEGOTIATE;
538 }
539
540 return 0;
541}
542
543int iscsi_copy_param_list(
544 struct iscsi_param_list **dst_param_list,
545 struct iscsi_param_list *src_param_list,
546 int leading)
547{
548 struct iscsi_param *new_param = NULL, *param = NULL;
549 struct iscsi_param_list *param_list = NULL;
550
551 param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
552 if (!param_list) {
553 pr_err("Unable to allocate memory for"
554 " struct iscsi_param_list.\n");
555 goto err_out;
556 }
557 INIT_LIST_HEAD(&param_list->param_list);
558 INIT_LIST_HEAD(&param_list->extra_response_list);
559
560 list_for_each_entry(param, &src_param_list->param_list, p_list) {
561 if (!leading && (param->scope & SCOPE_SESSION_WIDE)) {
562 if ((strcmp(param->name, "TargetName") != 0) &&
563 (strcmp(param->name, "InitiatorName") != 0) &&
564 (strcmp(param->name, "TargetPortalGroupTag") != 0))
565 continue;
566 }
567
568 new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
569 if (!new_param) {
570 pr_err("Unable to allocate memory for"
571 " struct iscsi_param.\n");
572 goto err_out;
573 }
574
575 new_param->set_param = param->set_param;
576 new_param->phase = param->phase;
577 new_param->scope = param->scope;
578 new_param->sender = param->sender;
579 new_param->type = param->type;
580 new_param->use = param->use;
581 new_param->type_range = param->type_range;
582
583 new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL);
584 if (!new_param->name) {
585 pr_err("Unable to allocate memory for"
586 " parameter name.\n");
587 goto err_out;
588 }
589
590 new_param->value = kzalloc(strlen(param->value) + 1,
591 GFP_KERNEL);
592 if (!new_param->value) {
593 pr_err("Unable to allocate memory for"
594 " parameter value.\n");
595 goto err_out;
596 }
597
598 memcpy(new_param->name, param->name, strlen(param->name));
599 new_param->name[strlen(param->name)] = '\0';
600 memcpy(new_param->value, param->value, strlen(param->value));
601 new_param->value[strlen(param->value)] = '\0';
602
603 list_add_tail(&new_param->p_list, &param_list->param_list);
604 }
605
606 if (!list_empty(&param_list->param_list))
607 *dst_param_list = param_list;
608 else {
609 pr_err("No parameters allocated.\n");
610 goto err_out;
611 }
612
613 return 0;
614
615err_out:
616 iscsi_release_param_list(param_list);
617 return -1;
618}
619
620static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
621{
622 struct iscsi_extra_response *er, *er_tmp;
623
624 list_for_each_entry_safe(er, er_tmp, &param_list->extra_response_list,
625 er_list) {
626 list_del(&er->er_list);
627 kfree(er);
628 }
629}
630
631void iscsi_release_param_list(struct iscsi_param_list *param_list)
632{
633 struct iscsi_param *param, *param_tmp;
634
635 list_for_each_entry_safe(param, param_tmp, &param_list->param_list,
636 p_list) {
637 list_del(&param->p_list);
638
639 kfree(param->name);
640 param->name = NULL;
641 kfree(param->value);
642 param->value = NULL;
643 kfree(param);
644 param = NULL;
645 }
646
647 iscsi_release_extra_responses(param_list);
648
649 kfree(param_list);
650}
651
652struct iscsi_param *iscsi_find_param_from_key(
653 char *key,
654 struct iscsi_param_list *param_list)
655{
656 struct iscsi_param *param;
657
658 if (!key || !param_list) {
659 pr_err("Key or parameter list pointer is NULL.\n");
660 return NULL;
661 }
662
663 list_for_each_entry(param, &param_list->param_list, p_list) {
664 if (!strcmp(key, param->name))
665 return param;
666 }
667
668 pr_err("Unable to locate key \"%s\".\n", key);
669 return NULL;
670}
671
/*
 * Split a "key=value" pair in place: the '=' in @textbuf is overwritten
 * with '\0', *key then points at the start of @textbuf and *value at
 * the first character after the former '='.
 *
 * Returns 0 on success, -1 if @textbuf contains no '='.  Note @textbuf
 * is modified on success.
 */
int iscsi_extract_key_value(char *textbuf, char **key, char **value)
{
	*value = strchr(textbuf, '=');
	if (!*value) {
		/* Log message typo fixed: "seperator" -> "separator". */
		pr_err("Unable to locate \"=\" separator for key,"
				" ignoring request.\n");
		return -1;
	}

	*key = textbuf;
	**value = '\0';
	*value = *value + 1;

	return 0;
}
687
688int iscsi_update_param_value(struct iscsi_param *param, char *value)
689{
690 kfree(param->value);
691
692 param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
693 if (!param->value) {
694 pr_err("Unable to allocate memory for value.\n");
695 return -1;
696 }
697
698 memcpy(param->value, value, strlen(value));
699 param->value[strlen(value)] = '\0';
700
701 pr_debug("iSCSI Parameter updated to %s=%s\n",
702 param->name, param->value);
703 return 0;
704}
705
706static int iscsi_add_notunderstood_response(
707 char *key,
708 char *value,
709 struct iscsi_param_list *param_list)
710{
711 struct iscsi_extra_response *extra_response;
712
713 if (strlen(value) > VALUE_MAXLEN) {
714 pr_err("Value for notunderstood key \"%s\" exceeds %d,"
715 " protocol error.\n", key, VALUE_MAXLEN);
716 return -1;
717 }
718
719 extra_response = kzalloc(sizeof(struct iscsi_extra_response), GFP_KERNEL);
720 if (!extra_response) {
721 pr_err("Unable to allocate memory for"
722 " struct iscsi_extra_response.\n");
723 return -1;
724 }
725 INIT_LIST_HEAD(&extra_response->er_list);
726
727 strncpy(extra_response->key, key, strlen(key) + 1);
728 strncpy(extra_response->value, NOTUNDERSTOOD,
729 strlen(NOTUNDERSTOOD) + 1);
730
731 list_add_tail(&extra_response->er_list,
732 &param_list->extra_response_list);
733 return 0;
734}
735
/*
 * Return 1 if @key is an authentication-method key -- CHAP (RFC 1994)
 * or SRP (RFC 2945) -- 0 otherwise.
 */
static int iscsi_check_for_auth_key(char *key)
{
	static const char * const auth_keys[] = {
		/* RFC 1994 (CHAP) */
		"CHAP_A", "CHAP_I", "CHAP_C", "CHAP_N", "CHAP_R",
		/* RFC 2945 (SRP) */
		"SRP_U", "SRP_N", "SRP_g", "SRP_s", "SRP_A", "SRP_B",
		"SRP_M", "SRP_HM",
	};
	size_t i;

	for (i = 0; i < sizeof(auth_keys) / sizeof(auth_keys[0]); i++)
		if (!strcmp(key, auth_keys[i]))
			return 1;

	return 0;
}
757
758static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
759{
760 if (IS_TYPE_BOOL_AND(param)) {
761 if (!strcmp(param->value, NO))
762 SET_PSTATE_REPLY_OPTIONAL(param);
763 } else if (IS_TYPE_BOOL_OR(param)) {
764 if (!strcmp(param->value, YES))
765 SET_PSTATE_REPLY_OPTIONAL(param);
766 /*
767 * Required for gPXE iSCSI boot client
768 */
769 if (!strcmp(param->name, IMMEDIATEDATA))
770 SET_PSTATE_REPLY_OPTIONAL(param);
771 } else if (IS_TYPE_NUMBER(param)) {
772 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
773 SET_PSTATE_REPLY_OPTIONAL(param);
774 /*
775 * The GlobalSAN iSCSI Initiator for MacOSX does
776 * not respond to MaxBurstLength, FirstBurstLength,
777 * DefaultTime2Wait or DefaultTime2Retain parameter keys.
778 * So, we set them to 'reply optional' here, and assume the
779 * the defaults from iscsi_parameters.h if the initiator
780 * is not RFC compliant and the keys are not negotiated.
781 */
782 if (!strcmp(param->name, MAXBURSTLENGTH))
783 SET_PSTATE_REPLY_OPTIONAL(param);
784 if (!strcmp(param->name, FIRSTBURSTLENGTH))
785 SET_PSTATE_REPLY_OPTIONAL(param);
786 if (!strcmp(param->name, DEFAULTTIME2WAIT))
787 SET_PSTATE_REPLY_OPTIONAL(param);
788 if (!strcmp(param->name, DEFAULTTIME2RETAIN))
789 SET_PSTATE_REPLY_OPTIONAL(param);
790 /*
791 * Required for gPXE iSCSI boot client
792 */
793 if (!strcmp(param->name, MAXCONNECTIONS))
794 SET_PSTATE_REPLY_OPTIONAL(param);
795 } else if (IS_PHASE_DECLARATIVE(param))
796 SET_PSTATE_REPLY_OPTIONAL(param);
797}
798
799static int iscsi_check_boolean_value(struct iscsi_param *param, char *value)
800{
801 if (strcmp(value, YES) && strcmp(value, NO)) {
802 pr_err("Illegal value for \"%s\", must be either"
803 " \"%s\" or \"%s\".\n", param->name, YES, NO);
804 return -1;
805 }
806
807 return 0;
808}
809
810static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_ptr)
811{
812 char *tmpptr;
813 int value = 0;
814
815 value = simple_strtoul(value_ptr, &tmpptr, 0);
816
817/* #warning FIXME: Fix this */
818#if 0
819 if (strspn(endptr, WHITE_SPACE) != strlen(endptr)) {
820 pr_err("Illegal value \"%s\" for \"%s\".\n",
821 value, param->name);
822 return -1;
823 }
824#endif
825 if (IS_TYPERANGE_0_TO_2(param)) {
826 if ((value < 0) || (value > 2)) {
827 pr_err("Illegal value for \"%s\", must be"
828 " between 0 and 2.\n", param->name);
829 return -1;
830 }
831 return 0;
832 }
833 if (IS_TYPERANGE_0_TO_3600(param)) {
834 if ((value < 0) || (value > 3600)) {
835 pr_err("Illegal value for \"%s\", must be"
836 " between 0 and 3600.\n", param->name);
837 return -1;
838 }
839 return 0;
840 }
841 if (IS_TYPERANGE_0_TO_32767(param)) {
842 if ((value < 0) || (value > 32767)) {
843 pr_err("Illegal value for \"%s\", must be"
844 " between 0 and 32767.\n", param->name);
845 return -1;
846 }
847 return 0;
848 }
849 if (IS_TYPERANGE_0_TO_65535(param)) {
850 if ((value < 0) || (value > 65535)) {
851 pr_err("Illegal value for \"%s\", must be"
852 " between 0 and 65535.\n", param->name);
853 return -1;
854 }
855 return 0;
856 }
857 if (IS_TYPERANGE_1_TO_65535(param)) {
858 if ((value < 1) || (value > 65535)) {
859 pr_err("Illegal value for \"%s\", must be"
860 " between 1 and 65535.\n", param->name);
861 return -1;
862 }
863 return 0;
864 }
865 if (IS_TYPERANGE_2_TO_3600(param)) {
866 if ((value < 2) || (value > 3600)) {
867 pr_err("Illegal value for \"%s\", must be"
868 " between 2 and 3600.\n", param->name);
869 return -1;
870 }
871 return 0;
872 }
873 if (IS_TYPERANGE_512_TO_16777215(param)) {
874 if ((value < 512) || (value > 16777215)) {
875 pr_err("Illegal value for \"%s\", must be"
876 " between 512 and 16777215.\n", param->name);
877 return -1;
878 }
879 return 0;
880 }
881
882 return 0;
883}
884
/*
 * Validate a proposed "left~right" numerical range value (only the
 * IFMarkInt and OFMarkInt keys may carry one) against the parameter's
 * own configured range.
 *
 * Returns 0 on success or when the parameter is rejected (the REJECT
 * state is set for a received out-of-range value), -1 on error.
 *
 * NOTE: both @value and param->value are split in place at the '~'
 * during parsing; the separator is restored before returning.
 */
static int iscsi_check_numerical_range_value(struct iscsi_param *param, char *value)
{
	char *left_val_ptr = NULL, *right_val_ptr = NULL;
	char *tilde_ptr = NULL, *tmp_ptr = NULL;
	u32 left_val, right_val, local_left_val, local_right_val;

	if (strcmp(param->name, IFMARKINT) &&
	    strcmp(param->name, OFMARKINT)) {
		pr_err("Only parameters \"%s\" or \"%s\" may contain a"
			" numerical range value.\n", IFMARKINT, OFMARKINT);
		return -1;
	}

	/* A value we are proposing ourselves needs no validation here. */
	if (IS_PSTATE_PROPOSER(param))
		return 0;

	/* Split the proposed value at '~' to parse both halves. */
	tilde_ptr = strchr(value, '~');
	if (!tilde_ptr) {
		pr_err("Unable to locate numerical range indicator"
			" \"~\" for \"%s\".\n", param->name);
		return -1;
	}
	*tilde_ptr = '\0';

	left_val_ptr = value;
	right_val_ptr = value + strlen(left_val_ptr) + 1;

	/* Each half must individually satisfy the parameter's type range. */
	if (iscsi_check_numerical_value(param, left_val_ptr) < 0)
		return -1;
	if (iscsi_check_numerical_value(param, right_val_ptr) < 0)
		return -1;

	left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
	right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
	*tilde_ptr = '~';

	if (right_val < left_val) {
		pr_err("Numerical range for parameter \"%s\" contains"
			" a right value which is less than the left.\n",
			param->name);
		return -1;
	}

	/*
	 * For now, enforce reasonable defaults for [I,O]FMarkInt.
	 * Split our own configured value the same way to obtain the
	 * local bounds (local_right_val is parsed but currently unused).
	 */
	tilde_ptr = strchr(param->value, '~');
	if (!tilde_ptr) {
		pr_err("Unable to locate numerical range indicator"
			" \"~\" for \"%s\".\n", param->name);
		return -1;
	}
	*tilde_ptr = '\0';

	left_val_ptr = param->value;
	right_val_ptr = param->value + strlen(left_val_ptr) + 1;

	local_left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
	local_right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
	*tilde_ptr = '~';

	if (param->set_param) {
		/*
		 * Administratively set value: reject outright if either
		 * end of the range falls below our minimum.
		 */
		if ((left_val < local_left_val) ||
		    (right_val < local_left_val)) {
			pr_err("Passed value range \"%u~%u\" is below"
				" minimum left value \"%u\" for key \"%s\","
				" rejecting.\n", left_val, right_val,
				local_left_val, param->name);
			return -1;
		}
	} else {
		/*
		 * Received during negotiation: only when the WHOLE range
		 * lies below our minimum do we answer with Reject (which
		 * is non-fatal for [I,O]FMarkInt, see iSCSI v20 A.3.2).
		 */
		if ((left_val < local_left_val) &&
		    (right_val < local_left_val)) {
			pr_err("Received value range \"%u~%u\" is"
				" below minimum left value \"%u\" for key"
				" \"%s\", rejecting.\n", left_val, right_val,
				local_left_val, param->name);
			SET_PSTATE_REJECT(param);
			if (iscsi_update_param_value(param, REJECT) < 0)
				return -1;
		}
	}

	return 0;
}
970
971static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *value)
972{
973 if (IS_PSTATE_PROPOSER(param))
974 return 0;
975
976 if (IS_TYPERANGE_AUTH_PARAM(param)) {
977 if (strcmp(value, KRB5) && strcmp(value, SPKM1) &&
978 strcmp(value, SPKM2) && strcmp(value, SRP) &&
979 strcmp(value, CHAP) && strcmp(value, NONE)) {
980 pr_err("Illegal value for \"%s\", must be"
981 " \"%s\", \"%s\", \"%s\", \"%s\", \"%s\""
982 " or \"%s\".\n", param->name, KRB5,
983 SPKM1, SPKM2, SRP, CHAP, NONE);
984 return -1;
985 }
986 }
987 if (IS_TYPERANGE_DIGEST_PARAM(param)) {
988 if (strcmp(value, CRC32C) && strcmp(value, NONE)) {
989 pr_err("Illegal value for \"%s\", must be"
990 " \"%s\" or \"%s\".\n", param->name,
991 CRC32C, NONE);
992 return -1;
993 }
994 }
995 if (IS_TYPERANGE_SESSIONTYPE(param)) {
996 if (strcmp(value, DISCOVERY) && strcmp(value, NORMAL)) {
997 pr_err("Illegal value for \"%s\", must be"
998 " \"%s\" or \"%s\".\n", param->name,
999 DISCOVERY, NORMAL);
1000 return -1;
1001 }
1002 }
1003
1004 return 0;
1005}
1006
1007/*
1008 * This function is used to pick a value range number, currently just
1009 * returns the lesser of both right values.
1010 */
1011static char *iscsi_get_value_from_number_range(
1012 struct iscsi_param *param,
1013 char *value)
1014{
1015 char *end_ptr, *tilde_ptr1 = NULL, *tilde_ptr2 = NULL;
1016 u32 acceptor_right_value, proposer_right_value;
1017
1018 tilde_ptr1 = strchr(value, '~');
1019 if (!tilde_ptr1)
1020 return NULL;
1021 *tilde_ptr1++ = '\0';
1022 proposer_right_value = simple_strtoul(tilde_ptr1, &end_ptr, 0);
1023
1024 tilde_ptr2 = strchr(param->value, '~');
1025 if (!tilde_ptr2)
1026 return NULL;
1027 *tilde_ptr2++ = '\0';
1028 acceptor_right_value = simple_strtoul(tilde_ptr2, &end_ptr, 0);
1029
1030 return (acceptor_right_value >= proposer_right_value) ?
1031 tilde_ptr1 : tilde_ptr2;
1032}
1033
/*
 * Find the first element of the proposer's comma-separated @value list
 * that also appears in the acceptor's list (param->value), honoring the
 * proposer's preference order.
 *
 * Returns a pointer to the matching element inside @value, or NULL if
 * the lists share no element.  Separating commas in both strings are
 * temporarily replaced with '\0' for the comparisons and restored
 * before every exit path.
 */
static char *iscsi_check_valuelist_for_support(
	struct iscsi_param *param,
	char *value)
{
	char *tmp1 = NULL, *tmp2 = NULL;
	char *acceptor_values = NULL, *proposer_values = NULL;

	acceptor_values = param->value;
	proposer_values = value;

	/* Outer loop: walk the proposer's elements in preference order. */
	do {
		if (!proposer_values)
			return NULL;
		tmp1 = strchr(proposer_values, ',');
		if (tmp1)
			*tmp1 = '\0';
		/* Restart the acceptor scan for each proposer element. */
		acceptor_values = param->value;
		do {
			if (!acceptor_values) {
				if (tmp1)
					*tmp1 = ',';
				return NULL;
			}
			tmp2 = strchr(acceptor_values, ',');
			if (tmp2)
				*tmp2 = '\0';
			if (!acceptor_values || !proposer_values) {
				if (tmp1)
					*tmp1 = ',';
				if (tmp2)
					*tmp2 = ',';
				return NULL;
			}
			if (!strcmp(acceptor_values, proposer_values)) {
				/* Match found; restore and return it. */
				if (tmp2)
					*tmp2 = ',';
				goto out;
			}
			if (tmp2)
				*tmp2++ = ',';

			acceptor_values = tmp2;
			if (!acceptor_values)
				break;
		} while (acceptor_values);
		if (tmp1)
			*tmp1++ = ',';
		proposer_values = tmp1;
	} while (proposer_values);

out:
	return proposer_values;
}
1087
1088static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value)
1089{
1090 u8 acceptor_boolean_value = 0, proposer_boolean_value = 0;
1091 char *negoitated_value = NULL;
1092
1093 if (IS_PSTATE_ACCEPTOR(param)) {
1094 pr_err("Received key \"%s\" twice, protocol error.\n",
1095 param->name);
1096 return -1;
1097 }
1098
1099 if (IS_PSTATE_REJECT(param))
1100 return 0;
1101
1102 if (IS_TYPE_BOOL_AND(param)) {
1103 if (!strcmp(value, YES))
1104 proposer_boolean_value = 1;
1105 if (!strcmp(param->value, YES))
1106 acceptor_boolean_value = 1;
1107 if (acceptor_boolean_value && proposer_boolean_value)
1108 do {} while (0);
1109 else {
1110 if (iscsi_update_param_value(param, NO) < 0)
1111 return -1;
1112 if (!proposer_boolean_value)
1113 SET_PSTATE_REPLY_OPTIONAL(param);
1114 }
1115 } else if (IS_TYPE_BOOL_OR(param)) {
1116 if (!strcmp(value, YES))
1117 proposer_boolean_value = 1;
1118 if (!strcmp(param->value, YES))
1119 acceptor_boolean_value = 1;
1120 if (acceptor_boolean_value || proposer_boolean_value) {
1121 if (iscsi_update_param_value(param, YES) < 0)
1122 return -1;
1123 if (proposer_boolean_value)
1124 SET_PSTATE_REPLY_OPTIONAL(param);
1125 }
1126 } else if (IS_TYPE_NUMBER(param)) {
1127 char *tmpptr, buf[10];
1128 u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0);
1129 u32 proposer_value = simple_strtoul(value, &tmpptr, 0);
1130
1131 memset(buf, 0, 10);
1132
1133 if (!strcmp(param->name, MAXCONNECTIONS) ||
1134 !strcmp(param->name, MAXBURSTLENGTH) ||
1135 !strcmp(param->name, FIRSTBURSTLENGTH) ||
1136 !strcmp(param->name, MAXOUTSTANDINGR2T) ||
1137 !strcmp(param->name, DEFAULTTIME2RETAIN) ||
1138 !strcmp(param->name, ERRORRECOVERYLEVEL)) {
1139 if (proposer_value > acceptor_value) {
1140 sprintf(buf, "%u", acceptor_value);
1141 if (iscsi_update_param_value(param,
1142 &buf[0]) < 0)
1143 return -1;
1144 } else {
1145 if (iscsi_update_param_value(param, value) < 0)
1146 return -1;
1147 }
1148 } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
1149 if (acceptor_value > proposer_value) {
1150 sprintf(buf, "%u", acceptor_value);
1151 if (iscsi_update_param_value(param,
1152 &buf[0]) < 0)
1153 return -1;
1154 } else {
1155 if (iscsi_update_param_value(param, value) < 0)
1156 return -1;
1157 }
1158 } else {
1159 if (iscsi_update_param_value(param, value) < 0)
1160 return -1;
1161 }
1162
1163 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
1164 SET_PSTATE_REPLY_OPTIONAL(param);
1165 } else if (IS_TYPE_NUMBER_RANGE(param)) {
1166 negoitated_value = iscsi_get_value_from_number_range(
1167 param, value);
1168 if (!negoitated_value)
1169 return -1;
1170 if (iscsi_update_param_value(param, negoitated_value) < 0)
1171 return -1;
1172 } else if (IS_TYPE_VALUE_LIST(param)) {
1173 negoitated_value = iscsi_check_valuelist_for_support(
1174 param, value);
1175 if (!negoitated_value) {
1176 pr_err("Proposer's value list \"%s\" contains"
1177 " no valid values from Acceptor's value list"
1178 " \"%s\".\n", value, param->value);
1179 return -1;
1180 }
1181 if (iscsi_update_param_value(param, negoitated_value) < 0)
1182 return -1;
1183 } else if (IS_PHASE_DECLARATIVE(param)) {
1184 if (iscsi_update_param_value(param, value) < 0)
1185 return -1;
1186 SET_PSTATE_REPLY_OPTIONAL(param);
1187 }
1188
1189 return 0;
1190}
1191
/*
 * Validate a response received for a key WE proposed, then store it in
 * param->value.
 *
 * For number-range keys the response must be a single number inside our
 * configured "left~right" range (or the literal Irrelevant/Reject); for
 * value-list keys it must be a single element of our list.
 *
 * Returns 0 on success, -1 on protocol error or allocation failure.
 */
static int iscsi_check_proposer_state(struct iscsi_param *param, char *value)
{
	if (IS_PSTATE_RESPONSE_GOT(param)) {
		pr_err("Received key \"%s\" twice, protocol error.\n",
				param->name);
		return -1;
	}

	if (IS_TYPE_NUMBER_RANGE(param)) {
		u32 left_val = 0, right_val = 0, recieved_value = 0;
		char *left_val_ptr = NULL, *right_val_ptr = NULL;
		char *tilde_ptr = NULL, *tmp_ptr = NULL;

		/* Irrelevant/Reject answers are stored verbatim. */
		if (!strcmp(value, IRRELEVANT) || !strcmp(value, REJECT)) {
			if (iscsi_update_param_value(param, value) < 0)
				return -1;
			return 0;
		}

		/* The response must be a single number, not a range. */
		tilde_ptr = strchr(value, '~');
		if (tilde_ptr) {
			pr_err("Illegal \"~\" in response for \"%s\".\n",
					param->name);
			return -1;
		}
		tilde_ptr = strchr(param->value, '~');
		if (!tilde_ptr) {
			pr_err("Unable to locate numerical range"
				" indicator \"~\" for \"%s\".\n", param->name);
			return -1;
		}
		/*
		 * Temporarily split our own "left~right" value at the '~'
		 * to parse both bounds; the separator is restored below.
		 */
		*tilde_ptr = '\0';

		left_val_ptr = param->value;
		right_val_ptr = param->value + strlen(left_val_ptr) + 1;
		left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
		right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
		recieved_value = simple_strtoul(value, &tmp_ptr, 0);

		*tilde_ptr = '~';

		if ((recieved_value < left_val) ||
		    (recieved_value > right_val)) {
			pr_err("Illegal response \"%s=%u\", value must"
				" be between %u and %u.\n", param->name,
				recieved_value, left_val, right_val);
			return -1;
		}
	} else if (IS_TYPE_VALUE_LIST(param)) {
		char *comma_ptr = NULL, *tmp_ptr = NULL;

		/* The response must be a single choice, not a list. */
		comma_ptr = strchr(value, ',');
		if (comma_ptr) {
			pr_err("Illegal \",\" in response for \"%s\".\n",
					param->name);
			return -1;
		}

		/* ...and that choice must come from the list we offered. */
		tmp_ptr = iscsi_check_valuelist_for_support(param, value);
		if (!tmp_ptr)
			return -1;
	}

	if (iscsi_update_param_value(param, value) < 0)
		return -1;

	return 0;
}
1260
1261static int iscsi_check_value(struct iscsi_param *param, char *value)
1262{
1263 char *comma_ptr = NULL;
1264
1265 if (!strcmp(value, REJECT)) {
1266 if (!strcmp(param->name, IFMARKINT) ||
1267 !strcmp(param->name, OFMARKINT)) {
1268 /*
1269 * Reject is not fatal for [I,O]FMarkInt, and causes
1270 * [I,O]FMarker to be reset to No. (See iSCSI v20 A.3.2)
1271 */
1272 SET_PSTATE_REJECT(param);
1273 return 0;
1274 }
1275 pr_err("Received %s=%s\n", param->name, value);
1276 return -1;
1277 }
1278 if (!strcmp(value, IRRELEVANT)) {
1279 pr_debug("Received %s=%s\n", param->name, value);
1280 SET_PSTATE_IRRELEVANT(param);
1281 return 0;
1282 }
1283 if (!strcmp(value, NOTUNDERSTOOD)) {
1284 if (!IS_PSTATE_PROPOSER(param)) {
1285 pr_err("Received illegal offer %s=%s\n",
1286 param->name, value);
1287 return -1;
1288 }
1289
1290/* #warning FIXME: Add check for X-ExtensionKey here */
1291 pr_err("Standard iSCSI key \"%s\" cannot be answered"
1292 " with \"%s\", protocol error.\n", param->name, value);
1293 return -1;
1294 }
1295
1296 do {
1297 comma_ptr = NULL;
1298 comma_ptr = strchr(value, ',');
1299
1300 if (comma_ptr && !IS_TYPE_VALUE_LIST(param)) {
1301 pr_err("Detected value seperator \",\", but"
1302 " key \"%s\" does not allow a value list,"
1303 " protocol error.\n", param->name);
1304 return -1;
1305 }
1306 if (comma_ptr)
1307 *comma_ptr = '\0';
1308
1309 if (strlen(value) > VALUE_MAXLEN) {
1310 pr_err("Value for key \"%s\" exceeds %d,"
1311 " protocol error.\n", param->name,
1312 VALUE_MAXLEN);
1313 return -1;
1314 }
1315
1316 if (IS_TYPE_BOOL_AND(param) || IS_TYPE_BOOL_OR(param)) {
1317 if (iscsi_check_boolean_value(param, value) < 0)
1318 return -1;
1319 } else if (IS_TYPE_NUMBER(param)) {
1320 if (iscsi_check_numerical_value(param, value) < 0)
1321 return -1;
1322 } else if (IS_TYPE_NUMBER_RANGE(param)) {
1323 if (iscsi_check_numerical_range_value(param, value) < 0)
1324 return -1;
1325 } else if (IS_TYPE_STRING(param) || IS_TYPE_VALUE_LIST(param)) {
1326 if (iscsi_check_string_or_list_value(param, value) < 0)
1327 return -1;
1328 } else {
1329 pr_err("Huh? 0x%02x\n", param->type);
1330 return -1;
1331 }
1332
1333 if (comma_ptr)
1334 *comma_ptr++ = ',';
1335
1336 value = comma_ptr;
1337 } while (value);
1338
1339 return 0;
1340}
1341
1342static struct iscsi_param *__iscsi_check_key(
1343 char *key,
1344 int sender,
1345 struct iscsi_param_list *param_list)
1346{
1347 struct iscsi_param *param;
1348
1349 if (strlen(key) > KEY_MAXLEN) {
1350 pr_err("Length of key name \"%s\" exceeds %d.\n",
1351 key, KEY_MAXLEN);
1352 return NULL;
1353 }
1354
1355 param = iscsi_find_param_from_key(key, param_list);
1356 if (!param)
1357 return NULL;
1358
1359 if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
1360 pr_err("Key \"%s\" may not be sent to %s,"
1361 " protocol error.\n", param->name,
1362 (sender & SENDER_RECEIVER) ? "target" : "initiator");
1363 return NULL;
1364 }
1365
1366 if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
1367 pr_err("Key \"%s\" may not be sent to %s,"
1368 " protocol error.\n", param->name,
1369 (sender & SENDER_RECEIVER) ? "initiator" : "target");
1370 return NULL;
1371 }
1372
1373 return param;
1374}
1375
1376static struct iscsi_param *iscsi_check_key(
1377 char *key,
1378 int phase,
1379 int sender,
1380 struct iscsi_param_list *param_list)
1381{
1382 struct iscsi_param *param;
1383 /*
1384 * Key name length must not exceed 63 bytes. (See iSCSI v20 5.1)
1385 */
1386 if (strlen(key) > KEY_MAXLEN) {
1387 pr_err("Length of key name \"%s\" exceeds %d.\n",
1388 key, KEY_MAXLEN);
1389 return NULL;
1390 }
1391
1392 param = iscsi_find_param_from_key(key, param_list);
1393 if (!param)
1394 return NULL;
1395
1396 if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
1397 pr_err("Key \"%s\" may not be sent to %s,"
1398 " protocol error.\n", param->name,
1399 (sender & SENDER_RECEIVER) ? "target" : "initiator");
1400 return NULL;
1401 }
1402 if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
1403 pr_err("Key \"%s\" may not be sent to %s,"
1404 " protocol error.\n", param->name,
1405 (sender & SENDER_RECEIVER) ? "initiator" : "target");
1406 return NULL;
1407 }
1408
1409 if (IS_PSTATE_ACCEPTOR(param)) {
1410 pr_err("Key \"%s\" received twice, protocol error.\n",
1411 key);
1412 return NULL;
1413 }
1414
1415 if (!phase)
1416 return param;
1417
1418 if (!(param->phase & phase)) {
1419 pr_err("Key \"%s\" may not be negotiated during ",
1420 param->name);
1421 switch (phase) {
1422 case PHASE_SECURITY:
1423 pr_debug("Security phase.\n");
1424 break;
1425 case PHASE_OPERATIONAL:
1426 pr_debug("Operational phase.\n");
1427 default:
1428 pr_debug("Unknown phase.\n");
1429 }
1430 return NULL;
1431 }
1432
1433 return param;
1434}
1435
1436static int iscsi_enforce_integrity_rules(
1437 u8 phase,
1438 struct iscsi_param_list *param_list)
1439{
1440 char *tmpptr;
1441 u8 DataSequenceInOrder = 0;
1442 u8 ErrorRecoveryLevel = 0, SessionType = 0;
1443 u8 IFMarker = 0, OFMarker = 0;
1444 u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0;
1445 u32 FirstBurstLength = 0, MaxBurstLength = 0;
1446 struct iscsi_param *param = NULL;
1447
1448 list_for_each_entry(param, &param_list->param_list, p_list) {
1449 if (!(param->phase & phase))
1450 continue;
1451 if (!strcmp(param->name, SESSIONTYPE))
1452 if (!strcmp(param->value, NORMAL))
1453 SessionType = 1;
1454 if (!strcmp(param->name, ERRORRECOVERYLEVEL))
1455 ErrorRecoveryLevel = simple_strtoul(param->value,
1456 &tmpptr, 0);
1457 if (!strcmp(param->name, DATASEQUENCEINORDER))
1458 if (!strcmp(param->value, YES))
1459 DataSequenceInOrder = 1;
1460 if (!strcmp(param->name, MAXBURSTLENGTH))
1461 MaxBurstLength = simple_strtoul(param->value,
1462 &tmpptr, 0);
1463 if (!strcmp(param->name, IFMARKER))
1464 if (!strcmp(param->value, YES))
1465 IFMarker = 1;
1466 if (!strcmp(param->name, OFMARKER))
1467 if (!strcmp(param->value, YES))
1468 OFMarker = 1;
1469 if (!strcmp(param->name, IFMARKINT))
1470 if (!strcmp(param->value, REJECT))
1471 IFMarkInt_Reject = 1;
1472 if (!strcmp(param->name, OFMARKINT))
1473 if (!strcmp(param->value, REJECT))
1474 OFMarkInt_Reject = 1;
1475 }
1476
1477 list_for_each_entry(param, &param_list->param_list, p_list) {
1478 if (!(param->phase & phase))
1479 continue;
1480 if (!SessionType && (!IS_PSTATE_ACCEPTOR(param) &&
1481 (strcmp(param->name, IFMARKER) &&
1482 strcmp(param->name, OFMARKER) &&
1483 strcmp(param->name, IFMARKINT) &&
1484 strcmp(param->name, OFMARKINT))))
1485 continue;
1486 if (!strcmp(param->name, MAXOUTSTANDINGR2T) &&
1487 DataSequenceInOrder && (ErrorRecoveryLevel > 0)) {
1488 if (strcmp(param->value, "1")) {
1489 if (iscsi_update_param_value(param, "1") < 0)
1490 return -1;
1491 pr_debug("Reset \"%s\" to \"%s\".\n",
1492 param->name, param->value);
1493 }
1494 }
1495 if (!strcmp(param->name, MAXCONNECTIONS) && !SessionType) {
1496 if (strcmp(param->value, "1")) {
1497 if (iscsi_update_param_value(param, "1") < 0)
1498 return -1;
1499 pr_debug("Reset \"%s\" to \"%s\".\n",
1500 param->name, param->value);
1501 }
1502 }
1503 if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
1504 FirstBurstLength = simple_strtoul(param->value,
1505 &tmpptr, 0);
1506 if (FirstBurstLength > MaxBurstLength) {
1507 char tmpbuf[10];
1508 memset(tmpbuf, 0, 10);
1509 sprintf(tmpbuf, "%u", MaxBurstLength);
1510 if (iscsi_update_param_value(param, tmpbuf))
1511 return -1;
1512 pr_debug("Reset \"%s\" to \"%s\".\n",
1513 param->name, param->value);
1514 }
1515 }
1516 if (!strcmp(param->name, IFMARKER) && IFMarkInt_Reject) {
1517 if (iscsi_update_param_value(param, NO) < 0)
1518 return -1;
1519 IFMarker = 0;
1520 pr_debug("Reset \"%s\" to \"%s\".\n",
1521 param->name, param->value);
1522 }
1523 if (!strcmp(param->name, OFMARKER) && OFMarkInt_Reject) {
1524 if (iscsi_update_param_value(param, NO) < 0)
1525 return -1;
1526 OFMarker = 0;
1527 pr_debug("Reset \"%s\" to \"%s\".\n",
1528 param->name, param->value);
1529 }
1530 if (!strcmp(param->name, IFMARKINT) && !IFMarker) {
1531 if (!strcmp(param->value, REJECT))
1532 continue;
1533 param->state &= ~PSTATE_NEGOTIATE;
1534 if (iscsi_update_param_value(param, IRRELEVANT) < 0)
1535 return -1;
1536 pr_debug("Reset \"%s\" to \"%s\".\n",
1537 param->name, param->value);
1538 }
1539 if (!strcmp(param->name, OFMARKINT) && !OFMarker) {
1540 if (!strcmp(param->value, REJECT))
1541 continue;
1542 param->state &= ~PSTATE_NEGOTIATE;
1543 if (iscsi_update_param_value(param, IRRELEVANT) < 0)
1544 return -1;
1545 pr_debug("Reset \"%s\" to \"%s\".\n",
1546 param->name, param->value);
1547 }
1548 }
1549
1550 return 0;
1551}
1552
1553int iscsi_decode_text_input(
1554 u8 phase,
1555 u8 sender,
1556 char *textbuf,
1557 u32 length,
1558 struct iscsi_param_list *param_list)
1559{
1560 char *tmpbuf, *start = NULL, *end = NULL;
1561
1562 tmpbuf = kzalloc(length + 1, GFP_KERNEL);
1563 if (!tmpbuf) {
1564 pr_err("Unable to allocate memory for tmpbuf.\n");
1565 return -1;
1566 }
1567
1568 memcpy(tmpbuf, textbuf, length);
1569 tmpbuf[length] = '\0';
1570 start = tmpbuf;
1571 end = (start + length);
1572
1573 while (start < end) {
1574 char *key, *value;
1575 struct iscsi_param *param;
1576
1577 if (iscsi_extract_key_value(start, &key, &value) < 0) {
1578 kfree(tmpbuf);
1579 return -1;
1580 }
1581
1582 pr_debug("Got key: %s=%s\n", key, value);
1583
1584 if (phase & PHASE_SECURITY) {
1585 if (iscsi_check_for_auth_key(key) > 0) {
1586 char *tmpptr = key + strlen(key);
1587 *tmpptr = '=';
1588 kfree(tmpbuf);
1589 return 1;
1590 }
1591 }
1592
1593 param = iscsi_check_key(key, phase, sender, param_list);
1594 if (!param) {
1595 if (iscsi_add_notunderstood_response(key,
1596 value, param_list) < 0) {
1597 kfree(tmpbuf);
1598 return -1;
1599 }
1600 start += strlen(key) + strlen(value) + 2;
1601 continue;
1602 }
1603 if (iscsi_check_value(param, value) < 0) {
1604 kfree(tmpbuf);
1605 return -1;
1606 }
1607
1608 start += strlen(key) + strlen(value) + 2;
1609
1610 if (IS_PSTATE_PROPOSER(param)) {
1611 if (iscsi_check_proposer_state(param, value) < 0) {
1612 kfree(tmpbuf);
1613 return -1;
1614 }
1615 SET_PSTATE_RESPONSE_GOT(param);
1616 } else {
1617 if (iscsi_check_acceptor_state(param, value) < 0) {
1618 kfree(tmpbuf);
1619 return -1;
1620 }
1621 SET_PSTATE_ACCEPTOR(param);
1622 }
1623 }
1624
1625 kfree(tmpbuf);
1626 return 0;
1627}
1628
1629int iscsi_encode_text_output(
1630 u8 phase,
1631 u8 sender,
1632 char *textbuf,
1633 u32 *length,
1634 struct iscsi_param_list *param_list)
1635{
1636 char *output_buf = NULL;
1637 struct iscsi_extra_response *er;
1638 struct iscsi_param *param;
1639
1640 output_buf = textbuf + *length;
1641
1642 if (iscsi_enforce_integrity_rules(phase, param_list) < 0)
1643 return -1;
1644
1645 list_for_each_entry(param, &param_list->param_list, p_list) {
1646 if (!(param->sender & sender))
1647 continue;
1648 if (IS_PSTATE_ACCEPTOR(param) &&
1649 !IS_PSTATE_RESPONSE_SENT(param) &&
1650 !IS_PSTATE_REPLY_OPTIONAL(param) &&
1651 (param->phase & phase)) {
1652 *length += sprintf(output_buf, "%s=%s",
1653 param->name, param->value);
1654 *length += 1;
1655 output_buf = textbuf + *length;
1656 SET_PSTATE_RESPONSE_SENT(param);
1657 pr_debug("Sending key: %s=%s\n",
1658 param->name, param->value);
1659 continue;
1660 }
1661 if (IS_PSTATE_NEGOTIATE(param) &&
1662 !IS_PSTATE_ACCEPTOR(param) &&
1663 !IS_PSTATE_PROPOSER(param) &&
1664 (param->phase & phase)) {
1665 *length += sprintf(output_buf, "%s=%s",
1666 param->name, param->value);
1667 *length += 1;
1668 output_buf = textbuf + *length;
1669 SET_PSTATE_PROPOSER(param);
1670 iscsi_check_proposer_for_optional_reply(param);
1671 pr_debug("Sending key: %s=%s\n",
1672 param->name, param->value);
1673 }
1674 }
1675
1676 list_for_each_entry(er, &param_list->extra_response_list, er_list) {
1677 *length += sprintf(output_buf, "%s=%s", er->key, er->value);
1678 *length += 1;
1679 output_buf = textbuf + *length;
1680 pr_debug("Sending key: %s=%s\n", er->key, er->value);
1681 }
1682 iscsi_release_extra_responses(param_list);
1683
1684 return 0;
1685}
1686
1687int iscsi_check_negotiated_keys(struct iscsi_param_list *param_list)
1688{
1689 int ret = 0;
1690 struct iscsi_param *param;
1691
1692 list_for_each_entry(param, &param_list->param_list, p_list) {
1693 if (IS_PSTATE_NEGOTIATE(param) &&
1694 IS_PSTATE_PROPOSER(param) &&
1695 !IS_PSTATE_RESPONSE_GOT(param) &&
1696 !IS_PSTATE_REPLY_OPTIONAL(param) &&
1697 !IS_PHASE_DECLARATIVE(param)) {
1698 pr_err("No response for proposed key \"%s\".\n",
1699 param->name);
1700 ret = -1;
1701 }
1702 }
1703
1704 return ret;
1705}
1706
1707int iscsi_change_param_value(
1708 char *keyvalue,
1709 struct iscsi_param_list *param_list,
1710 int check_key)
1711{
1712 char *key = NULL, *value = NULL;
1713 struct iscsi_param *param;
1714 int sender = 0;
1715
1716 if (iscsi_extract_key_value(keyvalue, &key, &value) < 0)
1717 return -1;
1718
1719 if (!check_key) {
1720 param = __iscsi_check_key(keyvalue, sender, param_list);
1721 if (!param)
1722 return -1;
1723 } else {
1724 param = iscsi_check_key(keyvalue, 0, sender, param_list);
1725 if (!param)
1726 return -1;
1727
1728 param->set_param = 1;
1729 if (iscsi_check_value(param, value) < 0) {
1730 param->set_param = 0;
1731 return -1;
1732 }
1733 param->set_param = 0;
1734 }
1735
1736 if (iscsi_update_param_value(param, value) < 0)
1737 return -1;
1738
1739 return 0;
1740}
1741
1742void iscsi_set_connection_parameters(
1743 struct iscsi_conn_ops *ops,
1744 struct iscsi_param_list *param_list)
1745{
1746 char *tmpptr;
1747 struct iscsi_param *param;
1748
1749 pr_debug("---------------------------------------------------"
1750 "---------------\n");
1751 list_for_each_entry(param, &param_list->param_list, p_list) {
1752 if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
1753 continue;
1754 if (!strcmp(param->name, AUTHMETHOD)) {
1755 pr_debug("AuthMethod: %s\n",
1756 param->value);
1757 } else if (!strcmp(param->name, HEADERDIGEST)) {
1758 ops->HeaderDigest = !strcmp(param->value, CRC32C);
1759 pr_debug("HeaderDigest: %s\n",
1760 param->value);
1761 } else if (!strcmp(param->name, DATADIGEST)) {
1762 ops->DataDigest = !strcmp(param->value, CRC32C);
1763 pr_debug("DataDigest: %s\n",
1764 param->value);
1765 } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
1766 ops->MaxRecvDataSegmentLength =
1767 simple_strtoul(param->value, &tmpptr, 0);
1768 pr_debug("MaxRecvDataSegmentLength: %s\n",
1769 param->value);
1770 } else if (!strcmp(param->name, OFMARKER)) {
1771 ops->OFMarker = !strcmp(param->value, YES);
1772 pr_debug("OFMarker: %s\n",
1773 param->value);
1774 } else if (!strcmp(param->name, IFMARKER)) {
1775 ops->IFMarker = !strcmp(param->value, YES);
1776 pr_debug("IFMarker: %s\n",
1777 param->value);
1778 } else if (!strcmp(param->name, OFMARKINT)) {
1779 ops->OFMarkInt =
1780 simple_strtoul(param->value, &tmpptr, 0);
1781 pr_debug("OFMarkInt: %s\n",
1782 param->value);
1783 } else if (!strcmp(param->name, IFMARKINT)) {
1784 ops->IFMarkInt =
1785 simple_strtoul(param->value, &tmpptr, 0);
1786 pr_debug("IFMarkInt: %s\n",
1787 param->value);
1788 }
1789 }
1790 pr_debug("----------------------------------------------------"
1791 "--------------\n");
1792}
1793
1794void iscsi_set_session_parameters(
1795 struct iscsi_sess_ops *ops,
1796 struct iscsi_param_list *param_list,
1797 int leading)
1798{
1799 char *tmpptr;
1800 struct iscsi_param *param;
1801
1802 pr_debug("----------------------------------------------------"
1803 "--------------\n");
1804 list_for_each_entry(param, &param_list->param_list, p_list) {
1805 if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
1806 continue;
1807 if (!strcmp(param->name, INITIATORNAME)) {
1808 if (!param->value)
1809 continue;
1810 if (leading)
1811 snprintf(ops->InitiatorName,
1812 sizeof(ops->InitiatorName),
1813 "%s", param->value);
1814 pr_debug("InitiatorName: %s\n",
1815 param->value);
1816 } else if (!strcmp(param->name, INITIATORALIAS)) {
1817 if (!param->value)
1818 continue;
1819 snprintf(ops->InitiatorAlias,
1820 sizeof(ops->InitiatorAlias),
1821 "%s", param->value);
1822 pr_debug("InitiatorAlias: %s\n",
1823 param->value);
1824 } else if (!strcmp(param->name, TARGETNAME)) {
1825 if (!param->value)
1826 continue;
1827 if (leading)
1828 snprintf(ops->TargetName,
1829 sizeof(ops->TargetName),
1830 "%s", param->value);
1831 pr_debug("TargetName: %s\n",
1832 param->value);
1833 } else if (!strcmp(param->name, TARGETALIAS)) {
1834 if (!param->value)
1835 continue;
1836 snprintf(ops->TargetAlias, sizeof(ops->TargetAlias),
1837 "%s", param->value);
1838 pr_debug("TargetAlias: %s\n",
1839 param->value);
1840 } else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
1841 ops->TargetPortalGroupTag =
1842 simple_strtoul(param->value, &tmpptr, 0);
1843 pr_debug("TargetPortalGroupTag: %s\n",
1844 param->value);
1845 } else if (!strcmp(param->name, MAXCONNECTIONS)) {
1846 ops->MaxConnections =
1847 simple_strtoul(param->value, &tmpptr, 0);
1848 pr_debug("MaxConnections: %s\n",
1849 param->value);
1850 } else if (!strcmp(param->name, INITIALR2T)) {
1851 ops->InitialR2T = !strcmp(param->value, YES);
1852 pr_debug("InitialR2T: %s\n",
1853 param->value);
1854 } else if (!strcmp(param->name, IMMEDIATEDATA)) {
1855 ops->ImmediateData = !strcmp(param->value, YES);
1856 pr_debug("ImmediateData: %s\n",
1857 param->value);
1858 } else if (!strcmp(param->name, MAXBURSTLENGTH)) {
1859 ops->MaxBurstLength =
1860 simple_strtoul(param->value, &tmpptr, 0);
1861 pr_debug("MaxBurstLength: %s\n",
1862 param->value);
1863 } else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
1864 ops->FirstBurstLength =
1865 simple_strtoul(param->value, &tmpptr, 0);
1866 pr_debug("FirstBurstLength: %s\n",
1867 param->value);
1868 } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
1869 ops->DefaultTime2Wait =
1870 simple_strtoul(param->value, &tmpptr, 0);
1871 pr_debug("DefaultTime2Wait: %s\n",
1872 param->value);
1873 } else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
1874 ops->DefaultTime2Retain =
1875 simple_strtoul(param->value, &tmpptr, 0);
1876 pr_debug("DefaultTime2Retain: %s\n",
1877 param->value);
1878 } else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
1879 ops->MaxOutstandingR2T =
1880 simple_strtoul(param->value, &tmpptr, 0);
1881 pr_debug("MaxOutstandingR2T: %s\n",
1882 param->value);
1883 } else if (!strcmp(param->name, DATAPDUINORDER)) {
1884 ops->DataPDUInOrder = !strcmp(param->value, YES);
1885 pr_debug("DataPDUInOrder: %s\n",
1886 param->value);
1887 } else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
1888 ops->DataSequenceInOrder = !strcmp(param->value, YES);
1889 pr_debug("DataSequenceInOrder: %s\n",
1890 param->value);
1891 } else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
1892 ops->ErrorRecoveryLevel =
1893 simple_strtoul(param->value, &tmpptr, 0);
1894 pr_debug("ErrorRecoveryLevel: %s\n",
1895 param->value);
1896 } else if (!strcmp(param->name, SESSIONTYPE)) {
1897 ops->SessionType = !strcmp(param->value, DISCOVERY);
1898 pr_debug("SessionType: %s\n",
1899 param->value);
1900 }
1901 }
1902 pr_debug("----------------------------------------------------"
1903 "--------------\n");
1904
1905}
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
new file mode 100644
index 000000000000..6a37fd6f1285
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -0,0 +1,269 @@
1#ifndef ISCSI_PARAMETERS_H
2#define ISCSI_PARAMETERS_H
3
/*
 * Reply queued for a key the target did not recognize; flushed back to
 * the initiator by iscsi_encode_text_output() as "<key>=<value>"
 * (value is NOTUNDERSTOOD when added via
 * iscsi_add_notunderstood_response()).
 */
4struct iscsi_extra_response {
5	char key[64];
6	char value[32];
7	struct list_head er_list;	/* entry in iscsi_param_list->extra_response_list */
8} ____cacheline_aligned;
9
/*
 * One negotiable login key and its current negotiation state.
 */
10struct iscsi_param {
11	char *name;		/* key name, e.g. "MaxBurstLength" */
12	char *value;		/* current value string */
13	u8 set_param;		/* set while iscsi_change_param_value() validates */
14	u8 phase;		/* PHASE_* bitmask: login phases the key is legal in */
15	u8 scope;		/* SCOPE_* bitmask */
16	u8 sender;		/* SENDER_* bitmask: who may send this key */
17	u8 type;		/* TYPE_* value-format bitmask */
18	u8 use;			/* USE_* bitmask */
19	u16 type_range;		/* TYPERANGE_* legal-value bitmask */
20	u32 state;		/* PSTATE_* negotiation-progress bitmask */
21	struct list_head p_list;	/* entry in iscsi_param_list->param_list */
22} ____cacheline_aligned;
23
24extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
25extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
26extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
27extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
28extern void iscsi_print_params(struct iscsi_param_list *);
29extern int iscsi_create_default_params(struct iscsi_param_list **);
30extern int iscsi_set_keys_to_negotiate(int, struct iscsi_param_list *);
31extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
32extern int iscsi_copy_param_list(struct iscsi_param_list **,
33 struct iscsi_param_list *, int);
34extern int iscsi_change_param_value(char *, struct iscsi_param_list *, int);
35extern void iscsi_release_param_list(struct iscsi_param_list *);
36extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *);
37extern int iscsi_extract_key_value(char *, char **, char **);
38extern int iscsi_update_param_value(struct iscsi_param *, char *);
39extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_param_list *);
40extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
41 struct iscsi_param_list *);
42extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
43extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
44 struct iscsi_param_list *);
45extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
46 struct iscsi_param_list *, int);
47
48#define YES "Yes"
49#define NO "No"
50#define ALL "All"
51#define IRRELEVANT "Irrelevant"
52#define NONE "None"
53#define NOTUNDERSTOOD "NotUnderstood"
54#define REJECT "Reject"
55
56/*
57 * The Parameter Names.
58 */
59#define AUTHMETHOD "AuthMethod"
60#define HEADERDIGEST "HeaderDigest"
61#define DATADIGEST "DataDigest"
62#define MAXCONNECTIONS "MaxConnections"
63#define SENDTARGETS "SendTargets"
64#define TARGETNAME "TargetName"
65#define INITIATORNAME "InitiatorName"
66#define TARGETALIAS "TargetAlias"
67#define INITIATORALIAS "InitiatorAlias"
68#define TARGETADDRESS "TargetAddress"
69#define TARGETPORTALGROUPTAG "TargetPortalGroupTag"
70#define INITIALR2T "InitialR2T"
71#define IMMEDIATEDATA "ImmediateData"
72#define MAXRECVDATASEGMENTLENGTH "MaxRecvDataSegmentLength"
73#define MAXBURSTLENGTH "MaxBurstLength"
74#define FIRSTBURSTLENGTH "FirstBurstLength"
75#define DEFAULTTIME2WAIT "DefaultTime2Wait"
76#define DEFAULTTIME2RETAIN "DefaultTime2Retain"
77#define MAXOUTSTANDINGR2T "MaxOutstandingR2T"
78#define DATAPDUINORDER "DataPDUInOrder"
79#define DATASEQUENCEINORDER "DataSequenceInOrder"
80#define ERRORRECOVERYLEVEL "ErrorRecoveryLevel"
81#define SESSIONTYPE "SessionType"
82#define IFMARKER "IFMarker"
83#define OFMARKER "OFMarker"
84#define IFMARKINT "IFMarkInt"
85#define OFMARKINT "OFMarkInt"
86#define X_EXTENSIONKEY "X-com.sbei.version"
87#define X_EXTENSIONKEY_CISCO_NEW "X-com.cisco.protocol"
88#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft"
89
90/*
91 * For AuthMethod.
92 */
93#define KRB5 "KRB5"
94#define SPKM1 "SPKM1"
95#define SPKM2 "SPKM2"
96#define SRP "SRP"
97#define CHAP "CHAP"
98
99/*
100 * Initial values for Parameter Negotiation.
101 */
102#define INITIAL_AUTHMETHOD CHAP
103#define INITIAL_HEADERDIGEST "CRC32C,None"
104#define INITIAL_DATADIGEST "CRC32C,None"
105#define INITIAL_MAXCONNECTIONS "1"
106#define INITIAL_SENDTARGETS ALL
107#define INITIAL_TARGETNAME "LIO.Target"
108#define INITIAL_INITIATORNAME "LIO.Initiator"
109#define INITIAL_TARGETALIAS "LIO Target"
110#define INITIAL_INITIATORALIAS "LIO Initiator"
111#define INITIAL_TARGETADDRESS "0.0.0.0:0000,0"
112#define INITIAL_TARGETPORTALGROUPTAG "1"
113#define INITIAL_INITIALR2T YES
114#define INITIAL_IMMEDIATEDATA YES
115#define INITIAL_MAXRECVDATASEGMENTLENGTH "8192"
116#define INITIAL_MAXBURSTLENGTH "262144"
117#define INITIAL_FIRSTBURSTLENGTH "65536"
118#define INITIAL_DEFAULTTIME2WAIT "2"
119#define INITIAL_DEFAULTTIME2RETAIN "20"
120#define INITIAL_MAXOUTSTANDINGR2T "1"
121#define INITIAL_DATAPDUINORDER YES
122#define INITIAL_DATASEQUENCEINORDER YES
123#define INITIAL_ERRORRECOVERYLEVEL "0"
124#define INITIAL_SESSIONTYPE NORMAL
125#define INITIAL_IFMARKER NO
126#define INITIAL_OFMARKER NO
127#define INITIAL_IFMARKINT "2048~65535"
128#define INITIAL_OFMARKINT "2048~65535"
129
130/*
131 * For [Header,Data]Digests.
132 */
133#define CRC32C "CRC32C"
134
135/*
136 * For SessionType.
137 */
138#define DISCOVERY "Discovery"
139#define NORMAL "Normal"
140
141/*
142 * struct iscsi_param->use
143 */
144#define USE_LEADING_ONLY 0x01
145#define USE_INITIAL_ONLY 0x02
146#define USE_ALL 0x04
147
148#define IS_USE_LEADING_ONLY(p) ((p)->use & USE_LEADING_ONLY)
149#define IS_USE_INITIAL_ONLY(p) ((p)->use & USE_INITIAL_ONLY)
150#define IS_USE_ALL(p) ((p)->use & USE_ALL)
151
152#define SET_USE_INITIAL_ONLY(p) ((p)->use |= USE_INITIAL_ONLY)
153
154/*
155 * struct iscsi_param->sender
156 */
157#define SENDER_INITIATOR 0x01
158#define SENDER_TARGET 0x02
159#define SENDER_BOTH 0x03
160/* Used in iscsi_check_key() */
161#define SENDER_RECEIVER 0x04
162
163#define IS_SENDER_INITIATOR(p) ((p)->sender & SENDER_INITIATOR)
164#define IS_SENDER_TARGET(p) ((p)->sender & SENDER_TARGET)
165#define IS_SENDER_BOTH(p) ((p)->sender & SENDER_BOTH)
166
167/*
168 * struct iscsi_param->scope
169 */
170#define SCOPE_CONNECTION_ONLY 0x01
171#define SCOPE_SESSION_WIDE 0x02
172
173#define IS_SCOPE_CONNECTION_ONLY(p) ((p)->scope & SCOPE_CONNECTION_ONLY)
174#define IS_SCOPE_SESSION_WIDE(p) ((p)->scope & SCOPE_SESSION_WIDE)
175
176/*
177 * struct iscsi_param->phase
178 */
179#define PHASE_SECURITY 0x01
180#define PHASE_OPERATIONAL 0x02
181#define PHASE_DECLARATIVE 0x04
182#define PHASE_FFP0 0x08
183
184#define IS_PHASE_SECURITY(p) ((p)->phase & PHASE_SECURITY)
185#define IS_PHASE_OPERATIONAL(p) ((p)->phase & PHASE_OPERATIONAL)
186#define IS_PHASE_DECLARATIVE(p) ((p)->phase & PHASE_DECLARATIVE)
187#define IS_PHASE_FFP0(p) ((p)->phase & PHASE_FFP0)
188
189/*
190 * struct iscsi_param->type
191 */
192#define TYPE_BOOL_AND 0x01
193#define TYPE_BOOL_OR 0x02
194#define TYPE_NUMBER 0x04
195#define TYPE_NUMBER_RANGE 0x08
196#define TYPE_STRING 0x10
197#define TYPE_VALUE_LIST 0x20
198
199#define IS_TYPE_BOOL_AND(p) ((p)->type & TYPE_BOOL_AND)
200#define IS_TYPE_BOOL_OR(p) ((p)->type & TYPE_BOOL_OR)
201#define IS_TYPE_NUMBER(p) ((p)->type & TYPE_NUMBER)
202#define IS_TYPE_NUMBER_RANGE(p) ((p)->type & TYPE_NUMBER_RANGE)
203#define IS_TYPE_STRING(p) ((p)->type & TYPE_STRING)
204#define IS_TYPE_VALUE_LIST(p) ((p)->type & TYPE_VALUE_LIST)
205
206/*
207 * struct iscsi_param->type_range
208 */
209#define TYPERANGE_BOOL_AND 0x0001
210#define TYPERANGE_BOOL_OR 0x0002
211#define TYPERANGE_0_TO_2 0x0004
212#define TYPERANGE_0_TO_3600 0x0008
213#define TYPERANGE_0_TO_32767 0x0010
214#define TYPERANGE_0_TO_65535 0x0020
215#define TYPERANGE_1_TO_65535 0x0040
216#define TYPERANGE_2_TO_3600 0x0080
217#define TYPERANGE_512_TO_16777215 0x0100
218#define TYPERANGE_AUTH 0x0200
219#define TYPERANGE_DIGEST 0x0400
220#define TYPERANGE_ISCSINAME 0x0800
221#define TYPERANGE_MARKINT 0x1000
222#define TYPERANGE_SESSIONTYPE 0x2000
223#define TYPERANGE_TARGETADDRESS 0x4000
224#define TYPERANGE_UTF8 0x8000
225
226#define IS_TYPERANGE_0_TO_2(p) ((p)->type_range & TYPERANGE_0_TO_2)
227#define IS_TYPERANGE_0_TO_3600(p) ((p)->type_range & TYPERANGE_0_TO_3600)
228#define IS_TYPERANGE_0_TO_32767(p) ((p)->type_range & TYPERANGE_0_TO_32767)
229#define IS_TYPERANGE_0_TO_65535(p) ((p)->type_range & TYPERANGE_0_TO_65535)
230#define IS_TYPERANGE_1_TO_65535(p) ((p)->type_range & TYPERANGE_1_TO_65535)
231#define IS_TYPERANGE_2_TO_3600(p) ((p)->type_range & TYPERANGE_2_TO_3600)
232#define IS_TYPERANGE_512_TO_16777215(p) ((p)->type_range & \
233 TYPERANGE_512_TO_16777215)
234#define IS_TYPERANGE_AUTH_PARAM(p) ((p)->type_range & TYPERANGE_AUTH)
235#define IS_TYPERANGE_DIGEST_PARAM(p) ((p)->type_range & TYPERANGE_DIGEST)
236#define IS_TYPERANGE_SESSIONTYPE(p) ((p)->type_range & \
237 TYPERANGE_SESSIONTYPE)
238
239/*
240 * struct iscsi_param->state
241 */
242#define PSTATE_ACCEPTOR 0x01
243#define PSTATE_NEGOTIATE 0x02
244#define PSTATE_PROPOSER 0x04
245#define PSTATE_IRRELEVANT 0x08
246#define PSTATE_REJECT 0x10
247#define PSTATE_REPLY_OPTIONAL 0x20
248#define PSTATE_RESPONSE_GOT 0x40
249#define PSTATE_RESPONSE_SENT 0x80
250
251#define IS_PSTATE_ACCEPTOR(p) ((p)->state & PSTATE_ACCEPTOR)
252#define IS_PSTATE_NEGOTIATE(p) ((p)->state & PSTATE_NEGOTIATE)
253#define IS_PSTATE_PROPOSER(p) ((p)->state & PSTATE_PROPOSER)
254#define IS_PSTATE_IRRELEVANT(p) ((p)->state & PSTATE_IRRELEVANT)
255#define IS_PSTATE_REJECT(p) ((p)->state & PSTATE_REJECT)
256#define IS_PSTATE_REPLY_OPTIONAL(p) ((p)->state & PSTATE_REPLY_OPTIONAL)
257#define IS_PSTATE_RESPONSE_GOT(p) ((p)->state & PSTATE_RESPONSE_GOT)
258#define IS_PSTATE_RESPONSE_SENT(p) ((p)->state & PSTATE_RESPONSE_SENT)
259
260#define SET_PSTATE_ACCEPTOR(p) ((p)->state |= PSTATE_ACCEPTOR)
261#define SET_PSTATE_NEGOTIATE(p) ((p)->state |= PSTATE_NEGOTIATE)
262#define SET_PSTATE_PROPOSER(p) ((p)->state |= PSTATE_PROPOSER)
263#define SET_PSTATE_IRRELEVANT(p) ((p)->state |= PSTATE_IRRELEVANT)
264#define SET_PSTATE_REJECT(p) ((p)->state |= PSTATE_REJECT)
265#define SET_PSTATE_REPLY_OPTIONAL(p) ((p)->state |= PSTATE_REPLY_OPTIONAL)
266#define SET_PSTATE_RESPONSE_GOT(p) ((p)->state |= PSTATE_RESPONSE_GOT)
267#define SET_PSTATE_RESPONSE_SENT(p) ((p)->state |= PSTATE_RESPONSE_SENT)
268
269#endif /* ISCSI_PARAMETERS_H */
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
new file mode 100644
index 000000000000..fc694082bfc0
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -0,0 +1,664 @@
1/*******************************************************************************
2 * This file contains main functions related to iSCSI DataSequenceInOrder=No
3 * and DataPDUInOrder=No.
4 *
 5 * © Copyright 2007-2011 RisingTide Systems LLC.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 ******************************************************************************/
21
22#include <linux/slab.h>
23#include <linux/random.h>
24
25#include "iscsi_target_core.h"
26#include "iscsi_target_util.h"
27#include "iscsi_target_seq_pdu_list.h"
28
29#define OFFLOAD_BUF_SIZE 32768
30
31void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
32{
33 int i;
34 struct iscsi_seq *seq;
35
36 pr_debug("Dumping Sequence List for ITT: 0x%08x:\n",
37 cmd->init_task_tag);
38
39 for (i = 0; i < cmd->seq_count; i++) {
40 seq = &cmd->seq_list[i];
41 pr_debug("i: %d, pdu_start: %d, pdu_count: %d,"
42 " offset: %d, xfer_len: %d, seq_send_order: %d,"
43 " seq_no: %d\n", i, seq->pdu_start, seq->pdu_count,
44 seq->offset, seq->xfer_len, seq->seq_send_order,
45 seq->seq_no);
46 }
47}
48
49void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
50{
51 int i;
52 struct iscsi_pdu *pdu;
53
54 pr_debug("Dumping PDU List for ITT: 0x%08x:\n",
55 cmd->init_task_tag);
56
57 for (i = 0; i < cmd->pdu_count; i++) {
58 pdu = &cmd->pdu_list[i];
59 pr_debug("i: %d, offset: %d, length: %d,"
60 " pdu_send_order: %d, seq_no: %d\n", i, pdu->offset,
61 pdu->length, pdu->pdu_send_order, pdu->seq_no);
62 }
63}
64
65static void iscsit_ordered_seq_lists(
66 struct iscsi_cmd *cmd,
67 u8 type)
68{
69 u32 i, seq_count = 0;
70
71 for (i = 0; i < cmd->seq_count; i++) {
72 if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
73 continue;
74 cmd->seq_list[i].seq_send_order = seq_count++;
75 }
76}
77
78static void iscsit_ordered_pdu_lists(
79 struct iscsi_cmd *cmd,
80 u8 type)
81{
82 u32 i, pdu_send_order = 0, seq_no = 0;
83
84 for (i = 0; i < cmd->pdu_count; i++) {
85redo:
86 if (cmd->pdu_list[i].seq_no == seq_no) {
87 cmd->pdu_list[i].pdu_send_order = pdu_send_order++;
88 continue;
89 }
90 seq_no++;
91 pdu_send_order = 0;
92 goto redo;
93 }
94}
95
96/*
97 * Generate count random values into array.
98 * Use 0x80000000 to mark generates valued in array[].
99 */
100static void iscsit_create_random_array(u32 *array, u32 count)
101{
102 int i, j, k;
103
104 if (count == 1) {
105 array[0] = 0;
106 return;
107 }
108
109 for (i = 0; i < count; i++) {
110redo:
111 get_random_bytes(&j, sizeof(u32));
112 j = (1 + (int) (9999 + 1) - j) % count;
113 for (k = 0; k < i + 1; k++) {
114 j |= 0x80000000;
115 if ((array[k] & 0x80000000) && (array[k] == j))
116 goto redo;
117 }
118 array[i] = j;
119 }
120
121 for (i = 0; i < count; i++)
122 array[i] &= ~0x80000000;
123}
124
125static int iscsit_randomize_pdu_lists(
126 struct iscsi_cmd *cmd,
127 u8 type)
128{
129 int i = 0;
130 u32 *array, pdu_count, seq_count = 0, seq_no = 0, seq_offset = 0;
131
132 for (pdu_count = 0; pdu_count < cmd->pdu_count; pdu_count++) {
133redo:
134 if (cmd->pdu_list[pdu_count].seq_no == seq_no) {
135 seq_count++;
136 continue;
137 }
138 array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
139 if (!array) {
140 pr_err("Unable to allocate memory"
141 " for random array.\n");
142 return -1;
143 }
144 iscsit_create_random_array(array, seq_count);
145
146 for (i = 0; i < seq_count; i++)
147 cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
148
149 kfree(array);
150
151 seq_offset += seq_count;
152 seq_count = 0;
153 seq_no++;
154 goto redo;
155 }
156
157 if (seq_count) {
158 array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
159 if (!array) {
160 pr_err("Unable to allocate memory for"
161 " random array.\n");
162 return -1;
163 }
164 iscsit_create_random_array(array, seq_count);
165
166 for (i = 0; i < seq_count; i++)
167 cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
168
169 kfree(array);
170 }
171
172 return 0;
173}
174
175static int iscsit_randomize_seq_lists(
176 struct iscsi_cmd *cmd,
177 u8 type)
178{
179 int i, j = 0;
180 u32 *array, seq_count = cmd->seq_count;
181
182 if ((type == PDULIST_IMMEDIATE) || (type == PDULIST_UNSOLICITED))
183 seq_count--;
184 else if (type == PDULIST_IMMEDIATE_AND_UNSOLICITED)
185 seq_count -= 2;
186
187 if (!seq_count)
188 return 0;
189
190 array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
191 if (!array) {
192 pr_err("Unable to allocate memory for random array.\n");
193 return -1;
194 }
195 iscsit_create_random_array(array, seq_count);
196
197 for (i = 0; i < cmd->seq_count; i++) {
198 if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
199 continue;
200 cmd->seq_list[i].seq_send_order = array[j++];
201 }
202
203 kfree(array);
204 return 0;
205}
206
/*
 * Dry-run of iscsit_build_pdu_and_seq_list(): walk the command's data
 * transfer with the same burst/segment arithmetic and count how many
 * sequence and PDU descriptors the real build pass will need.
 *
 * @seq_count / @pdu_count are in/out accumulators owned by the caller
 * (iscsit_do_build_list() seeds *seq_count with 1 for the final,
 * implicitly-closed sequence).
 */
207static void iscsit_determine_counts_for_list(
208	struct iscsi_cmd *cmd,
209	struct iscsi_build_list *bl,
210	u32 *seq_count,
211	u32 *pdu_count)
212{
213	int check_immediate = 0;
214	u32 burstlength = 0, offset = 0;
215	u32 unsolicited_data_length = 0;
216	struct iscsi_conn *conn = cmd->conn;
217
	/* Immediate data rides on the command PDU and forms its own
	 * first sequence. */
218	if ((bl->type == PDULIST_IMMEDIATE) ||
219	    (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
220		check_immediate = 1;
221
	/* Unsolicited data-out is capped at FirstBurstLength. */
222	if ((bl->type == PDULIST_UNSOLICITED) ||
223	    (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
224		unsolicited_data_length = (cmd->data_length >
225		 conn->sess->sess_ops->FirstBurstLength) ?
226		 conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
227
228	while (offset < cmd->data_length) {
229		*pdu_count += 1;
230
		/* Immediate data: one PDU, one whole sequence. */
231		if (check_immediate) {
232			check_immediate = 0;
233			offset += bl->immediate_data_length;
234			*seq_count += 1;
235			if (unsolicited_data_length)
236				unsolicited_data_length -=
237					bl->immediate_data_length;
238			continue;
239		}
		/* Unsolicited region: segment by MaxRecvDataSegmentLength,
		 * closing a sequence at the FirstBurstLength boundary. */
240		if (unsolicited_data_length > 0) {
241			if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
242					>= cmd->data_length) {
243				unsolicited_data_length -=
244					(cmd->data_length - offset);
245				offset += (cmd->data_length - offset);
246				continue;
247			}
248			if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
249					>= conn->sess->sess_ops->FirstBurstLength) {
250				unsolicited_data_length -=
251					(conn->sess->sess_ops->FirstBurstLength -
252					offset);
253				offset += (conn->sess->sess_ops->FirstBurstLength -
254					offset);
255				burstlength = 0;
256				*seq_count += 1;
257				continue;
258			}
259
260			offset += conn->conn_ops->MaxRecvDataSegmentLength;
261			unsolicited_data_length -=
262				conn->conn_ops->MaxRecvDataSegmentLength;
263			continue;
264		}
		/* Solicited region: a new sequence every MaxBurstLength. */
265		if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
266		     cmd->data_length) {
267			offset += (cmd->data_length - offset);
268			continue;
269		}
270		if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
271		     conn->sess->sess_ops->MaxBurstLength) {
272			offset += (conn->sess->sess_ops->MaxBurstLength -
273				burstlength);
274			burstlength = 0;
275			*seq_count += 1;
276			continue;
277		}
278
279		burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
280		offset += conn->conn_ops->MaxRecvDataSegmentLength;
281	}
282}
283
284
285/*
286 * Builds PDU and/or Sequence list, called while DataSequenceInOrder=No
287 * and DataPDUInOrder=No.
288 */
/*
 * Populate cmd->pdu_list and/or cmd->seq_list (pre-sized by
 * iscsit_determine_counts_for_list()) by walking the transfer with the
 * same burst/segment arithmetic, then apply ordered or randomized
 * send-order depending on bl->randomize and the data direction.
 * Returns 0 on success, -1 on failure of a randomize pass.
 *
 * The per-iteration structure mirrors the counting pass exactly:
 * immediate data first, then the unsolicited region bounded by
 * FirstBurstLength, then solicited bursts bounded by MaxBurstLength.
 */
289static int iscsit_build_pdu_and_seq_list(
290	struct iscsi_cmd *cmd,
291	struct iscsi_build_list *bl)
292{
293	int check_immediate = 0, datapduinorder, datasequenceinorder;
294	u32 burstlength = 0, offset = 0, i = 0;
295	u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0;
296	struct iscsi_conn *conn = cmd->conn;
297	struct iscsi_pdu *pdu = cmd->pdu_list;
298	struct iscsi_seq *seq = cmd->seq_list;
299
300	datapduinorder = conn->sess->sess_ops->DataPDUInOrder;
301	datasequenceinorder = conn->sess->sess_ops->DataSequenceInOrder;
302
303	if ((bl->type == PDULIST_IMMEDIATE) ||
304	    (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
305		check_immediate = 1;
306
307	if ((bl->type == PDULIST_UNSOLICITED) ||
308	    (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
309		unsolicited_data_length = (cmd->data_length >
310			conn->sess->sess_ops->FirstBurstLength) ?
311			conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
312
313	while (offset < cmd->data_length) {
314		pdu_count++;
315		if (!datapduinorder) {
316			pdu[i].offset = offset;
317			pdu[i].seq_no = seq_no;
318		}
		/* First PDU of a sequence: open the seq descriptor. */
319		if (!datasequenceinorder && (pdu_count == 1)) {
320			seq[seq_no].pdu_start = i;
321			seq[seq_no].seq_no = seq_no;
322			seq[seq_no].offset = offset;
323			seq[seq_no].orig_offset = offset;
324		}
325
		/* Immediate data: single-PDU sequence of its own. */
326		if (check_immediate) {
327			check_immediate = 0;
328			if (!datapduinorder) {
329				pdu[i].type = PDUTYPE_IMMEDIATE;
330				pdu[i++].length = bl->immediate_data_length;
331			}
332			if (!datasequenceinorder) {
333				seq[seq_no].type = SEQTYPE_IMMEDIATE;
334				seq[seq_no].pdu_count = 1;
335				seq[seq_no].xfer_len =
336					bl->immediate_data_length;
337			}
338			offset += bl->immediate_data_length;
339			pdu_count = 0;
340			seq_no++;
341			if (unsolicited_data_length)
342				unsolicited_data_length -=
343					bl->immediate_data_length;
344			continue;
345		}
		/* Unsolicited region, bounded by FirstBurstLength. */
346		if (unsolicited_data_length > 0) {
			/* Case 1: last PDU of the whole transfer. */
347			if ((offset +
348			     conn->conn_ops->MaxRecvDataSegmentLength) >=
349			     cmd->data_length) {
350				if (!datapduinorder) {
351					pdu[i].type = PDUTYPE_UNSOLICITED;
352					pdu[i].length =
353						(cmd->data_length - offset);
354				}
355				if (!datasequenceinorder) {
356					seq[seq_no].type = SEQTYPE_UNSOLICITED;
357					seq[seq_no].pdu_count = pdu_count;
358					seq[seq_no].xfer_len = (burstlength +
359						(cmd->data_length - offset));
360				}
361				unsolicited_data_length -=
362					(cmd->data_length - offset);
363				offset += (cmd->data_length - offset);
364				continue;
365			}
			/* Case 2: FirstBurstLength boundary closes the
			 * unsolicited sequence. */
366			if ((offset +
367			     conn->conn_ops->MaxRecvDataSegmentLength) >=
368			     conn->sess->sess_ops->FirstBurstLength) {
369				if (!datapduinorder) {
370					pdu[i].type = PDUTYPE_UNSOLICITED;
371					pdu[i++].length =
372						(conn->sess->sess_ops->FirstBurstLength -
373						offset);
374				}
375				if (!datasequenceinorder) {
376					seq[seq_no].type = SEQTYPE_UNSOLICITED;
377					seq[seq_no].pdu_count = pdu_count;
378					seq[seq_no].xfer_len = (burstlength +
379						(conn->sess->sess_ops->FirstBurstLength -
380						offset));
381				}
382				unsolicited_data_length -=
383					(conn->sess->sess_ops->FirstBurstLength -
384					offset);
385				offset += (conn->sess->sess_ops->FirstBurstLength -
386					offset);
387				burstlength = 0;
388				pdu_count = 0;
389				seq_no++;
390				continue;
391			}
392
			/* Case 3: full-size unsolicited segment. */
393			if (!datapduinorder) {
394				pdu[i].type = PDUTYPE_UNSOLICITED;
395				pdu[i++].length =
396					conn->conn_ops->MaxRecvDataSegmentLength;
397			}
398			burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
399			offset += conn->conn_ops->MaxRecvDataSegmentLength;
400			unsolicited_data_length -=
401				conn->conn_ops->MaxRecvDataSegmentLength;
402			continue;
403		}
		/* Solicited region: last PDU of the transfer. */
404		if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
405		     cmd->data_length) {
406			if (!datapduinorder) {
407				pdu[i].type = PDUTYPE_NORMAL;
408				pdu[i].length = (cmd->data_length - offset);
409			}
410			if (!datasequenceinorder) {
411				seq[seq_no].type = SEQTYPE_NORMAL;
412				seq[seq_no].pdu_count = pdu_count;
413				seq[seq_no].xfer_len = (burstlength +
414					(cmd->data_length - offset));
415			}
416			offset += (cmd->data_length - offset);
417			continue;
418		}
		/* MaxBurstLength boundary closes the sequence. */
419		if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
420		     conn->sess->sess_ops->MaxBurstLength) {
421			if (!datapduinorder) {
422				pdu[i].type = PDUTYPE_NORMAL;
423				pdu[i++].length =
424					(conn->sess->sess_ops->MaxBurstLength -
425					burstlength);
426			}
427			if (!datasequenceinorder) {
428				seq[seq_no].type = SEQTYPE_NORMAL;
429				seq[seq_no].pdu_count = pdu_count;
430				seq[seq_no].xfer_len = (burstlength +
431					(conn->sess->sess_ops->MaxBurstLength -
432					burstlength));
433			}
434			offset += (conn->sess->sess_ops->MaxBurstLength -
435				burstlength);
436			burstlength = 0;
437			pdu_count = 0;
438			seq_no++;
439			continue;
440		}
441
		/* Full-size solicited segment within the burst. */
442		if (!datapduinorder) {
443			pdu[i].type = PDUTYPE_NORMAL;
444			pdu[i++].length =
445				conn->conn_ops->MaxRecvDataSegmentLength;
446		}
447		burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
448		offset += conn->conn_ops->MaxRecvDataSegmentLength;
449	}
450
	/* Post-pass: hand out sequence send order (random or ordered)
	 * per direction-specific randomize flags. */
451	if (!datasequenceinorder) {
452		if (bl->data_direction & ISCSI_PDU_WRITE) {
453			if (bl->randomize & RANDOM_R2T_OFFSETS) {
454				if (iscsit_randomize_seq_lists(cmd, bl->type)
455						< 0)
456					return -1;
457			} else
458				iscsit_ordered_seq_lists(cmd, bl->type);
459		} else if (bl->data_direction & ISCSI_PDU_READ) {
460			if (bl->randomize & RANDOM_DATAIN_SEQ_OFFSETS) {
461				if (iscsit_randomize_seq_lists(cmd, bl->type)
462						< 0)
463					return -1;
464			} else
465				iscsit_ordered_seq_lists(cmd, bl->type);
466		}
467#if 0
468		iscsit_dump_seq_list(cmd);
469#endif
470	}
	/* Same for per-sequence PDU send order. */
471	if (!datapduinorder) {
472		if (bl->data_direction & ISCSI_PDU_WRITE) {
473			if (bl->randomize & RANDOM_DATAOUT_PDU_OFFSETS) {
474				if (iscsit_randomize_pdu_lists(cmd, bl->type)
475						< 0)
476					return -1;
477			} else
478				iscsit_ordered_pdu_lists(cmd, bl->type);
479		} else if (bl->data_direction & ISCSI_PDU_READ) {
480			if (bl->randomize & RANDOM_DATAIN_PDU_OFFSETS) {
481				if (iscsit_randomize_pdu_lists(cmd, bl->type)
482						< 0)
483					return -1;
484			} else
485				iscsit_ordered_pdu_lists(cmd, bl->type);
486		}
487#if 0
488		iscsit_dump_pdu_list(cmd);
489#endif
490	}
491
492	return 0;
493}
494
495/*
496 * Only called while DataSequenceInOrder=No or DataPDUInOrder=No.
497 */
498int iscsit_do_build_list(
499 struct iscsi_cmd *cmd,
500 struct iscsi_build_list *bl)
501{
502 u32 pdu_count = 0, seq_count = 1;
503 struct iscsi_conn *conn = cmd->conn;
504 struct iscsi_pdu *pdu = NULL;
505 struct iscsi_seq *seq = NULL;
506
507 iscsit_determine_counts_for_list(cmd, bl, &seq_count, &pdu_count);
508
509 if (!conn->sess->sess_ops->DataSequenceInOrder) {
510 seq = kzalloc(seq_count * sizeof(struct iscsi_seq), GFP_ATOMIC);
511 if (!seq) {
512 pr_err("Unable to allocate struct iscsi_seq list\n");
513 return -1;
514 }
515 cmd->seq_list = seq;
516 cmd->seq_count = seq_count;
517 }
518
519 if (!conn->sess->sess_ops->DataPDUInOrder) {
520 pdu = kzalloc(pdu_count * sizeof(struct iscsi_pdu), GFP_ATOMIC);
521 if (!pdu) {
522 pr_err("Unable to allocate struct iscsi_pdu list.\n");
523 kfree(seq);
524 return -1;
525 }
526 cmd->pdu_list = pdu;
527 cmd->pdu_count = pdu_count;
528 }
529
530 return iscsit_build_pdu_and_seq_list(cmd, bl);
531}
532
533struct iscsi_pdu *iscsit_get_pdu_holder(
534 struct iscsi_cmd *cmd,
535 u32 offset,
536 u32 length)
537{
538 u32 i;
539 struct iscsi_pdu *pdu = NULL;
540
541 if (!cmd->pdu_list) {
542 pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
543 return NULL;
544 }
545
546 pdu = &cmd->pdu_list[0];
547
548 for (i = 0; i < cmd->pdu_count; i++)
549 if ((pdu[i].offset == offset) && (pdu[i].length == length))
550 return &pdu[i];
551
552 pr_err("Unable to locate PDU holder for ITT: 0x%08x, Offset:"
553 " %u, Length: %u\n", cmd->init_task_tag, offset, length);
554 return NULL;
555}
556
/*
 * Return the next PDU to send for @cmd.
 *
 * With DataSequenceInOrder=Yes, cmd->pdu_list is scanned from
 * cmd->pdu_start for the entry whose pdu_send_order matches
 * cmd->pdu_send_order; when the current sequence window is exhausted
 * the window advances and the scan restarts ("redo").
 *
 * With DataSequenceInOrder=No, the caller-supplied @seq describes the
 * window into cmd->pdu_list and carries its own pdu_send_order cursor.
 *
 * Returns the located struct iscsi_pdu, or NULL on lookup failure.
 */
struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(
	struct iscsi_cmd *cmd,
	struct iscsi_seq *seq)
{
	u32 i;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_pdu *pdu = NULL;

	if (!cmd->pdu_list) {
		pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
		return NULL;
	}

	if (conn->sess->sess_ops->DataSequenceInOrder) {
redo:
		pdu = &cmd->pdu_list[cmd->pdu_start];

		/*
		 * NOTE(review): the loop visits entries until one with
		 * seq_no == cmd->seq_no is reached, matching
		 * pdu_send_order along the way — confirm this ordering
		 * assumption against how pdu_list is populated.
		 */
		for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) {
#if 0
			pr_debug("pdu[i].seq_no: %d, pdu[i].pdu"
				"_send_order: %d, pdu[i].offset: %d,"
				" pdu[i].length: %d\n", pdu[i].seq_no,
				pdu[i].pdu_send_order, pdu[i].offset,
				pdu[i].length);
#endif
			if (pdu[i].pdu_send_order == cmd->pdu_send_order) {
				cmd->pdu_send_order++;
				return &pdu[i];
			}
		}

		/* Current window exhausted; advance to the next sequence */
		cmd->pdu_start += cmd->pdu_send_order;
		cmd->pdu_send_order = 0;
		cmd->seq_no++;

		if (cmd->pdu_start < cmd->pdu_count)
			goto redo;

		pr_err("Command ITT: 0x%08x unable to locate"
			" struct iscsi_pdu for cmd->pdu_send_order: %u.\n",
			cmd->init_task_tag, cmd->pdu_send_order);
		return NULL;
	} else {
		if (!seq) {
			pr_err("struct iscsi_seq is NULL!\n");
			return NULL;
		}
#if 0
		pr_debug("seq->pdu_start: %d, seq->pdu_count: %d,"
			" seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count,
			seq->seq_no);
#endif
		pdu = &cmd->pdu_list[seq->pdu_start];

		/* All PDUs of this sequence already handed out */
		if (seq->pdu_send_order == seq->pdu_count) {
			pr_err("Command ITT: 0x%08x seq->pdu_send"
				"_order: %u equals seq->pdu_count: %u\n",
				cmd->init_task_tag, seq->pdu_send_order,
				seq->pdu_count);
			return NULL;
		}

		for (i = 0; i < seq->pdu_count; i++) {
			if (pdu[i].pdu_send_order == seq->pdu_send_order) {
				seq->pdu_send_order++;
				return &pdu[i];
			}
		}

		pr_err("Command ITT: 0x%08x unable to locate iscsi"
			"_pdu_t for seq->pdu_send_order: %u.\n",
			cmd->init_task_tag, seq->pdu_send_order);
		return NULL;
	}

	/* Not reachable: both branches above return */
	return NULL;
}
634
635struct iscsi_seq *iscsit_get_seq_holder(
636 struct iscsi_cmd *cmd,
637 u32 offset,
638 u32 length)
639{
640 u32 i;
641
642 if (!cmd->seq_list) {
643 pr_err("struct iscsi_cmd->seq_list is NULL!\n");
644 return NULL;
645 }
646
647 for (i = 0; i < cmd->seq_count; i++) {
648#if 0
649 pr_debug("seq_list[i].orig_offset: %d, seq_list[i]."
650 "xfer_len: %d, seq_list[i].seq_no %u\n",
651 cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len,
652 cmd->seq_list[i].seq_no);
653#endif
654 if ((cmd->seq_list[i].orig_offset +
655 cmd->seq_list[i].xfer_len) >=
656 (offset + length))
657 return &cmd->seq_list[i];
658 }
659
660 pr_err("Unable to locate Sequence holder for ITT: 0x%08x,"
661 " Offset: %u, Length: %u\n", cmd->init_task_tag, offset,
662 length);
663 return NULL;
664}
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
new file mode 100644
index 000000000000..0d52a10e3069
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -0,0 +1,86 @@
#ifndef ISCSI_SEQ_AND_PDU_LIST_H
#define ISCSI_SEQ_AND_PDU_LIST_H

/* struct iscsi_pdu->status, DataOUT send side */
#define DATAOUT_PDU_SENT			1

/* struct iscsi_seq->type */
#define SEQTYPE_IMMEDIATE			1
#define SEQTYPE_UNSOLICITED			2
#define SEQTYPE_NORMAL				3

/* struct iscsi_seq->status */
#define DATAOUT_SEQUENCE_GOT_R2T		1
#define DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY 2
#define DATAOUT_SEQUENCE_COMPLETE		3

/* iscsi_determine_counts_for_list() type */
#define PDULIST_NORMAL				1
#define PDULIST_IMMEDIATE			2
#define PDULIST_UNSOLICITED			3
#define PDULIST_IMMEDIATE_AND_UNSOLICITED	4

/* struct iscsi_pdu->type */
#define PDUTYPE_IMMEDIATE			1
#define PDUTYPE_UNSOLICITED			2
#define PDUTYPE_NORMAL				3

/*
 * struct iscsi_pdu->status, receive side.
 * NOTE(review): this group shares the field with DATAOUT_PDU_SENT
 * above and the value 1 overlaps — confirm the two uses are disjoint.
 */
#define ISCSI_PDU_NOT_RECEIVED			0
#define ISCSI_PDU_RECEIVED_OK			1
#define ISCSI_PDU_CRC_FAILED			2
#define ISCSI_PDU_TIMED_OUT			3

/* struct iscsi_build_list->randomize bit flags */
#define RANDOM_DATAIN_PDU_OFFSETS		0x01
#define RANDOM_DATAIN_SEQ_OFFSETS		0x02
#define RANDOM_DATAOUT_PDU_OFFSETS		0x04
#define RANDOM_R2T_OFFSETS			0x08

/* struct iscsi_build_list->data_direction bit flags */
#define ISCSI_PDU_READ				0x01
#define ISCSI_PDU_WRITE				0x02
43
/* Parameters controlling how a command's PDU/sequence lists are built */
struct iscsi_build_list {
	int data_direction;		/* ISCSI_PDU_READ / ISCSI_PDU_WRITE */
	int randomize;			/* RANDOM_* flags */
	int type;			/* PDULIST_* value */
	int immediate_data_length;
};

/* Per-PDU bookkeeping for out-of-order DataPDUInOrder=No handling */
struct iscsi_pdu {
	int status;			/* DATAOUT_PDU_SENT or ISCSI_PDU_* */
	int type;			/* PDUTYPE_* */
	u8 flags;
	u32 data_sn;
	u32 length;			/* payload length of this PDU */
	u32 offset;			/* payload offset of this PDU */
	u32 pdu_send_order;		/* position in (possibly random) send order */
	u32 seq_no;			/* owning sequence number */
} ____cacheline_aligned;

/* Per-sequence bookkeeping for DataSequenceInOrder=No handling */
struct iscsi_seq {
	int sent;
	int status;			/* DATAOUT_SEQUENCE_* */
	int type;			/* SEQTYPE_* */
	u32 data_sn;
	u32 first_datasn;
	u32 last_datasn;
	u32 next_burst_len;
	u32 pdu_start;			/* index of first PDU in cmd->pdu_list */
	u32 pdu_count;			/* number of PDUs in this sequence */
	u32 offset;
	u32 orig_offset;		/* starting payload offset of the sequence */
	u32 pdu_send_order;		/* cursor into this sequence's PDUs */
	u32 r2t_sn;
	u32 seq_send_order;
	u32 seq_no;
	u32 xfer_len;			/* total payload length of the sequence */
} ____cacheline_aligned;

extern int iscsit_do_build_list(struct iscsi_cmd *, struct iscsi_build_list *);
extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32);

#endif /* ISCSI_SEQ_AND_PDU_LIST_H */
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
new file mode 100644
index 000000000000..bbdbe9301b27
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -0,0 +1,950 @@
1/*******************************************************************************
2 * Modern ConfigFS group context specific iSCSI statistics based on original
3 * iscsi_target_mib.c code
4 *
5 * Copyright (c) 2011 Rising Tide Systems
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 ******************************************************************************/
21
22#include <linux/configfs.h>
23#include <scsi/iscsi_proto.h>
24#include <target/target_core_base.h>
25#include <target/target_core_transport.h>
26#include <target/configfs_macros.h>
27
28#include "iscsi_target_core.h"
29#include "iscsi_target_parameters.h"
30#include "iscsi_target_device.h"
31#include "iscsi_target_tpg.h"
32#include "iscsi_target_util.h"
33#include "iscsi_target_stat.h"
34
/* Fallback when the kernel does not export INITIAL_JIFFIES */
#ifndef INITIAL_JIFFIES
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
#endif

/* Instance Attributes Table: fixed values reported via configfs */
#define ISCSI_INST_NUM_NODES		1
#define ISCSI_INST_DESCR		"Storage Engine Target"
#define ISCSI_INST_LAST_FAILURE_TYPE	0
#define ISCSI_DISCONTINUITY_TIME	0

#define ISCSI_NODE_INDEX		1
/*
 * True when 'a' is a printable 7-bit ASCII character.  The argument is
 * fully parenthesized so expression arguments (e.g. "c & 0x7f") bind
 * correctly against the relational operators.
 */
#define ISPRINT(a)   (((a) >= ' ') && ((a) <= '~'))
48
49/****************************************************************************
50 * iSCSI MIB Tables
51 ****************************************************************************/
52/*
53 * Instance Attributes Table
54 */
/*
 * Generate struct iscsi_stat_instance_attribute plus show/store glue
 * (see target/configfs_macros.h) for the attributes defined below.
 */
CONFIGFS_EATTR_STRUCT(iscsi_stat_instance, iscsi_wwn_stat_grps);
/* Declare a read-write instance attribute */
#define ISCSI_STAT_INSTANCE_ATTR(_name, _mode)			\
static struct iscsi_stat_instance_attribute			\
			iscsi_stat_instance_##_name =		\
	__CONFIGFS_EATTR(_name, _mode,				\
	iscsi_stat_instance_show_attr_##_name,			\
	iscsi_stat_instance_store_attr_##_name);

/* Declare a read-only instance attribute */
#define ISCSI_STAT_INSTANCE_ATTR_RO(_name)			\
static struct iscsi_stat_instance_attribute			\
			iscsi_stat_instance_##_name =		\
	__CONFIGFS_EATTR_RO(_name,				\
	iscsi_stat_instance_show_attr_##_name);
68
69static ssize_t iscsi_stat_instance_show_attr_inst(
70 struct iscsi_wwn_stat_grps *igrps, char *page)
71{
72 struct iscsi_tiqn *tiqn = container_of(igrps,
73 struct iscsi_tiqn, tiqn_stat_grps);
74
75 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
76}
77ISCSI_STAT_INSTANCE_ATTR_RO(inst);
78
/* Minimum supported iSCSI draft version (fixed) */
static ssize_t iscsi_stat_instance_show_attr_min_ver(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
}
ISCSI_STAT_INSTANCE_ATTR_RO(min_ver);

/* Maximum supported iSCSI draft version (fixed) */
static ssize_t iscsi_stat_instance_show_attr_max_ver(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
}
ISCSI_STAT_INSTANCE_ATTR_RO(max_ver);

/* Number of network portals attached to this target's TPGs */
static ssize_t iscsi_stat_instance_show_attr_portals(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
				struct iscsi_tiqn, tiqn_stat_grps);

	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_num_tpg_nps);
}
ISCSI_STAT_INSTANCE_ATTR_RO(portals);

/* Node count (fixed at ISCSI_INST_NUM_NODES) */
static ssize_t iscsi_stat_instance_show_attr_nodes(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES);
}
ISCSI_STAT_INSTANCE_ATTR_RO(nodes);

/* Active session count for this target */
static ssize_t iscsi_stat_instance_show_attr_sessions(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
				struct iscsi_tiqn, tiqn_stat_grps);

	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_nsessions);
}
ISCSI_STAT_INSTANCE_ATTR_RO(sessions);

/* Sum of session failure counters, sampled under sess_err->lock */
static ssize_t iscsi_stat_instance_show_attr_fail_sess(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
				struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
	u32 sess_err_count;

	spin_lock_bh(&sess_err->lock);
	sess_err_count = (sess_err->digest_errors +
			  sess_err->cxn_timeout_errors +
			  sess_err->pdu_format_errors);
	spin_unlock_bh(&sess_err->lock);

	return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count);
}
ISCSI_STAT_INSTANCE_ATTR_RO(fail_sess);

/* Type of the most recent session failure (read without the lock) */
static ssize_t iscsi_stat_instance_show_attr_fail_type(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;

	return snprintf(page, PAGE_SIZE, "%u\n",
			sess_err->last_sess_failure_type);
}
ISCSI_STAT_INSTANCE_ATTR_RO(fail_type);

/* Remote initiator name of the last failed session, or NONE */
static ssize_t iscsi_stat_instance_show_attr_fail_rem_name(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;

	return snprintf(page, PAGE_SIZE, "%s\n",
			sess_err->last_sess_fail_rem_name[0] ?
			sess_err->last_sess_fail_rem_name : NONE);
}
ISCSI_STAT_INSTANCE_ATTR_RO(fail_rem_name);

/* Discontinuity time (fixed at ISCSI_DISCONTINUITY_TIME) */
static ssize_t iscsi_stat_instance_show_attr_disc_time(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME);
}
ISCSI_STAT_INSTANCE_ATTR_RO(disc_time);

/* Fixed textual description of the target engine */
static ssize_t iscsi_stat_instance_show_attr_description(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR);
}
ISCSI_STAT_INSTANCE_ATTR_RO(description);

/* Fixed vendor string */
static ssize_t iscsi_stat_instance_show_attr_vendor(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	return snprintf(page, PAGE_SIZE, "RisingTide Systems iSCSI-Target\n");
}
ISCSI_STAT_INSTANCE_ATTR_RO(vendor);

/* Target software version string */
static ssize_t iscsi_stat_instance_show_attr_version(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION);
}
ISCSI_STAT_INSTANCE_ATTR_RO(version);

/* Generate the configfs show/store dispatchers for this group */
CONFIGFS_EATTR_OPS(iscsi_stat_instance, iscsi_wwn_stat_grps,
		iscsi_instance_group);

static struct configfs_attribute *iscsi_stat_instance_attrs[] = {
	&iscsi_stat_instance_inst.attr,
	&iscsi_stat_instance_min_ver.attr,
	&iscsi_stat_instance_max_ver.attr,
	&iscsi_stat_instance_portals.attr,
	&iscsi_stat_instance_nodes.attr,
	&iscsi_stat_instance_sessions.attr,
	&iscsi_stat_instance_fail_sess.attr,
	&iscsi_stat_instance_fail_type.attr,
	&iscsi_stat_instance_fail_rem_name.attr,
	&iscsi_stat_instance_disc_time.attr,
	&iscsi_stat_instance_description.attr,
	&iscsi_stat_instance_vendor.attr,
	&iscsi_stat_instance_version.attr,
	NULL,
};

static struct configfs_item_operations iscsi_stat_instance_item_ops = {
	.show_attribute		= iscsi_stat_instance_attr_show,
	.store_attribute	= iscsi_stat_instance_attr_store,
};

struct config_item_type iscsi_stat_instance_cit = {
	.ct_item_ops		= &iscsi_stat_instance_item_ops,
	.ct_attrs		= iscsi_stat_instance_attrs,
	.ct_owner		= THIS_MODULE,
};
221
222/*
223 * Instance Session Failure Stats Table
224 */
/* Generate the sess_err attribute struct plus show/store glue */
CONFIGFS_EATTR_STRUCT(iscsi_stat_sess_err, iscsi_wwn_stat_grps);
/* Declare a read-write session-error attribute */
#define ISCSI_STAT_SESS_ERR_ATTR(_name, _mode)			\
static struct iscsi_stat_sess_err_attribute			\
			iscsi_stat_sess_err_##_name =		\
	__CONFIGFS_EATTR(_name, _mode,				\
	iscsi_stat_sess_err_show_attr_##_name,			\
	iscsi_stat_sess_err_store_attr_##_name);

/* Declare a read-only session-error attribute */
#define ISCSI_STAT_SESS_ERR_ATTR_RO(_name)			\
static struct iscsi_stat_sess_err_attribute			\
			iscsi_stat_sess_err_##_name =		\
	__CONFIGFS_EATTR_RO(_name,				\
	iscsi_stat_sess_err_show_attr_##_name);

/* Index of the owning target IQN */
static ssize_t iscsi_stat_sess_err_show_attr_inst(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);

	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
}
ISCSI_STAT_SESS_ERR_ATTR_RO(inst);

/* Count of digest (CRC) errors (read without sess_err->lock) */
static ssize_t iscsi_stat_sess_err_show_attr_digest_errors(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;

	return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors);
}
ISCSI_STAT_SESS_ERR_ATTR_RO(digest_errors);

/* Count of connection timeout errors */
static ssize_t iscsi_stat_sess_err_show_attr_cxn_errors(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;

	return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors);
}
ISCSI_STAT_SESS_ERR_ATTR_RO(cxn_errors);

/* Count of malformed PDU errors */
static ssize_t iscsi_stat_sess_err_show_attr_format_errors(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;

	return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors);
}
ISCSI_STAT_SESS_ERR_ATTR_RO(format_errors);

/* Generate the configfs show/store dispatchers for this group */
CONFIGFS_EATTR_OPS(iscsi_stat_sess_err, iscsi_wwn_stat_grps,
		iscsi_sess_err_group);

static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = {
	&iscsi_stat_sess_err_inst.attr,
	&iscsi_stat_sess_err_digest_errors.attr,
	&iscsi_stat_sess_err_cxn_errors.attr,
	&iscsi_stat_sess_err_format_errors.attr,
	NULL,
};

static struct configfs_item_operations iscsi_stat_sess_err_item_ops = {
	.show_attribute		= iscsi_stat_sess_err_attr_show,
	.store_attribute	= iscsi_stat_sess_err_attr_store,
};

struct config_item_type iscsi_stat_sess_err_cit = {
	.ct_item_ops		= &iscsi_stat_sess_err_item_ops,
	.ct_attrs		= iscsi_stat_sess_err_attrs,
	.ct_owner		= THIS_MODULE,
};
303
304/*
305 * Target Attributes Table
306 */
/* Generate the tgt_attr attribute struct plus show/store glue */
CONFIGFS_EATTR_STRUCT(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps);
/*
 * Declare a read-write target attribute.  The show wrapper name
 * previously contained a stray '-' (iscsi_stat_tgt-attr_show_attr_*),
 * which cannot paste into a valid identifier; fixed to '_'.
 */
#define ISCSI_STAT_TGT_ATTR(_name, _mode)			\
static struct iscsi_stat_tgt_attr_attribute			\
			iscsi_stat_tgt_attr_##_name =		\
	__CONFIGFS_EATTR(_name, _mode,				\
	iscsi_stat_tgt_attr_show_attr_##_name,			\
	iscsi_stat_tgt_attr_store_attr_##_name);

/* Declare a read-only target attribute */
#define ISCSI_STAT_TGT_ATTR_RO(_name)				\
static struct iscsi_stat_tgt_attr_attribute			\
			iscsi_stat_tgt_attr_##_name =		\
	__CONFIGFS_EATTR_RO(_name,				\
	iscsi_stat_tgt_attr_show_attr_##_name);
320
/* Index of the owning target IQN */
static ssize_t iscsi_stat_tgt_attr_show_attr_inst(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);

	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
}
ISCSI_STAT_TGT_ATTR_RO(inst);

/* Node index (fixed at ISCSI_NODE_INDEX) */
static ssize_t iscsi_stat_tgt_attr_show_attr_indx(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
}
ISCSI_STAT_TGT_ATTR_RO(indx);

/* Sum of all login failure counters, sampled under lstat->lock */
static ssize_t iscsi_stat_tgt_attr_show_attr_login_fails(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_login_stats *lstat = &tiqn->login_stats;
	u32 fail_count;

	spin_lock(&lstat->lock);
	fail_count = (lstat->redirects + lstat->authorize_fails +
			lstat->authenticate_fails + lstat->negotiate_fails +
			lstat->other_fails);
	spin_unlock(&lstat->lock);

	return snprintf(page, PAGE_SIZE, "%u\n", fail_count);
}
ISCSI_STAT_TGT_ATTR_RO(login_fails);
355
/*
 * Time of the most recent login failure, converted from jiffies since
 * INITIAL_JIFFIES into hundredths of a second (* 100 / HZ); 0 when no
 * failure has been recorded.
 */
static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_time(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_login_stats *lstat = &tiqn->login_stats;
	u32 last_fail_time;

	spin_lock(&lstat->lock);
	last_fail_time = lstat->last_fail_time ?
			(u32)(((u32)lstat->last_fail_time -
				INITIAL_JIFFIES) * 100 / HZ) : 0;
	spin_unlock(&lstat->lock);

	return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time);
}
ISCSI_STAT_TGT_ATTR_RO(last_fail_time);

/* Type code of the most recent login failure, sampled under the lock */
static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_type(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_login_stats *lstat = &tiqn->login_stats;
	u32 last_fail_type;

	spin_lock(&lstat->lock);
	last_fail_type = lstat->last_fail_type;
	spin_unlock(&lstat->lock);

	return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type);
}
ISCSI_STAT_TGT_ATTR_RO(last_fail_type);
389
390static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_name(
391 struct iscsi_wwn_stat_grps *igrps, char *page)
392{
393 struct iscsi_tiqn *tiqn = container_of(igrps,
394 struct iscsi_tiqn, tiqn_stat_grps);
395 struct iscsi_login_stats *lstat = &tiqn->login_stats;
396 unsigned char buf[224];
397
398 spin_lock(&lstat->lock);
399 snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ?
400 lstat->last_intr_fail_name : NONE);
401 spin_unlock(&lstat->lock);
402
403 return snprintf(page, PAGE_SIZE, "%s\n", buf);
404}
405ISCSI_STAT_TGT_ATTR_RO(fail_intr_name);
406
407static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type(
408 struct iscsi_wwn_stat_grps *igrps, char *page)
409{
410 struct iscsi_tiqn *tiqn = container_of(igrps,
411 struct iscsi_tiqn, tiqn_stat_grps);
412 struct iscsi_login_stats *lstat = &tiqn->login_stats;
413 unsigned char buf[8];
414
415 spin_lock(&lstat->lock);
416 snprintf(buf, 8, "%s", (lstat->last_intr_fail_ip_addr != NULL) ?
417 "ipv6" : "ipv4");
418 spin_unlock(&lstat->lock);
419
420 return snprintf(page, PAGE_SIZE, "%s\n", buf);
421}
422ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type);
423
/*
 * Address of the initiator from the most recent login failure;
 * IPv6 addresses are bracketed.
 */
static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_login_stats *lstat = &tiqn->login_stats;
	unsigned char buf[32];

	spin_lock(&lstat->lock);
	if (lstat->last_intr_fail_ip_family == AF_INET6)
		snprintf(buf, 32, "[%s]", lstat->last_intr_fail_ip_addr);
	else
		snprintf(buf, 32, "%s", lstat->last_intr_fail_ip_addr);
	spin_unlock(&lstat->lock);

	return snprintf(page, PAGE_SIZE, "%s\n", buf);
}
ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr);

/* Generate the configfs show/store dispatchers for this group */
CONFIGFS_EATTR_OPS(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps,
		iscsi_tgt_attr_group);

static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = {
	&iscsi_stat_tgt_attr_inst.attr,
	&iscsi_stat_tgt_attr_indx.attr,
	&iscsi_stat_tgt_attr_login_fails.attr,
	&iscsi_stat_tgt_attr_last_fail_time.attr,
	&iscsi_stat_tgt_attr_last_fail_type.attr,
	&iscsi_stat_tgt_attr_fail_intr_name.attr,
	&iscsi_stat_tgt_attr_fail_intr_addr_type.attr,
	&iscsi_stat_tgt_attr_fail_intr_addr.attr,
	NULL,
};

static struct configfs_item_operations iscsi_stat_tgt_attr_item_ops = {
	.show_attribute		= iscsi_stat_tgt_attr_attr_show,
	.store_attribute	= iscsi_stat_tgt_attr_attr_store,
};

struct config_item_type iscsi_stat_tgt_attr_cit = {
	.ct_item_ops		= &iscsi_stat_tgt_attr_item_ops,
	.ct_attrs		= iscsi_stat_tgt_attr_attrs,
	.ct_owner		= THIS_MODULE,
};
468
469/*
470 * Target Login Stats Table
471 */
/* Generate the login-stats attribute struct plus show/store glue */
CONFIGFS_EATTR_STRUCT(iscsi_stat_login, iscsi_wwn_stat_grps);
/* Declare a read-write login-stats attribute */
#define ISCSI_STAT_LOGIN(_name, _mode)				\
static struct iscsi_stat_login_attribute			\
			iscsi_stat_login_##_name =		\
	__CONFIGFS_EATTR(_name, _mode,				\
	iscsi_stat_login_show_attr_##_name,			\
	iscsi_stat_login_store_attr_##_name);

/* Declare a read-only login-stats attribute */
#define ISCSI_STAT_LOGIN_RO(_name)				\
static struct iscsi_stat_login_attribute			\
			iscsi_stat_login_##_name =		\
	__CONFIGFS_EATTR_RO(_name,				\
	iscsi_stat_login_show_attr_##_name);

/* Index of the owning target IQN */
static ssize_t iscsi_stat_login_show_attr_inst(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);

	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
}
ISCSI_STAT_LOGIN_RO(inst);

/* Node index (fixed at ISCSI_NODE_INDEX) */
static ssize_t iscsi_stat_login_show_attr_indx(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
}
ISCSI_STAT_LOGIN_RO(indx);
502
/* Count of accepted logins, sampled under lstat->lock */
static ssize_t iscsi_stat_login_show_attr_accepts(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_login_stats *lstat = &tiqn->login_stats;
	ssize_t ret;

	spin_lock(&lstat->lock);
	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts);
	spin_unlock(&lstat->lock);

	return ret;
}
ISCSI_STAT_LOGIN_RO(accepts);

/* Count of logins failed for uncategorized reasons */
static ssize_t iscsi_stat_login_show_attr_other_fails(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_login_stats *lstat = &tiqn->login_stats;
	ssize_t ret;

	spin_lock(&lstat->lock);
	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails);
	spin_unlock(&lstat->lock);

	return ret;
}
ISCSI_STAT_LOGIN_RO(other_fails);

/* Count of login redirects */
static ssize_t iscsi_stat_login_show_attr_redirects(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_login_stats *lstat = &tiqn->login_stats;
	ssize_t ret;

	spin_lock(&lstat->lock);
	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects);
	spin_unlock(&lstat->lock);

	return ret;
}
ISCSI_STAT_LOGIN_RO(redirects);

/* Count of authorization failures */
static ssize_t iscsi_stat_login_show_attr_authorize_fails(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_login_stats *lstat = &tiqn->login_stats;
	ssize_t ret;

	spin_lock(&lstat->lock);
	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails);
	spin_unlock(&lstat->lock);

	return ret;
}
ISCSI_STAT_LOGIN_RO(authorize_fails);

/* Count of authentication failures */
static ssize_t iscsi_stat_login_show_attr_authenticate_fails(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_login_stats *lstat = &tiqn->login_stats;
	ssize_t ret;

	spin_lock(&lstat->lock);
	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails);
	spin_unlock(&lstat->lock);

	return ret;
}
ISCSI_STAT_LOGIN_RO(authenticate_fails);

/* Count of parameter negotiation failures */
static ssize_t iscsi_stat_login_show_attr_negotiate_fails(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_login_stats *lstat = &tiqn->login_stats;
	ssize_t ret;

	spin_lock(&lstat->lock);
	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails);
	spin_unlock(&lstat->lock);

	return ret;
}
ISCSI_STAT_LOGIN_RO(negotiate_fails);

/* Generate the configfs show/store dispatchers for this group */
CONFIGFS_EATTR_OPS(iscsi_stat_login, iscsi_wwn_stat_grps,
		iscsi_login_stats_group);

static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = {
	&iscsi_stat_login_inst.attr,
	&iscsi_stat_login_indx.attr,
	&iscsi_stat_login_accepts.attr,
	&iscsi_stat_login_other_fails.attr,
	&iscsi_stat_login_redirects.attr,
	&iscsi_stat_login_authorize_fails.attr,
	&iscsi_stat_login_authenticate_fails.attr,
	&iscsi_stat_login_negotiate_fails.attr,
	NULL,
};

static struct configfs_item_operations iscsi_stat_login_stats_item_ops = {
	.show_attribute		= iscsi_stat_login_attr_show,
	.store_attribute	= iscsi_stat_login_attr_store,
};

struct config_item_type iscsi_stat_login_cit = {
	.ct_item_ops		= &iscsi_stat_login_stats_item_ops,
	.ct_attrs		= iscsi_stat_login_stats_attrs,
	.ct_owner		= THIS_MODULE,
};
624
625/*
626 * Target Logout Stats Table
627 */
628
/* Generate the logout-stats attribute struct plus show/store glue */
CONFIGFS_EATTR_STRUCT(iscsi_stat_logout, iscsi_wwn_stat_grps);
/* Declare a read-write logout-stats attribute */
#define ISCSI_STAT_LOGOUT(_name, _mode)				\
static struct iscsi_stat_logout_attribute			\
			iscsi_stat_logout_##_name =		\
	__CONFIGFS_EATTR(_name, _mode,				\
	iscsi_stat_logout_show_attr_##_name,			\
	iscsi_stat_logout_store_attr_##_name);

/* Declare a read-only logout-stats attribute */
#define ISCSI_STAT_LOGOUT_RO(_name)				\
static struct iscsi_stat_logout_attribute			\
			iscsi_stat_logout_##_name =		\
	__CONFIGFS_EATTR_RO(_name,				\
	iscsi_stat_logout_show_attr_##_name);

/* Index of the owning target IQN */
static ssize_t iscsi_stat_logout_show_attr_inst(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);

	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
}
ISCSI_STAT_LOGOUT_RO(inst);

/* Node index (fixed at ISCSI_NODE_INDEX) */
static ssize_t iscsi_stat_logout_show_attr_indx(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
}
ISCSI_STAT_LOGOUT_RO(indx);

/* Count of clean logouts (read without locking) */
static ssize_t iscsi_stat_logout_show_attr_normal_logouts(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_logout_stats *lstats = &tiqn->logout_stats;

	return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts);
}
ISCSI_STAT_LOGOUT_RO(normal_logouts);

/* Count of abnormal (unclean) logouts */
static ssize_t iscsi_stat_logout_show_attr_abnormal_logouts(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	struct iscsi_tiqn *tiqn = container_of(igrps,
			struct iscsi_tiqn, tiqn_stat_grps);
	struct iscsi_logout_stats *lstats = &tiqn->logout_stats;

	return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts);
}
ISCSI_STAT_LOGOUT_RO(abnormal_logouts);

/* Generate the configfs show/store dispatchers for this group */
CONFIGFS_EATTR_OPS(iscsi_stat_logout, iscsi_wwn_stat_grps,
		iscsi_logout_stats_group);

static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = {
	&iscsi_stat_logout_inst.attr,
	&iscsi_stat_logout_indx.attr,
	&iscsi_stat_logout_normal_logouts.attr,
	&iscsi_stat_logout_abnormal_logouts.attr,
	NULL,
};

static struct configfs_item_operations iscsi_stat_logout_stats_item_ops = {
	.show_attribute		= iscsi_stat_logout_attr_show,
	.store_attribute	= iscsi_stat_logout_attr_store,
};

struct config_item_type iscsi_stat_logout_cit = {
	.ct_item_ops		= &iscsi_stat_logout_stats_item_ops,
	.ct_attrs		= iscsi_stat_logout_stats_attrs,
	.ct_owner		= THIS_MODULE,
};
703
704/*
705 * Session Stats Table
706 */
707
/* Generate the per-node session-stats attribute struct plus glue */
CONFIGFS_EATTR_STRUCT(iscsi_stat_sess, iscsi_node_stat_grps);
/* Declare a read-write session-stats attribute */
#define ISCSI_STAT_SESS(_name, _mode)				\
static struct iscsi_stat_sess_attribute				\
			iscsi_stat_sess_##_name =		\
	__CONFIGFS_EATTR(_name, _mode,				\
	iscsi_stat_sess_show_attr_##_name,			\
	iscsi_stat_sess_store_attr_##_name);

/* Declare a read-only session-stats attribute */
#define ISCSI_STAT_SESS_RO(_name)				\
static struct iscsi_stat_sess_attribute				\
			iscsi_stat_sess_##_name =		\
	__CONFIGFS_EATTR_RO(_name,				\
	iscsi_stat_sess_show_attr_##_name);

/* Index of the target IQN reached through this node ACL's TPG */
static ssize_t iscsi_stat_sess_show_attr_inst(
	struct iscsi_node_stat_grps *igrps, char *page)
{
	struct iscsi_node_acl *acl = container_of(igrps,
			struct iscsi_node_acl, node_stat_grps);
	struct se_wwn *wwn = acl->se_node_acl.se_tpg->se_tpg_wwn;
	struct iscsi_tiqn *tiqn = container_of(wwn,
			struct iscsi_tiqn, tiqn_wwn);

	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
}
ISCSI_STAT_SESS_RO(inst);

/*
 * Node index for the active session: 0 for discovery sessions
 * (SessionType set), ISCSI_NODE_INDEX otherwise; empty when no
 * session is bound to this ACL.
 */
static ssize_t iscsi_stat_sess_show_attr_node(
	struct iscsi_node_stat_grps *igrps, char *page)
{
	struct iscsi_node_acl *acl = container_of(igrps,
			struct iscsi_node_acl, node_stat_grps);
	struct se_node_acl *se_nacl = &acl->se_node_acl;
	struct iscsi_session *sess;
	struct se_session *se_sess;
	ssize_t ret = 0;

	spin_lock_bh(&se_nacl->nacl_sess_lock);
	se_sess = se_nacl->nacl_sess;
	if (se_sess) {
		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
		if (sess)
			ret = snprintf(page, PAGE_SIZE, "%u\n",
				sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
	}
	spin_unlock_bh(&se_nacl->nacl_sess_lock);

	return ret;
}
ISCSI_STAT_SESS_RO(node);

/* Session index of the active session; empty when none is bound */
static ssize_t iscsi_stat_sess_show_attr_indx(
	struct iscsi_node_stat_grps *igrps, char *page)
{
	struct iscsi_node_acl *acl = container_of(igrps,
			struct iscsi_node_acl, node_stat_grps);
	struct se_node_acl *se_nacl = &acl->se_node_acl;
	struct iscsi_session *sess;
	struct se_session *se_sess;
	ssize_t ret = 0;

	spin_lock_bh(&se_nacl->nacl_sess_lock);
	se_sess = se_nacl->nacl_sess;
	if (se_sess) {
		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
		if (sess)
			ret = snprintf(page, PAGE_SIZE, "%u\n",
					sess->session_index);
	}
	spin_unlock_bh(&se_nacl->nacl_sess_lock);

	return ret;
}
ISCSI_STAT_SESS_RO(indx);
782
783static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
784 struct iscsi_node_stat_grps *igrps, char *page)
785{
786 struct iscsi_node_acl *acl = container_of(igrps,
787 struct iscsi_node_acl, node_stat_grps);
788 struct se_node_acl *se_nacl = &acl->se_node_acl;
789 struct iscsi_session *sess;
790 struct se_session *se_sess;
791 ssize_t ret = 0;
792
793 spin_lock_bh(&se_nacl->nacl_sess_lock);
794 se_sess = se_nacl->nacl_sess;
795 if (se_sess) {
796 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
797 if (sess)
798 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
799 }
800 spin_unlock_bh(&se_nacl->nacl_sess_lock);
801
802 return ret;
803}
804ISCSI_STAT_SESS_RO(cmd_pdus);
805
806static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
807 struct iscsi_node_stat_grps *igrps, char *page)
808{
809 struct iscsi_node_acl *acl = container_of(igrps,
810 struct iscsi_node_acl, node_stat_grps);
811 struct se_node_acl *se_nacl = &acl->se_node_acl;
812 struct iscsi_session *sess;
813 struct se_session *se_sess;
814 ssize_t ret = 0;
815
816 spin_lock_bh(&se_nacl->nacl_sess_lock);
817 se_sess = se_nacl->nacl_sess;
818 if (se_sess) {
819 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
820 if (sess)
821 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
822 }
823 spin_unlock_bh(&se_nacl->nacl_sess_lock);
824
825 return ret;
826}
827ISCSI_STAT_SESS_RO(rsp_pdus);
828
829static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
830 struct iscsi_node_stat_grps *igrps, char *page)
831{
832 struct iscsi_node_acl *acl = container_of(igrps,
833 struct iscsi_node_acl, node_stat_grps);
834 struct se_node_acl *se_nacl = &acl->se_node_acl;
835 struct iscsi_session *sess;
836 struct se_session *se_sess;
837 ssize_t ret = 0;
838
839 spin_lock_bh(&se_nacl->nacl_sess_lock);
840 se_sess = se_nacl->nacl_sess;
841 if (se_sess) {
842 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
843 if (sess)
844 ret = snprintf(page, PAGE_SIZE, "%llu\n",
845 (unsigned long long)sess->tx_data_octets);
846 }
847 spin_unlock_bh(&se_nacl->nacl_sess_lock);
848
849 return ret;
850}
851ISCSI_STAT_SESS_RO(txdata_octs);
852
853static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
854 struct iscsi_node_stat_grps *igrps, char *page)
855{
856 struct iscsi_node_acl *acl = container_of(igrps,
857 struct iscsi_node_acl, node_stat_grps);
858 struct se_node_acl *se_nacl = &acl->se_node_acl;
859 struct iscsi_session *sess;
860 struct se_session *se_sess;
861 ssize_t ret = 0;
862
863 spin_lock_bh(&se_nacl->nacl_sess_lock);
864 se_sess = se_nacl->nacl_sess;
865 if (se_sess) {
866 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
867 if (sess)
868 ret = snprintf(page, PAGE_SIZE, "%llu\n",
869 (unsigned long long)sess->rx_data_octets);
870 }
871 spin_unlock_bh(&se_nacl->nacl_sess_lock);
872
873 return ret;
874}
875ISCSI_STAT_SESS_RO(rxdata_octs);
876
877static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
878 struct iscsi_node_stat_grps *igrps, char *page)
879{
880 struct iscsi_node_acl *acl = container_of(igrps,
881 struct iscsi_node_acl, node_stat_grps);
882 struct se_node_acl *se_nacl = &acl->se_node_acl;
883 struct iscsi_session *sess;
884 struct se_session *se_sess;
885 ssize_t ret = 0;
886
887 spin_lock_bh(&se_nacl->nacl_sess_lock);
888 se_sess = se_nacl->nacl_sess;
889 if (se_sess) {
890 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
891 if (sess)
892 ret = snprintf(page, PAGE_SIZE, "%u\n",
893 sess->conn_digest_errors);
894 }
895 spin_unlock_bh(&se_nacl->nacl_sess_lock);
896
897 return ret;
898}
899ISCSI_STAT_SESS_RO(conn_digest_errors);
900
901static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
902 struct iscsi_node_stat_grps *igrps, char *page)
903{
904 struct iscsi_node_acl *acl = container_of(igrps,
905 struct iscsi_node_acl, node_stat_grps);
906 struct se_node_acl *se_nacl = &acl->se_node_acl;
907 struct iscsi_session *sess;
908 struct se_session *se_sess;
909 ssize_t ret = 0;
910
911 spin_lock_bh(&se_nacl->nacl_sess_lock);
912 se_sess = se_nacl->nacl_sess;
913 if (se_sess) {
914 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
915 if (sess)
916 ret = snprintf(page, PAGE_SIZE, "%u\n",
917 sess->conn_timeout_errors);
918 }
919 spin_unlock_bh(&se_nacl->nacl_sess_lock);
920
921 return ret;
922}
923ISCSI_STAT_SESS_RO(conn_timeout_errors);
924
/*
 * Presumably generates the iscsi_stat_sess_attr_show()/_store() wrappers
 * referenced by the item_ops below — CONFIGFS_EATTR_OPS is defined elsewhere.
 */
CONFIGFS_EATTR_OPS(iscsi_stat_sess, iscsi_node_stat_grps,
		iscsi_sess_stats_group);
927
/* Attributes exposed under the per-node-ACL session stats configfs group. */
static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = {
	&iscsi_stat_sess_inst.attr,
	&iscsi_stat_sess_node.attr,
	&iscsi_stat_sess_indx.attr,
	&iscsi_stat_sess_cmd_pdus.attr,
	&iscsi_stat_sess_rsp_pdus.attr,
	&iscsi_stat_sess_txdata_octs.attr,
	&iscsi_stat_sess_rxdata_octs.attr,
	&iscsi_stat_sess_conn_digest_errors.attr,
	&iscsi_stat_sess_conn_timeout_errors.attr,
	NULL,
};
940
/* Route configfs show/store callbacks to the generated eattr wrappers. */
static struct configfs_item_operations iscsi_stat_sess_stats_item_ops = {
	.show_attribute = iscsi_stat_sess_attr_show,
	.store_attribute = iscsi_stat_sess_attr_store,
};
945
/* Item type registered for the session stats group (see iscsi_target_stat.h). */
struct config_item_type iscsi_stat_sess_cit = {
	.ct_item_ops = &iscsi_stat_sess_stats_item_ops,
	.ct_attrs = iscsi_stat_sess_stats_attrs,
	.ct_owner = THIS_MODULE,
};
diff --git a/drivers/target/iscsi/iscsi_target_stat.h b/drivers/target/iscsi/iscsi_target_stat.h
new file mode 100644
index 000000000000..3ff76b4faad3
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_stat.h
@@ -0,0 +1,64 @@
#ifndef ISCSI_TARGET_STAT_H
#define ISCSI_TARGET_STAT_H

/*
 * For struct iscsi_tiqn->tiqn_wwn default groups
 */
extern struct config_item_type iscsi_stat_instance_cit;
extern struct config_item_type iscsi_stat_sess_err_cit;
extern struct config_item_type iscsi_stat_tgt_attr_cit;
extern struct config_item_type iscsi_stat_login_cit;
extern struct config_item_type iscsi_stat_logout_cit;

/*
 * For struct iscsi_session->se_sess default groups
 */
extern struct config_item_type iscsi_stat_sess_cit;

/* iSCSI session error types */
#define ISCSI_SESS_ERR_UNKNOWN 0
#define ISCSI_SESS_ERR_DIGEST 1
#define ISCSI_SESS_ERR_CXN_TIMEOUT 2
#define ISCSI_SESS_ERR_PDU_FORMAT 3

/* iSCSI session error stats; lock protects the counters and last-failure info */
struct iscsi_sess_err_stats {
	spinlock_t lock;
	u32 digest_errors;
	u32 cxn_timeout_errors;
	u32 pdu_format_errors;
	/* one of the ISCSI_SESS_ERR_* values above */
	u32 last_sess_failure_type;
	char last_sess_fail_rem_name[224];
} ____cacheline_aligned;

/* iSCSI login failure types (sub oids) */
#define ISCSI_LOGIN_FAIL_OTHER 2
#define ISCSI_LOGIN_FAIL_REDIRECT 3
#define ISCSI_LOGIN_FAIL_AUTHORIZE 4
#define ISCSI_LOGIN_FAIL_AUTHENTICATE 5
#define ISCSI_LOGIN_FAIL_NEGOTIATE 6

/* iSCSI login stats; lock protects counters and last-failure snapshot */
struct iscsi_login_stats {
	spinlock_t lock;
	u32 accepts;
	u32 other_fails;
	u32 redirects;
	u32 authorize_fails;
	u32 authenticate_fails;
	u32 negotiate_fails; /* used for notifications */
	u64 last_fail_time; /* time stamp (jiffies) */
	/* one of the ISCSI_LOGIN_FAIL_* values above */
	u32 last_fail_type;
	int last_intr_fail_ip_family;
	/* NOTE(review): IPV6_ADDRESS_SPACE is defined elsewhere — confirm header dependency */
	unsigned char last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
	char last_intr_fail_name[224];
} ____cacheline_aligned;

/* iSCSI logout stats; lock protects the two counters */
struct iscsi_logout_stats {
	spinlock_t lock;
	u32 normal_logouts;
	u32 abnormal_logouts;
} ____cacheline_aligned;

#endif /*** ISCSI_TARGET_STAT_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
new file mode 100644
index 000000000000..db1fe1ec84df
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -0,0 +1,849 @@
1/*******************************************************************************
2 * This file contains the iSCSI Target specific Task Management functions.
3 *
4 * (c) Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <asm/unaligned.h>
22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h>
24#include <target/target_core_transport.h>
25
26#include "iscsi_target_core.h"
27#include "iscsi_target_seq_pdu_list.h"
28#include "iscsi_target_datain_values.h"
29#include "iscsi_target_device.h"
30#include "iscsi_target_erl0.h"
31#include "iscsi_target_erl1.h"
32#include "iscsi_target_erl2.h"
33#include "iscsi_target_tmr.h"
34#include "iscsi_target_tpg.h"
35#include "iscsi_target_util.h"
36#include "iscsi_target.h"
37
/*
 * Validate an ABORT_TASK TMR and record the referenced task in the
 * se_tmr / tmr_req structures for the core to act on.
 *
 * Returns an ISCSI_TMF_RSP_* code:
 *  - COMPLETE when the referenced task is found, or when it is not found
 *    but RefCmdSN falls inside the [ExpCmdSN, MaxCmdSN] window (the
 *    command may legitimately never have arrived);
 *  - NO_TASK when the task is not found and RefCmdSN is outside the window;
 *  - REJECTED when RefCmdSN does not match the located task's CmdSN.
 */
u8 iscsit_tmr_abort_task(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	struct iscsi_cmd *ref_cmd;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
	struct iscsi_tm *hdr = (struct iscsi_tm *) buf;

	ref_cmd = iscsit_find_cmd_from_itt(conn, hdr->rtt);
	if (!ref_cmd) {
		pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
			" %hu.\n", hdr->rtt, conn->cid);
		return ((hdr->refcmdsn >= conn->sess->exp_cmd_sn) &&
			(hdr->refcmdsn <= conn->sess->max_cmd_sn)) ?
			ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
	}
	if (ref_cmd->cmd_sn != hdr->refcmdsn) {
		pr_err("RefCmdSN 0x%08x does not equal"
			" task's CmdSN 0x%08x. Rejecting ABORT_TASK.\n",
			hdr->refcmdsn, ref_cmd->cmd_sn);
		return ISCSI_TMF_RSP_REJECTED;
	}

	se_tmr->ref_task_tag = hdr->rtt;
	se_tmr->ref_cmd = &ref_cmd->se_cmd;
	tmr_req->ref_cmd_sn = hdr->refcmdsn;
	tmr_req->exp_data_sn = hdr->exp_datasn;

	return ISCSI_TMF_RSP_COMPLETE;
}
70
71/*
72 * Called from iscsit_handle_task_mgt_cmd().
73 */
74int iscsit_tmr_task_warm_reset(
75 struct iscsi_conn *conn,
76 struct iscsi_tmr_req *tmr_req,
77 unsigned char *buf)
78{
79 struct iscsi_session *sess = conn->sess;
80 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
81#if 0
82 struct iscsi_init_task_mgt_cmnd *hdr =
83 (struct iscsi_init_task_mgt_cmnd *) buf;
84#endif
85 if (!na->tmr_warm_reset) {
86 pr_err("TMR Opcode TARGET_WARM_RESET authorization"
87 " failed for Initiator Node: %s\n",
88 sess->se_sess->se_node_acl->initiatorname);
89 return -1;
90 }
91 /*
92 * Do the real work in transport_generic_do_tmr().
93 */
94 return 0;
95}
96
97int iscsit_tmr_task_cold_reset(
98 struct iscsi_conn *conn,
99 struct iscsi_tmr_req *tmr_req,
100 unsigned char *buf)
101{
102 struct iscsi_session *sess = conn->sess;
103 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
104
105 if (!na->tmr_cold_reset) {
106 pr_err("TMR Opcode TARGET_COLD_RESET authorization"
107 " failed for Initiator Node: %s\n",
108 sess->se_sess->se_node_acl->initiatorname);
109 return -1;
110 }
111 /*
112 * Do the real work in transport_generic_do_tmr().
113 */
114 return 0;
115}
116
117u8 iscsit_tmr_task_reassign(
118 struct iscsi_cmd *cmd,
119 unsigned char *buf)
120{
121 struct iscsi_cmd *ref_cmd = NULL;
122 struct iscsi_conn *conn = cmd->conn;
123 struct iscsi_conn_recovery *cr = NULL;
124 struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
125 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
126 struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
127 int ret;
128
129 pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
130 " RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
131 hdr->itt, hdr->rtt, hdr->exp_datasn, conn->cid);
132
133 if (conn->sess->sess_ops->ErrorRecoveryLevel != 2) {
134 pr_err("TMR TASK_REASSIGN not supported in ERL<2,"
135 " ignoring request.\n");
136 return ISCSI_TMF_RSP_NOT_SUPPORTED;
137 }
138
139 ret = iscsit_find_cmd_for_recovery(conn->sess, &ref_cmd, &cr, hdr->rtt);
140 if (ret == -2) {
141 pr_err("Command ITT: 0x%08x is still alligent to CID:"
142 " %hu\n", ref_cmd->init_task_tag, cr->cid);
143 return ISCSI_TMF_RSP_TASK_ALLEGIANT;
144 } else if (ret == -1) {
145 pr_err("Unable to locate RefTaskTag: 0x%08x in"
146 " connection recovery command list.\n", hdr->rtt);
147 return ISCSI_TMF_RSP_NO_TASK;
148 }
149 /*
150 * Temporary check to prevent connection recovery for
151 * connections with a differing MaxRecvDataSegmentLength.
152 */
153 if (cr->maxrecvdatasegmentlength !=
154 conn->conn_ops->MaxRecvDataSegmentLength) {
155 pr_err("Unable to perform connection recovery for"
156 " differing MaxRecvDataSegmentLength, rejecting"
157 " TMR TASK_REASSIGN.\n");
158 return ISCSI_TMF_RSP_REJECTED;
159 }
160
161 se_tmr->ref_task_tag = hdr->rtt;
162 se_tmr->ref_cmd = &ref_cmd->se_cmd;
163 se_tmr->ref_task_lun = get_unaligned_le64(&hdr->lun);
164 tmr_req->ref_cmd_sn = hdr->refcmdsn;
165 tmr_req->exp_data_sn = hdr->exp_datasn;
166 tmr_req->conn_recovery = cr;
167 tmr_req->task_reassign = 1;
168 /*
169 * Command can now be reassigned to a new connection.
170 * The task management response must be sent before the
171 * reassignment actually happens. See iscsi_tmr_post_handler().
172 */
173 return ISCSI_TMF_RSP_COMPLETE;
174}
175
176static void iscsit_task_reassign_remove_cmd(
177 struct iscsi_cmd *cmd,
178 struct iscsi_conn_recovery *cr,
179 struct iscsi_session *sess)
180{
181 int ret;
182
183 spin_lock(&cr->conn_recovery_cmd_lock);
184 ret = iscsit_remove_cmd_from_connection_recovery(cmd, sess);
185 spin_unlock(&cr->conn_recovery_cmd_lock);
186 if (!ret) {
187 pr_debug("iSCSI connection recovery successful for CID:"
188 " %hu on SID: %u\n", cr->cid, sess->sid);
189 iscsit_remove_active_connection_recovery_entry(cr, sess);
190 }
191}
192
/*
 * Reassign a pending NOP-OUT to the new connection: pull it out of
 * connection recovery, reset its StatSN state, hook it onto the new
 * connection's command list and queue a NOPIN response.
 *
 * NOTE(review): the recovery/requeue preamble here mirrors
 * iscsit_task_reassign_complete_scsi_cmnd() — candidate for a shared helper.
 */
static int iscsit_task_reassign_complete_nop_out(
	struct iscsi_tmr_req *tmr_req,
	struct iscsi_conn *conn)
{
	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
	struct se_cmd *se_cmd = se_tmr->ref_cmd;
	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
	struct iscsi_conn_recovery *cr;

	if (!cmd->cr) {
		pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
			" is NULL!\n", cmd->init_task_tag);
		return -1;
	}
	cr = cmd->cr;

	/*
	 * Reset the StatSN so a new one for this commands new connection
	 * will be assigned.
	 * Reset the ExpStatSN as well so we may receive Status SNACKs.
	 */
	cmd->stat_sn = cmd->exp_stat_sn = 0;

	iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);

	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	cmd->i_state = ISTATE_SEND_NOPIN;
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
	return 0;
}
226
/*
 * Resume a WRITE after connection reassignment: if all DataOUT already
 * arrived, hand the command (back) to the transport or resend status;
 * otherwise rebuild the R2T state needed to re-request the remaining
 * DataOUT on the new connection.
 */
static int iscsit_task_reassign_complete_write(
	struct iscsi_cmd *cmd,
	struct iscsi_tmr_req *tmr_req)
{
	int no_build_r2ts = 0;
	u32 length = 0, offset = 0;
	struct iscsi_conn *conn = cmd->conn;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	/*
	 * The Initiator must not send a R2T SNACK with a Begrun less than
	 * the TMR TASK_REASSIGN's ExpDataSN.
	 */
	if (!tmr_req->exp_data_sn) {
		cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
		cmd->acked_data_sn = 0;
	} else {
		cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
		cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
	}

	/*
	 * The TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN the
	 * Initiator is expecting. The Target controls all WRITE operations
	 * so if we have received all DataOUT we can safely ignore Initiator.
	 */
	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
		if (!atomic_read(&cmd->transport_sent)) {
			pr_debug("WRITE ITT: 0x%08x: t_state: %d"
				" never sent to transport\n",
				cmd->init_task_tag, cmd->se_cmd.t_state);
			return transport_generic_handle_data(se_cmd);
		}

		cmd->i_state = ISTATE_SEND_STATUS;
		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
		return 0;
	}

	/*
	 * Special case to deal with DataSequenceInOrder=No and Non-Immediate
	 * Unsolicited DataOut.
	 */
	if (cmd->unsolicited_data) {
		cmd->unsolicited_data = 0;

		offset = cmd->next_burst_len = cmd->write_data_done;

		/* If the remainder fits within FirstBurstLength no R2T is needed */
		if ((conn->sess->sess_ops->FirstBurstLength - offset) >=
		     cmd->data_length) {
			no_build_r2ts = 1;
			length = (cmd->data_length - offset);
		} else
			length = (conn->sess->sess_ops->FirstBurstLength - offset);

		spin_lock_bh(&cmd->r2t_lock);
		if (iscsit_add_r2t_to_list(cmd, offset, length, 0, 0) < 0) {
			spin_unlock_bh(&cmd->r2t_lock);
			return -1;
		}
		cmd->outstanding_r2ts++;
		spin_unlock_bh(&cmd->r2t_lock);

		if (no_build_r2ts)
			return 0;
	}
	/*
	 * iscsit_build_r2ts_for_cmd() can handle the rest from here.
	 */
	return iscsit_build_r2ts_for_cmd(cmd, conn, 2);
}
297
/*
 * Resume a READ after connection reassignment: verify the command has
 * completed at the transport level, then queue a recovery DataIN
 * sequence starting at the initiator's ExpDataSN.
 */
static int iscsit_task_reassign_complete_read(
	struct iscsi_cmd *cmd,
	struct iscsi_tmr_req *tmr_req)
{
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_datain_req *dr;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	/*
	 * The Initiator must not send a Data SNACK with a BegRun less than
	 * the TMR TASK_REASSIGN's ExpDataSN.
	 */
	if (!tmr_req->exp_data_sn) {
		cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
		cmd->acked_data_sn = 0;
	} else {
		cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
		cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
	}

	/* Not yet dispatched: (re)submit the CDB and let normal flow resume */
	if (!atomic_read(&cmd->transport_sent)) {
		pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
			" transport\n", cmd->init_task_tag,
			cmd->se_cmd.t_state);
		transport_generic_handle_cdb(se_cmd);
		return 0;
	}

	if (!atomic_read(&se_cmd->t_transport_complete)) {
		pr_err("READ ITT: 0x%08x: t_state: %d, never returned"
			" from transport\n", cmd->init_task_tag,
			cmd->se_cmd.t_state);
		return -1;
	}

	dr = iscsit_allocate_datain_req();
	if (!dr)
		return -1;
	/*
	 * The TMR TASK_REASSIGN's ExpDataSN contains the next DataSN the
	 * Initiator is expecting.
	 */
	dr->data_sn = dr->begrun = tmr_req->exp_data_sn;
	dr->runlength = 0;
	dr->generate_recovery_values = 1;
	dr->recovery = DATAIN_CONNECTION_RECOVERY;

	iscsit_attach_datain_req(cmd, dr);

	cmd->i_state = ISTATE_SEND_DATAIN;
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
	return 0;
}
350
351static int iscsit_task_reassign_complete_none(
352 struct iscsi_cmd *cmd,
353 struct iscsi_tmr_req *tmr_req)
354{
355 struct iscsi_conn *conn = cmd->conn;
356
357 cmd->i_state = ISTATE_SEND_STATUS;
358 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
359 return 0;
360}
361
362static int iscsit_task_reassign_complete_scsi_cmnd(
363 struct iscsi_tmr_req *tmr_req,
364 struct iscsi_conn *conn)
365{
366 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
367 struct se_cmd *se_cmd = se_tmr->ref_cmd;
368 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
369 struct iscsi_conn_recovery *cr;
370
371 if (!cmd->cr) {
372 pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
373 " is NULL!\n", cmd->init_task_tag);
374 return -1;
375 }
376 cr = cmd->cr;
377
378 /*
379 * Reset the StatSN so a new one for this commands new connection
380 * will be assigned.
381 * Reset the ExpStatSN as well so we may receive Status SNACKs.
382 */
383 cmd->stat_sn = cmd->exp_stat_sn = 0;
384
385 iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
386
387 spin_lock_bh(&conn->cmd_lock);
388 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
389 spin_unlock_bh(&conn->cmd_lock);
390
391 if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
392 cmd->i_state = ISTATE_SEND_STATUS;
393 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
394 return 0;
395 }
396
397 switch (cmd->data_direction) {
398 case DMA_TO_DEVICE:
399 return iscsit_task_reassign_complete_write(cmd, tmr_req);
400 case DMA_FROM_DEVICE:
401 return iscsit_task_reassign_complete_read(cmd, tmr_req);
402 case DMA_NONE:
403 return iscsit_task_reassign_complete_none(cmd, tmr_req);
404 default:
405 pr_err("Unknown cmd->data_direction: 0x%02x\n",
406 cmd->data_direction);
407 return -1;
408 }
409
410 return 0;
411}
412
/*
 * Dispatch TASK_REASSIGN completion to the opcode-specific handler after
 * the TMR response has been sent, rebinding the referenced command to
 * the new connection first.
 *
 * Returns 0 on success or -1 / the handler's error code on failure.
 */
static int iscsit_task_reassign_complete(
	struct iscsi_tmr_req *tmr_req,
	struct iscsi_conn *conn)
{
	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
	struct se_cmd *se_cmd;
	struct iscsi_cmd *cmd;
	int ret = 0;

	if (!se_tmr->ref_cmd) {
		pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
		return -1;
	}
	se_cmd = se_tmr->ref_cmd;
	cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);

	/* Rebind the recovered command to the connection issuing the TMR */
	cmd->conn = conn;

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_NOOP_OUT:
		ret = iscsit_task_reassign_complete_nop_out(tmr_req, conn);
		break;
	case ISCSI_OP_SCSI_CMD:
		ret = iscsit_task_reassign_complete_scsi_cmnd(tmr_req, conn);
		break;
	default:
		 pr_err("Illegal iSCSI Opcode 0x%02x during"
			" command realligence\n", cmd->iscsi_opcode);
		return -1;
	}

	if (ret != 0)
		return ret;

	pr_debug("Completed connection realligence for Opcode: 0x%02x,"
		" ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode,
			cmd->init_task_tag, conn->cid);

	return 0;
}
453
454/*
455 * Handles special after-the-fact actions related to TMRs.
456 * Right now the only one that its really needed for is
457 * connection recovery releated TASK_REASSIGN.
458 */
459extern int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
460{
461 struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
462 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
463
464 if (tmr_req->task_reassign &&
465 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
466 return iscsit_task_reassign_complete(tmr_req, conn);
467
468 return 0;
469}
470
/*
 * No READ-side preparation is required before TASK_REASSIGN completes;
 * this stub is kept for symmetry with
 * iscsit_task_reassign_prepare_write(). :-)
 */
int iscsit_task_reassign_prepare_read(
	struct iscsi_tmr_req *tmr_req,
	struct iscsi_conn *conn)
{
	return 0;
}
480
/*
 * Rewind unsolicited (first-burst) DataOUT bookkeeping so the sequence
 * can be re-received after TASK_REASSIGN.  Handling differs by the
 * DataSequenceInOrder / DataPDUInOrder session parameters: in-order
 * PDUs can be rewound wholesale, otherwise individual PDU/sequence
 * state is reset to "not received".
 */
static void iscsit_task_reassign_prepare_unsolicited_dataout(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	int i, j;
	struct iscsi_pdu *pdu = NULL;
	struct iscsi_seq *seq = NULL;

	if (conn->sess->sess_ops->DataSequenceInOrder) {
		cmd->data_sn = 0;

		/* Immediate data was not lost; keep it out of the rewind */
		if (cmd->immediate_data)
			cmd->r2t_offset += (cmd->first_burst_len -
					cmd->seq_start_offset);

		if (conn->sess->sess_ops->DataPDUInOrder) {
			cmd->write_data_done -= (cmd->immediate_data) ?
						(cmd->first_burst_len -
						 cmd->seq_start_offset) :
						 cmd->first_burst_len;
			cmd->first_burst_len = 0;
			return;
		}

		/* PDUs may arrive out of order: mark each first-burst PDU unreceived */
		for (i = 0; i < cmd->pdu_count; i++) {
			pdu = &cmd->pdu_list[i];

			if (pdu->status != ISCSI_PDU_RECEIVED_OK)
				continue;

			if ((pdu->offset >= cmd->seq_start_offset) &&
			   ((pdu->offset + pdu->length) <=
			     cmd->seq_end_offset)) {
				cmd->first_burst_len -= pdu->length;
				cmd->write_data_done -= pdu->length;
				pdu->status = ISCSI_PDU_NOT_RECEIVED;
			}
		}
	} else {
		/* Sequences may arrive out of order: reset every unsolicited one */
		for (i = 0; i < cmd->seq_count; i++) {
			seq = &cmd->seq_list[i];

			if (seq->type != SEQTYPE_UNSOLICITED)
				continue;

			cmd->write_data_done -=
					(seq->offset - seq->orig_offset);
			cmd->first_burst_len = 0;
			seq->data_sn = 0;
			seq->offset = seq->orig_offset;
			seq->next_burst_len = 0;
			seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;

			if (conn->sess->sess_ops->DataPDUInOrder)
				continue;

			for (j = 0; j < seq->pdu_count; j++) {
				pdu = &cmd->pdu_list[j+seq->pdu_start];

				if (pdu->status != ISCSI_PDU_RECEIVED_OK)
					continue;

				pdu->status = ISCSI_PDU_NOT_RECEIVED;
			}
		}
	}
}
548
/*
 * Prepare a WRITE for TASK_REASSIGN: rewind DataOUT/R2T bookkeeping for
 * every incomplete R2T below the initiator's ExpDataSN, then drop all
 * unacknowledged R2Ts at or above it so they can be rebuilt by
 * iscsit_build_r2ts_for_cmd() in iscsit_task_reassign_complete_write().
 *
 * Returns 0 on success, -1 on protocol or state errors.
 */
int iscsit_task_reassign_prepare_write(
	struct iscsi_tmr_req *tmr_req,
	struct iscsi_conn *conn)
{
	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
	struct se_cmd *se_cmd = se_tmr->ref_cmd;
	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
	struct iscsi_pdu *pdu = NULL;
	struct iscsi_r2t *r2t = NULL, *r2t_tmp;
	int first_incomplete_r2t = 1, i = 0;

	/*
	 * The command was in the process of receiving Unsolicited DataOUT when
	 * the connection failed.
	 */
	if (cmd->unsolicited_data)
		iscsit_task_reassign_prepare_unsolicited_dataout(cmd, conn);

	/*
	 * The Initiator is requesting R2Ts starting from zero, skip
	 * checking acknowledged R2Ts and start checking struct iscsi_r2ts
	 * greater than zero.
	 */
	if (!tmr_req->exp_data_sn)
		goto drop_unacknowledged_r2ts;

	/*
	 * We now check that the PDUs in DataOUT sequences below
	 * the TMR TASK_REASSIGN ExpDataSN (R2TSN the Initiator is
	 * expecting next) have all the DataOUT they require to complete
	 * the DataOUT sequence. First scan from R2TSN 0 to TMR
	 * TASK_REASSIGN ExpDataSN-1.
	 *
	 * If we have not received all DataOUT in question, we must
	 * make sure to make the appropriate changes to values in
	 * struct iscsi_cmd (and elsewhere depending on session parameters)
	 * so iscsit_build_r2ts_for_cmd() in iscsit_task_reassign_complete_write()
	 * will resend a new R2T for the DataOUT sequences in question.
	 */
	spin_lock_bh(&cmd->r2t_lock);
	if (list_empty(&cmd->cmd_r2t_list)) {
		spin_unlock_bh(&cmd->r2t_lock);
		return -1;
	}

	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {

		if (r2t->r2t_sn >= tmr_req->exp_data_sn)
			continue;
		/*
		 * Safely ignore Recovery R2Ts and R2Ts that have completed
		 * DataOUT sequences.
		 */
		if (r2t->seq_complete)
			continue;

		if (r2t->recovery_r2t)
			continue;

		/*
		 * DataSequenceInOrder=Yes:
		 *
		 * Taking into account the iSCSI implementation requirement of
		 * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
		 * DataSequenceInOrder=Yes, we must take into consideration
		 * the following:
		 *
		 * DataSequenceInOrder=No:
		 *
		 * Taking into account that the Initiator controls the (possibly
		 * random) PDU Order in (possibly random) Sequence Order of
		 * DataOUT the target requests with R2Ts, we must take into
		 * consideration the following:
		 *
		 * DataPDUInOrder=Yes for DataSequenceInOrder=[Yes,No]:
		 *
		 * While processing non-complete R2T DataOUT sequence requests
		 * the Target will re-request only the total sequence length
		 * minus current received offset. This is because we must
		 * assume the initiator will continue sending DataOUT from the
		 * last PDU before the connection failed.
		 *
		 * DataPDUInOrder=No for DataSequenceInOrder=[Yes,No]:
		 *
		 * While processing non-complete R2T DataOUT sequence requests
		 * the Target will re-request the entire DataOUT sequence if
		 * any single PDU is missing from the sequence. This is because
		 * we have no logical method to determine the next PDU offset,
		 * and we must assume the Initiator will be sending any random
		 * PDU offset in the current sequence after TASK_REASSIGN
		 * has completed.
		 */
		if (conn->sess->sess_ops->DataSequenceInOrder) {
			if (!first_incomplete_r2t) {
				cmd->r2t_offset -= r2t->xfer_len;
				goto next;
			}

			if (conn->sess->sess_ops->DataPDUInOrder) {
				cmd->data_sn = 0;
				cmd->r2t_offset -= (r2t->xfer_len -
						cmd->next_burst_len);
				first_incomplete_r2t = 0;
				goto next;
			}

			cmd->data_sn = 0;
			cmd->r2t_offset -= r2t->xfer_len;

			/* Mark every received PDU inside this R2T window unreceived */
			for (i = 0; i < cmd->pdu_count; i++) {
				pdu = &cmd->pdu_list[i];

				if (pdu->status != ISCSI_PDU_RECEIVED_OK)
					continue;

				if ((pdu->offset >= r2t->offset) &&
				    (pdu->offset < (r2t->offset +
						r2t->xfer_len))) {
					cmd->next_burst_len -= pdu->length;
					cmd->write_data_done -= pdu->length;
					pdu->status = ISCSI_PDU_NOT_RECEIVED;
				}
			}

			first_incomplete_r2t = 0;
		} else {
			struct iscsi_seq *seq;

			seq = iscsit_get_seq_holder(cmd, r2t->offset,
					r2t->xfer_len);
			if (!seq) {
				spin_unlock_bh(&cmd->r2t_lock);
				return -1;
			}

			/* Rewind the whole sequence back to its original offset */
			cmd->write_data_done -=
					(seq->offset - seq->orig_offset);
			seq->data_sn = 0;
			seq->offset = seq->orig_offset;
			seq->next_burst_len = 0;
			seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;

			cmd->seq_send_order--;

			if (conn->sess->sess_ops->DataPDUInOrder)
				goto next;

			for (i = 0; i < seq->pdu_count; i++) {
				pdu = &cmd->pdu_list[i+seq->pdu_start];

				if (pdu->status != ISCSI_PDU_RECEIVED_OK)
					continue;

				pdu->status = ISCSI_PDU_NOT_RECEIVED;
			}
		}

next:
		cmd->outstanding_r2ts--;
	}
	spin_unlock_bh(&cmd->r2t_lock);

	/*
	 * We now drop all unacknowledged R2Ts, ie: ExpDataSN from TMR
	 * TASK_REASSIGN to the last R2T in the list.. We are also careful
	 * to check that the Initiator is not requesting R2Ts for DataOUT
	 * sequences it has already completed.
	 *
	 * Free each R2T in question and adjust values in struct iscsi_cmd
	 * accordingly so iscsit_build_r2ts_for_cmd() do the rest of
	 * the work after the TMR TASK_REASSIGN Response is sent.
	 */
drop_unacknowledged_r2ts:

	cmd->cmd_flags &= ~ICF_SENT_LAST_R2T;
	cmd->r2t_sn = tmr_req->exp_data_sn;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list) {
		/*
		 * Skip up to the R2T Sequence number provided by the
		 * iSCSI TASK_REASSIGN TMR
		 */
		if (r2t->r2t_sn < tmr_req->exp_data_sn)
			continue;

		if (r2t->seq_complete) {
			pr_err("Initiator is requesting R2Ts from"
				" R2TSN: 0x%08x, but R2TSN: 0x%08x, Offset: %u,"
				" Length: %u is already complete."
				"   BAD INITIATOR ERL=2 IMPLEMENTATION!\n",
				tmr_req->exp_data_sn, r2t->r2t_sn,
				r2t->offset, r2t->xfer_len);
			spin_unlock_bh(&cmd->r2t_lock);
			return -1;
		}

		if (r2t->recovery_r2t) {
			iscsit_free_r2t(r2t, cmd);
			continue;
		}

		/*		   DataSequenceInOrder=Yes:
		 *
		 * Taking into account the iSCSI implementation requirement of
		 * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
		 * DataSequenceInOrder=Yes, it's safe to subtract the R2Ts
		 * entire transfer length from the commands R2T offset marker.
		 *
		 *		   DataSequenceInOrder=No:
		 *
		 * We subtract the difference from struct iscsi_seq between the
		 * current offset and original offset from cmd->write_data_done
		 * for account for DataOUT PDUs already received.  Then reset
		 * the current offset to the original and zero out the current
		 * burst length, to make sure we re-request the entire DataOUT
		 * sequence.
		 */
		if (conn->sess->sess_ops->DataSequenceInOrder)
			cmd->r2t_offset -= r2t->xfer_len;
		else
			cmd->seq_send_order--;

		cmd->outstanding_r2ts--;
		iscsit_free_r2t(r2t, cmd);
	}
	spin_unlock_bh(&cmd->r2t_lock);

	return 0;
}
779
/*
 * Performs sanity checks on TMR TASK_REASSIGN's ExpDataSN for
 * a given struct iscsi_cmd, then dispatches to the direction-specific
 * prepare routine.  Returns 0 on success, -1 on protocol error.
 */
int iscsit_check_task_reassign_expdatasn(
	struct iscsi_tmr_req *tmr_req,
	struct iscsi_conn *conn)
{
	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
	struct se_cmd *se_cmd = se_tmr->ref_cmd;
	struct iscsi_cmd *ref_cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);

	/* Only SCSI commands with an active data phase need checking */
	if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
		return 0;

	if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return 0;

	if (ref_cmd->data_direction == DMA_NONE)
		return 0;

	/*
	 * For READs the TMR TASK_REASSIGNs ExpDataSN contains the next DataSN
	 * of DataIN the Initiator is expecting.
	 *
	 * Also check that the Initiator is not re-requesting DataIN that has
	 * already been acknowledged with a DataAck SNACK.
	 */
	if (ref_cmd->data_direction == DMA_FROM_DEVICE) {
		if (tmr_req->exp_data_sn > ref_cmd->data_sn) {
			pr_err("Received ExpDataSN: 0x%08x for READ"
				" in TMR TASK_REASSIGN greater than command's"
				" DataSN: 0x%08x.\n", tmr_req->exp_data_sn,
				ref_cmd->data_sn);
			return -1;
		}
		if ((ref_cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
		    (tmr_req->exp_data_sn <= ref_cmd->acked_data_sn)) {
			pr_err("Received ExpDataSN: 0x%08x for READ"
				" in TMR TASK_REASSIGN for previously"
				" acknowledged DataIN: 0x%08x,"
				" protocol error\n", tmr_req->exp_data_sn,
				ref_cmd->acked_data_sn);
			return -1;
		}
		return iscsit_task_reassign_prepare_read(tmr_req, conn);
	}

	/*
	 * For WRITEs the TMR TASK_REASSIGNs ExpDataSN contains the next R2TSN
	 * for R2Ts the Initiator is expecting.
	 *
	 * Do the magic in iscsit_task_reassign_prepare_write().
	 */
	if (ref_cmd->data_direction == DMA_TO_DEVICE) {
		if (tmr_req->exp_data_sn > ref_cmd->r2t_sn) {
			pr_err("Received ExpDataSN: 0x%08x for WRITE"
				" in TMR TASK_REASSIGN greater than command's"
				" R2TSN: 0x%08x.\n", tmr_req->exp_data_sn,
					ref_cmd->r2t_sn);
			return -1;
		}
		return iscsit_task_reassign_prepare_write(tmr_req, conn);
	}

	pr_err("Unknown iSCSI data_direction: 0x%02x\n",
			ref_cmd->data_direction);

	return -1;
}
diff --git a/drivers/target/iscsi/iscsi_target_tmr.h b/drivers/target/iscsi/iscsi_target_tmr.h
new file mode 100644
index 000000000000..142e992cb097
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tmr.h
@@ -0,0 +1,14 @@
#ifndef ISCSI_TARGET_TMR_H
#define ISCSI_TARGET_TMR_H

/*
 * Task Management Request (TMR) handling exported by iscsi_target_tmr.c:
 * ABORT_TASK, warm/cold reset, and TASK_REASSIGN (ERL=2) support.
 */
extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *);
extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
			unsigned char *);
extern int iscsit_tmr_task_cold_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
			unsigned char *);
extern u8 iscsit_tmr_task_reassign(struct iscsi_cmd *, unsigned char *);
/* Post-processing after a TMR response has been queued. */
extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
/* Validate ExpDataSN from a TASK_REASSIGN against the referenced command. */
extern int iscsit_check_task_reassign_expdatasn(struct iscsi_tmr_req *,
			struct iscsi_conn *);

#endif /* ISCSI_TARGET_TMR_H */
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
new file mode 100644
index 000000000000..d4cf2cd25c44
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -0,0 +1,759 @@
1/*******************************************************************************
2 * This file contains iSCSI Target Portal Group related functions.
3 *
 * (c) Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <target/target_core_base.h>
22#include <target/target_core_transport.h>
23#include <target/target_core_fabric_ops.h>
24#include <target/target_core_configfs.h>
25#include <target/target_core_tpg.h>
26
27#include "iscsi_target_core.h"
28#include "iscsi_target_erl0.h"
29#include "iscsi_target_login.h"
30#include "iscsi_target_nodeattrib.h"
31#include "iscsi_target_tpg.h"
32#include "iscsi_target_util.h"
33#include "iscsi_target.h"
34#include "iscsi_target_parameters.h"
35
36struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt)
37{
38 struct iscsi_portal_group *tpg;
39
40 tpg = kzalloc(sizeof(struct iscsi_portal_group), GFP_KERNEL);
41 if (!tpg) {
42 pr_err("Unable to allocate struct iscsi_portal_group\n");
43 return NULL;
44 }
45
46 tpg->tpgt = tpgt;
47 tpg->tpg_state = TPG_STATE_FREE;
48 tpg->tpg_tiqn = tiqn;
49 INIT_LIST_HEAD(&tpg->tpg_gnp_list);
50 INIT_LIST_HEAD(&tpg->tpg_list);
51 mutex_init(&tpg->tpg_access_lock);
52 mutex_init(&tpg->np_login_lock);
53 spin_lock_init(&tpg->tpg_state_lock);
54 spin_lock_init(&tpg->tpg_np_lock);
55
56 return tpg;
57}
58
59static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *);
60
61int iscsit_load_discovery_tpg(void)
62{
63 struct iscsi_param *param;
64 struct iscsi_portal_group *tpg;
65 int ret;
66
67 tpg = iscsit_alloc_portal_group(NULL, 1);
68 if (!tpg) {
69 pr_err("Unable to allocate struct iscsi_portal_group\n");
70 return -1;
71 }
72
73 ret = core_tpg_register(
74 &lio_target_fabric_configfs->tf_ops,
75 NULL, &tpg->tpg_se_tpg, (void *)tpg,
76 TRANSPORT_TPG_TYPE_DISCOVERY);
77 if (ret < 0) {
78 kfree(tpg);
79 return -1;
80 }
81
82 tpg->sid = 1; /* First Assigned LIO Session ID */
83 iscsit_set_default_tpg_attribs(tpg);
84
85 if (iscsi_create_default_params(&tpg->param_list) < 0)
86 goto out;
87 /*
88 * By default we disable authentication for discovery sessions,
89 * this can be changed with:
90 *
91 * /sys/kernel/config/target/iscsi/discovery_auth/enforce_discovery_auth
92 */
93 param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
94 if (!param)
95 goto out;
96
97 if (iscsi_update_param_value(param, "CHAP,None") < 0)
98 goto out;
99
100 tpg->tpg_attrib.authentication = 0;
101
102 spin_lock(&tpg->tpg_state_lock);
103 tpg->tpg_state = TPG_STATE_ACTIVE;
104 spin_unlock(&tpg->tpg_state_lock);
105
106 iscsit_global->discovery_tpg = tpg;
107 pr_debug("CORE[0] - Allocated Discovery TPG\n");
108
109 return 0;
110out:
111 if (tpg->sid == 1)
112 core_tpg_deregister(&tpg->tpg_se_tpg);
113 kfree(tpg);
114 return -1;
115}
116
117void iscsit_release_discovery_tpg(void)
118{
119 struct iscsi_portal_group *tpg = iscsit_global->discovery_tpg;
120
121 if (!tpg)
122 return;
123
124 core_tpg_deregister(&tpg->tpg_se_tpg);
125
126 kfree(tpg);
127 iscsit_global->discovery_tpg = NULL;
128}
129
130struct iscsi_portal_group *iscsit_get_tpg_from_np(
131 struct iscsi_tiqn *tiqn,
132 struct iscsi_np *np)
133{
134 struct iscsi_portal_group *tpg = NULL;
135 struct iscsi_tpg_np *tpg_np;
136
137 spin_lock(&tiqn->tiqn_tpg_lock);
138 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
139
140 spin_lock(&tpg->tpg_state_lock);
141 if (tpg->tpg_state == TPG_STATE_FREE) {
142 spin_unlock(&tpg->tpg_state_lock);
143 continue;
144 }
145 spin_unlock(&tpg->tpg_state_lock);
146
147 spin_lock(&tpg->tpg_np_lock);
148 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
149 if (tpg_np->tpg_np == np) {
150 spin_unlock(&tpg->tpg_np_lock);
151 spin_unlock(&tiqn->tiqn_tpg_lock);
152 return tpg;
153 }
154 }
155 spin_unlock(&tpg->tpg_np_lock);
156 }
157 spin_unlock(&tiqn->tiqn_tpg_lock);
158
159 return NULL;
160}
161
162int iscsit_get_tpg(
163 struct iscsi_portal_group *tpg)
164{
165 int ret;
166
167 ret = mutex_lock_interruptible(&tpg->tpg_access_lock);
168 return ((ret != 0) || signal_pending(current)) ? -1 : 0;
169}
170
171void iscsit_put_tpg(struct iscsi_portal_group *tpg)
172{
173 mutex_unlock(&tpg->tpg_access_lock);
174}
175
176static void iscsit_clear_tpg_np_login_thread(
177 struct iscsi_tpg_np *tpg_np,
178 struct iscsi_portal_group *tpg)
179{
180 if (!tpg_np->tpg_np) {
181 pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
182 return;
183 }
184
185 iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg);
186}
187
/*
 * Reset the login thread of every network portal attached to @tpg.
 *
 * NOTE(review): tpg_np_lock is dropped around each
 * iscsit_clear_tpg_np_login_thread() call (the reset path may block),
 * so the list_for_each_entry() cursor can become stale if a portal is
 * removed concurrently — confirm callers serialize portal removal
 * against this path.
 */
void iscsit_clear_tpg_np_login_threads(
	struct iscsi_portal_group *tpg)
{
	struct iscsi_tpg_np *tpg_np;

	spin_lock(&tpg->tpg_np_lock);
	list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
		if (!tpg_np->tpg_np) {
			pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
			continue;
		}
		/* Drop the spinlock across the (possibly blocking) reset. */
		spin_unlock(&tpg->tpg_np_lock);
		iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
		spin_lock(&tpg->tpg_np_lock);
	}
	spin_unlock(&tpg->tpg_np_lock);
}
205
206void iscsit_tpg_dump_params(struct iscsi_portal_group *tpg)
207{
208 iscsi_print_params(tpg->param_list);
209}
210
211static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
212{
213 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
214
215 a->authentication = TA_AUTHENTICATION;
216 a->login_timeout = TA_LOGIN_TIMEOUT;
217 a->netif_timeout = TA_NETIF_TIMEOUT;
218 a->default_cmdsn_depth = TA_DEFAULT_CMDSN_DEPTH;
219 a->generate_node_acls = TA_GENERATE_NODE_ACLS;
220 a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
221 a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
222 a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
223}
224
225int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
226{
227 if (tpg->tpg_state != TPG_STATE_FREE) {
228 pr_err("Unable to add iSCSI Target Portal Group: %d"
229 " while not in TPG_STATE_FREE state.\n", tpg->tpgt);
230 return -EEXIST;
231 }
232 iscsit_set_default_tpg_attribs(tpg);
233
234 if (iscsi_create_default_params(&tpg->param_list) < 0)
235 goto err_out;
236
237 ISCSI_TPG_ATTRIB(tpg)->tpg = tpg;
238
239 spin_lock(&tpg->tpg_state_lock);
240 tpg->tpg_state = TPG_STATE_INACTIVE;
241 spin_unlock(&tpg->tpg_state_lock);
242
243 spin_lock(&tiqn->tiqn_tpg_lock);
244 list_add_tail(&tpg->tpg_list, &tiqn->tiqn_tpg_list);
245 tiqn->tiqn_ntpgs++;
246 pr_debug("CORE[%s]_TPG[%hu] - Added iSCSI Target Portal Group\n",
247 tiqn->tiqn, tpg->tpgt);
248 spin_unlock(&tiqn->tiqn_tpg_lock);
249
250 return 0;
251err_out:
252 if (tpg->param_list) {
253 iscsi_release_param_list(tpg->param_list);
254 tpg->param_list = NULL;
255 }
256 kfree(tpg);
257 return -ENOMEM;
258}
259
260int iscsit_tpg_del_portal_group(
261 struct iscsi_tiqn *tiqn,
262 struct iscsi_portal_group *tpg,
263 int force)
264{
265 u8 old_state = tpg->tpg_state;
266
267 spin_lock(&tpg->tpg_state_lock);
268 tpg->tpg_state = TPG_STATE_INACTIVE;
269 spin_unlock(&tpg->tpg_state_lock);
270
271 if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
272 pr_err("Unable to delete iSCSI Target Portal Group:"
273 " %hu while active sessions exist, and force=0\n",
274 tpg->tpgt);
275 tpg->tpg_state = old_state;
276 return -EPERM;
277 }
278
279 core_tpg_clear_object_luns(&tpg->tpg_se_tpg);
280
281 if (tpg->param_list) {
282 iscsi_release_param_list(tpg->param_list);
283 tpg->param_list = NULL;
284 }
285
286 core_tpg_deregister(&tpg->tpg_se_tpg);
287
288 spin_lock(&tpg->tpg_state_lock);
289 tpg->tpg_state = TPG_STATE_FREE;
290 spin_unlock(&tpg->tpg_state_lock);
291
292 spin_lock(&tiqn->tiqn_tpg_lock);
293 tiqn->tiqn_ntpgs--;
294 list_del(&tpg->tpg_list);
295 spin_unlock(&tiqn->tiqn_tpg_lock);
296
297 pr_debug("CORE[%s]_TPG[%hu] - Deleted iSCSI Target Portal Group\n",
298 tiqn->tiqn, tpg->tpgt);
299
300 kfree(tpg);
301 return 0;
302}
303
304int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
305{
306 struct iscsi_param *param;
307 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
308
309 spin_lock(&tpg->tpg_state_lock);
310 if (tpg->tpg_state == TPG_STATE_ACTIVE) {
311 pr_err("iSCSI target portal group: %hu is already"
312 " active, ignoring request.\n", tpg->tpgt);
313 spin_unlock(&tpg->tpg_state_lock);
314 return -EINVAL;
315 }
316 /*
317 * Make sure that AuthMethod does not contain None as an option
318 * unless explictly disabled. Set the default to CHAP if authentication
319 * is enforced (as per default), and remove the NONE option.
320 */
321 param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
322 if (!param) {
323 spin_unlock(&tpg->tpg_state_lock);
324 return -ENOMEM;
325 }
326
327 if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
328 if (!strcmp(param->value, NONE))
329 if (iscsi_update_param_value(param, CHAP) < 0) {
330 spin_unlock(&tpg->tpg_state_lock);
331 return -ENOMEM;
332 }
333 if (iscsit_ta_authentication(tpg, 1) < 0) {
334 spin_unlock(&tpg->tpg_state_lock);
335 return -ENOMEM;
336 }
337 }
338
339 tpg->tpg_state = TPG_STATE_ACTIVE;
340 spin_unlock(&tpg->tpg_state_lock);
341
342 spin_lock(&tiqn->tiqn_tpg_lock);
343 tiqn->tiqn_active_tpgs++;
344 pr_debug("iSCSI_TPG[%hu] - Enabled iSCSI Target Portal Group\n",
345 tpg->tpgt);
346 spin_unlock(&tiqn->tiqn_tpg_lock);
347
348 return 0;
349}
350
351int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
352{
353 struct iscsi_tiqn *tiqn;
354 u8 old_state = tpg->tpg_state;
355
356 spin_lock(&tpg->tpg_state_lock);
357 if (tpg->tpg_state == TPG_STATE_INACTIVE) {
358 pr_err("iSCSI Target Portal Group: %hu is already"
359 " inactive, ignoring request.\n", tpg->tpgt);
360 spin_unlock(&tpg->tpg_state_lock);
361 return -EINVAL;
362 }
363 tpg->tpg_state = TPG_STATE_INACTIVE;
364 spin_unlock(&tpg->tpg_state_lock);
365
366 iscsit_clear_tpg_np_login_threads(tpg);
367
368 if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
369 spin_lock(&tpg->tpg_state_lock);
370 tpg->tpg_state = old_state;
371 spin_unlock(&tpg->tpg_state_lock);
372 pr_err("Unable to disable iSCSI Target Portal Group:"
373 " %hu while active sessions exist, and force=0\n",
374 tpg->tpgt);
375 return -EPERM;
376 }
377
378 tiqn = tpg->tpg_tiqn;
379 if (!tiqn || (tpg == iscsit_global->discovery_tpg))
380 return 0;
381
382 spin_lock(&tiqn->tiqn_tpg_lock);
383 tiqn->tiqn_active_tpgs--;
384 pr_debug("iSCSI_TPG[%hu] - Disabled iSCSI Target Portal Group\n",
385 tpg->tpgt);
386 spin_unlock(&tiqn->tiqn_tpg_lock);
387
388 return 0;
389}
390
391struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(
392 struct iscsi_session *sess)
393{
394 struct se_session *se_sess = sess->se_sess;
395 struct se_node_acl *se_nacl = se_sess->se_node_acl;
396 struct iscsi_node_acl *acl = container_of(se_nacl, struct iscsi_node_acl,
397 se_node_acl);
398
399 return &acl->node_attrib;
400}
401
402struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
403 struct iscsi_tpg_np *tpg_np,
404 int network_transport)
405{
406 struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
407
408 spin_lock(&tpg_np->tpg_np_parent_lock);
409 list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
410 &tpg_np->tpg_np_parent_list, tpg_np_child_list) {
411 if (tpg_np_child->tpg_np->np_network_transport ==
412 network_transport) {
413 spin_unlock(&tpg_np->tpg_np_parent_lock);
414 return tpg_np_child;
415 }
416 }
417 spin_unlock(&tpg_np->tpg_np_parent_lock);
418
419 return NULL;
420}
421
422struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
423 struct iscsi_portal_group *tpg,
424 struct __kernel_sockaddr_storage *sockaddr,
425 char *ip_str,
426 struct iscsi_tpg_np *tpg_np_parent,
427 int network_transport)
428{
429 struct iscsi_np *np;
430 struct iscsi_tpg_np *tpg_np;
431
432 tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
433 if (!tpg_np) {
434 pr_err("Unable to allocate memory for"
435 " struct iscsi_tpg_np.\n");
436 return ERR_PTR(-ENOMEM);
437 }
438
439 np = iscsit_add_np(sockaddr, ip_str, network_transport);
440 if (IS_ERR(np)) {
441 kfree(tpg_np);
442 return ERR_CAST(np);
443 }
444
445 INIT_LIST_HEAD(&tpg_np->tpg_np_list);
446 INIT_LIST_HEAD(&tpg_np->tpg_np_child_list);
447 INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list);
448 spin_lock_init(&tpg_np->tpg_np_parent_lock);
449 tpg_np->tpg_np = np;
450 tpg_np->tpg = tpg;
451
452 spin_lock(&tpg->tpg_np_lock);
453 list_add_tail(&tpg_np->tpg_np_list, &tpg->tpg_gnp_list);
454 tpg->num_tpg_nps++;
455 if (tpg->tpg_tiqn)
456 tpg->tpg_tiqn->tiqn_num_tpg_nps++;
457 spin_unlock(&tpg->tpg_np_lock);
458
459 if (tpg_np_parent) {
460 tpg_np->tpg_np_parent = tpg_np_parent;
461 spin_lock(&tpg_np_parent->tpg_np_parent_lock);
462 list_add_tail(&tpg_np->tpg_np_child_list,
463 &tpg_np_parent->tpg_np_parent_list);
464 spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
465 }
466
467 pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
468 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
469 (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
470
471 return tpg_np;
472}
473
474static int iscsit_tpg_release_np(
475 struct iscsi_tpg_np *tpg_np,
476 struct iscsi_portal_group *tpg,
477 struct iscsi_np *np)
478{
479 iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
480
481 pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
482 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
483 (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
484
485 tpg_np->tpg_np = NULL;
486 tpg_np->tpg = NULL;
487 kfree(tpg_np);
488 /*
489 * iscsit_del_np() will shutdown struct iscsi_np when last TPG reference is released.
490 */
491 return iscsit_del_np(np);
492}
493
/*
 * Remove @tpg_np from @tpg.  A parent portal first removes each of its
 * child portals (non-TCP transports) by recursing once per child; a
 * child portal simply unlinks itself from its parent's list.  Finally
 * the mapping is released via iscsit_tpg_release_np(), which drops the
 * TPG's reference on the underlying iscsi_np.
 *
 * Returns 0, a negative errno, or the result of iscsit_del_np().
 */
int iscsit_tpg_del_network_portal(
	struct iscsi_portal_group *tpg,
	struct iscsi_tpg_np *tpg_np)
{
	struct iscsi_np *np;
	struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
	int ret = 0;

	np = tpg_np->tpg_np;
	if (!np) {
		pr_err("Unable to locate struct iscsi_np from"
				" struct iscsi_tpg_np\n");
		return -EINVAL;
	}

	if (!tpg_np->tpg_np_parent) {
		/*
		 * We are the parent tpg network portal.  Release all of the
		 * child tpg_np's (eg: the non ISCSI_TCP ones) on our parent
		 * list first.
		 *
		 * NOTE(review): the child list is walked without holding
		 * tpg_np_parent_lock here; the recursive call takes the lock
		 * only when unlinking each child — presumably configfs
		 * serializes portal removal.  Confirm.
		 */
		list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
				&tpg_np->tpg_np_parent_list,
				tpg_np_child_list) {
			ret = iscsit_tpg_del_network_portal(tpg, tpg_np_child);
			if (ret < 0)
				pr_err("iscsit_tpg_del_network_portal()"
					" failed: %d\n", ret);
		}
	} else {
		/*
		 * We are not the parent ISCSI_TCP tpg network portal.  Release
		 * our own network portals from the child list.
		 */
		spin_lock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
		list_del(&tpg_np->tpg_np_child_list);
		spin_unlock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
	}

	/* Unlink from the TPG and fix the per-TPG / per-tiqn counters. */
	spin_lock(&tpg->tpg_np_lock);
	list_del(&tpg_np->tpg_np_list);
	tpg->num_tpg_nps--;
	if (tpg->tpg_tiqn)
		tpg->tpg_tiqn->tiqn_num_tpg_nps--;
	spin_unlock(&tpg->tpg_np_lock);

	return iscsit_tpg_release_np(tpg_np, tpg, np);
}
542
543int iscsit_tpg_set_initiator_node_queue_depth(
544 struct iscsi_portal_group *tpg,
545 unsigned char *initiatorname,
546 u32 queue_depth,
547 int force)
548{
549 return core_tpg_set_initiator_node_queue_depth(&tpg->tpg_se_tpg,
550 initiatorname, queue_depth, force);
551}
552
/*
 * Enable (1) or disable (0) authentication enforcement on @tpg by
 * rewriting the AuthMethod parameter value in place:
 *
 *  - enabling removes the "None" token (and its neighboring comma)
 *    from the comma-separated value;
 *  - disabling appends ",None" to the value if not already present.
 *
 * Returns 0 on success, -1 for a bad @authentication value, -EINVAL on
 * parameter lookup/update failure.
 *
 * NOTE(review): when removing "None", `none--` assumes the token is
 * always preceded by a comma — confirm AuthMethod values never start
 * with "None" followed by no comma in another position.
 * NOTE(review): strncat(buf1, ",", strlen(","))'s third argument is the
 * SOURCE length, not the remaining buffer space; if param->value is
 * near 256 bytes the appends could overflow buf1 — confirm the maximum
 * AuthMethod value length upstream.
 */
int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
{
	unsigned char buf1[256], buf2[256], *none = NULL;
	int len;
	struct iscsi_param *param;
	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;

	if ((authentication != 1) && (authentication != 0)) {
		pr_err("Illegal value for authentication parameter:"
			" %u, ignoring request.\n", authentication);
		return -1;
	}

	memset(buf1, 0, sizeof(buf1));
	memset(buf2, 0, sizeof(buf2));

	param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
	if (!param)
		return -EINVAL;

	if (authentication) {
		/* Strip "None" out of the current AuthMethod value. */
		snprintf(buf1, sizeof(buf1), "%s", param->value);
		none = strstr(buf1, NONE);
		if (!none)
			goto out;
		if (!strncmp(none + 4, ",", 1)) {
			/* "None" is followed by a comma... */
			if (!strcmp(buf1, none))
				/* ...and is the first token: keep the tail. */
				sprintf(buf2, "%s", none+5);
			else {
				/* ...mid-string: splice head and tail,
				 * dropping the comma before "None". */
				none--;
				*none = '\0';
				len = sprintf(buf2, "%s", buf1);
				none += 5;
				sprintf(buf2 + len, "%s", none);
			}
		} else {
			/* "None" is the last token: drop ",None". */
			none--;
			*none = '\0';
			sprintf(buf2, "%s", buf1);
		}
		if (iscsi_update_param_value(param, buf2) < 0)
			return -EINVAL;
	} else {
		/* Append ",None" unless the value already contains it. */
		snprintf(buf1, sizeof(buf1), "%s", param->value);
		none = strstr(buf1, NONE);
		if ((none))
			goto out;
		strncat(buf1, ",", strlen(","));
		strncat(buf1, NONE, strlen(NONE));
		if (iscsi_update_param_value(param, buf1) < 0)
			return -EINVAL;
	}

out:
	a->authentication = authentication;
	pr_debug("%s iSCSI Authentication Methods for TPG: %hu.\n",
		a->authentication ? "Enforcing" : "Disabling", tpg->tpgt);

	return 0;
}
613
614int iscsit_ta_login_timeout(
615 struct iscsi_portal_group *tpg,
616 u32 login_timeout)
617{
618 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
619
620 if (login_timeout > TA_LOGIN_TIMEOUT_MAX) {
621 pr_err("Requested Login Timeout %u larger than maximum"
622 " %u\n", login_timeout, TA_LOGIN_TIMEOUT_MAX);
623 return -EINVAL;
624 } else if (login_timeout < TA_LOGIN_TIMEOUT_MIN) {
625 pr_err("Requested Logout Timeout %u smaller than"
626 " minimum %u\n", login_timeout, TA_LOGIN_TIMEOUT_MIN);
627 return -EINVAL;
628 }
629
630 a->login_timeout = login_timeout;
631 pr_debug("Set Logout Timeout to %u for Target Portal Group"
632 " %hu\n", a->login_timeout, tpg->tpgt);
633
634 return 0;
635}
636
637int iscsit_ta_netif_timeout(
638 struct iscsi_portal_group *tpg,
639 u32 netif_timeout)
640{
641 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
642
643 if (netif_timeout > TA_NETIF_TIMEOUT_MAX) {
644 pr_err("Requested Network Interface Timeout %u larger"
645 " than maximum %u\n", netif_timeout,
646 TA_NETIF_TIMEOUT_MAX);
647 return -EINVAL;
648 } else if (netif_timeout < TA_NETIF_TIMEOUT_MIN) {
649 pr_err("Requested Network Interface Timeout %u smaller"
650 " than minimum %u\n", netif_timeout,
651 TA_NETIF_TIMEOUT_MIN);
652 return -EINVAL;
653 }
654
655 a->netif_timeout = netif_timeout;
656 pr_debug("Set Network Interface Timeout to %u for"
657 " Target Portal Group %hu\n", a->netif_timeout, tpg->tpgt);
658
659 return 0;
660}
661
662int iscsit_ta_generate_node_acls(
663 struct iscsi_portal_group *tpg,
664 u32 flag)
665{
666 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
667
668 if ((flag != 0) && (flag != 1)) {
669 pr_err("Illegal value %d\n", flag);
670 return -EINVAL;
671 }
672
673 a->generate_node_acls = flag;
674 pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
675 tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
676
677 return 0;
678}
679
680int iscsit_ta_default_cmdsn_depth(
681 struct iscsi_portal_group *tpg,
682 u32 tcq_depth)
683{
684 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
685
686 if (tcq_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
687 pr_err("Requested Default Queue Depth: %u larger"
688 " than maximum %u\n", tcq_depth,
689 TA_DEFAULT_CMDSN_DEPTH_MAX);
690 return -EINVAL;
691 } else if (tcq_depth < TA_DEFAULT_CMDSN_DEPTH_MIN) {
692 pr_err("Requested Default Queue Depth: %u smaller"
693 " than minimum %u\n", tcq_depth,
694 TA_DEFAULT_CMDSN_DEPTH_MIN);
695 return -EINVAL;
696 }
697
698 a->default_cmdsn_depth = tcq_depth;
699 pr_debug("iSCSI_TPG[%hu] - Set Default CmdSN TCQ Depth to %u\n",
700 tpg->tpgt, a->default_cmdsn_depth);
701
702 return 0;
703}
704
705int iscsit_ta_cache_dynamic_acls(
706 struct iscsi_portal_group *tpg,
707 u32 flag)
708{
709 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
710
711 if ((flag != 0) && (flag != 1)) {
712 pr_err("Illegal value %d\n", flag);
713 return -EINVAL;
714 }
715
716 a->cache_dynamic_acls = flag;
717 pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
718 " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
719 "Enabled" : "Disabled");
720
721 return 0;
722}
723
724int iscsit_ta_demo_mode_write_protect(
725 struct iscsi_portal_group *tpg,
726 u32 flag)
727{
728 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
729
730 if ((flag != 0) && (flag != 1)) {
731 pr_err("Illegal value %d\n", flag);
732 return -EINVAL;
733 }
734
735 a->demo_mode_write_protect = flag;
736 pr_debug("iSCSI_TPG[%hu] - Demo Mode Write Protect bit: %s\n",
737 tpg->tpgt, (a->demo_mode_write_protect) ? "ON" : "OFF");
738
739 return 0;
740}
741
742int iscsit_ta_prod_mode_write_protect(
743 struct iscsi_portal_group *tpg,
744 u32 flag)
745{
746 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
747
748 if ((flag != 0) && (flag != 1)) {
749 pr_err("Illegal value %d\n", flag);
750 return -EINVAL;
751 }
752
753 a->prod_mode_write_protect = flag;
754 pr_debug("iSCSI_TPG[%hu] - Production Mode Write Protect bit:"
755 " %s\n", tpg->tpgt, (a->prod_mode_write_protect) ?
756 "ON" : "OFF");
757
758 return 0;
759}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
new file mode 100644
index 000000000000..dda48c141a8c
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -0,0 +1,41 @@
#ifndef ISCSI_TARGET_TPG_H
#define ISCSI_TARGET_TPG_H

/*
 * Target Portal Group (TPG) lifecycle, network portal management, and
 * TPG attribute setters exported by iscsi_target_tpg.c.
 */
extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16);
extern int iscsit_load_discovery_tpg(void);
extern void iscsit_release_discovery_tpg(void);
extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
			struct iscsi_np *);
/* Acquire / release the per-TPG access mutex. */
extern int iscsit_get_tpg(struct iscsi_portal_group *);
extern void iscsit_put_tpg(struct iscsi_portal_group *);
extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *);
extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
/* TPG add/del/enable/disable relative to its owning tiqn. */
extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
			int);
extern int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *);
extern int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *, int);
extern struct iscsi_node_acl *iscsit_tpg_add_initiator_node_acl(
			struct iscsi_portal_group *, const char *, u32);
extern void iscsit_tpg_del_initiator_node_acl(struct iscsi_portal_group *,
			struct se_node_acl *);
extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsi_session *);
extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
			struct __kernel_sockaddr_storage *, char *, struct iscsi_tpg_np *,
			int);
extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
			struct iscsi_tpg_np *);
extern int iscsit_tpg_set_initiator_node_queue_depth(struct iscsi_portal_group *,
			unsigned char *, u32, int);
/* Per-TPG attribute setters (iscsit_ta_*). */
extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32);
extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32);
extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32);
extern int iscsit_ta_generate_node_acls(struct iscsi_portal_group *, u32);
extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);

#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
new file mode 100644
index 000000000000..0baac5bcebd4
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -0,0 +1,551 @@
1/*******************************************************************************
2 * This file contains the iSCSI Login Thread and Thread Queue functions.
3 *
 * (c) Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/kthread.h>
22#include <linux/list.h>
23#include <linux/bitmap.h>
24
25#include "iscsi_target_core.h"
26#include "iscsi_target_tq.h"
27#include "iscsi_target.h"
28
29static LIST_HEAD(active_ts_list);
30static LIST_HEAD(inactive_ts_list);
31static DEFINE_SPINLOCK(active_ts_lock);
32static DEFINE_SPINLOCK(inactive_ts_lock);
33static DEFINE_SPINLOCK(ts_bitmap_lock);
34
35static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
36{
37 spin_lock(&active_ts_lock);
38 list_add_tail(&ts->ts_list, &active_ts_list);
39 iscsit_global->active_ts++;
40 spin_unlock(&active_ts_lock);
41}
42
43extern void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
44{
45 spin_lock(&inactive_ts_lock);
46 list_add_tail(&ts->ts_list, &inactive_ts_list);
47 iscsit_global->inactive_ts++;
48 spin_unlock(&inactive_ts_lock);
49}
50
51static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
52{
53 spin_lock(&active_ts_lock);
54 list_del(&ts->ts_list);
55 iscsit_global->active_ts--;
56 spin_unlock(&active_ts_lock);
57}
58
59static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
60{
61 struct iscsi_thread_set *ts;
62
63 spin_lock(&inactive_ts_lock);
64 if (list_empty(&inactive_ts_list)) {
65 spin_unlock(&inactive_ts_lock);
66 return NULL;
67 }
68
69 list_for_each_entry(ts, &inactive_ts_list, ts_list)
70 break;
71
72 list_del(&ts->ts_list);
73 iscsit_global->inactive_ts--;
74 spin_unlock(&inactive_ts_lock);
75
76 return ts;
77}
78
79extern int iscsi_allocate_thread_sets(u32 thread_pair_count)
80{
81 int allocated_thread_pair_count = 0, i, thread_id;
82 struct iscsi_thread_set *ts = NULL;
83
84 for (i = 0; i < thread_pair_count; i++) {
85 ts = kzalloc(sizeof(struct iscsi_thread_set), GFP_KERNEL);
86 if (!ts) {
87 pr_err("Unable to allocate memory for"
88 " thread set.\n");
89 return allocated_thread_pair_count;
90 }
91 /*
92 * Locate the next available regision in the thread_set_bitmap
93 */
94 spin_lock(&ts_bitmap_lock);
95 thread_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
96 iscsit_global->ts_bitmap_count, get_order(1));
97 spin_unlock(&ts_bitmap_lock);
98 if (thread_id < 0) {
99 pr_err("bitmap_find_free_region() failed for"
100 " thread_set_bitmap\n");
101 kfree(ts);
102 return allocated_thread_pair_count;
103 }
104
105 ts->thread_id = thread_id;
106 ts->status = ISCSI_THREAD_SET_FREE;
107 INIT_LIST_HEAD(&ts->ts_list);
108 spin_lock_init(&ts->ts_state_lock);
109 init_completion(&ts->rx_post_start_comp);
110 init_completion(&ts->tx_post_start_comp);
111 init_completion(&ts->rx_restart_comp);
112 init_completion(&ts->tx_restart_comp);
113 init_completion(&ts->rx_start_comp);
114 init_completion(&ts->tx_start_comp);
115
116 ts->create_threads = 1;
117 ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s",
118 ISCSI_TX_THREAD_NAME);
119 if (IS_ERR(ts->tx_thread)) {
120 dump_stack();
121 pr_err("Unable to start iscsi_target_tx_thread\n");
122 break;
123 }
124
125 ts->rx_thread = kthread_run(iscsi_target_rx_thread, ts, "%s",
126 ISCSI_RX_THREAD_NAME);
127 if (IS_ERR(ts->rx_thread)) {
128 kthread_stop(ts->tx_thread);
129 pr_err("Unable to start iscsi_target_rx_thread\n");
130 break;
131 }
132 ts->create_threads = 0;
133
134 iscsi_add_ts_to_inactive_list(ts);
135 allocated_thread_pair_count++;
136 }
137
138 pr_debug("Spawned %d thread set(s) (%d total threads).\n",
139 allocated_thread_pair_count, allocated_thread_pair_count * 2);
140 return allocated_thread_pair_count;
141}
142
143extern void iscsi_deallocate_thread_sets(void)
144{
145 u32 released_count = 0;
146 struct iscsi_thread_set *ts = NULL;
147
148 while ((ts = iscsi_get_ts_from_inactive_list())) {
149
150 spin_lock_bh(&ts->ts_state_lock);
151 ts->status = ISCSI_THREAD_SET_DIE;
152 spin_unlock_bh(&ts->ts_state_lock);
153
154 if (ts->rx_thread) {
155 send_sig(SIGINT, ts->rx_thread, 1);
156 kthread_stop(ts->rx_thread);
157 }
158 if (ts->tx_thread) {
159 send_sig(SIGINT, ts->tx_thread, 1);
160 kthread_stop(ts->tx_thread);
161 }
162 /*
163 * Release this thread_id in the thread_set_bitmap
164 */
165 spin_lock(&ts_bitmap_lock);
166 bitmap_release_region(iscsit_global->ts_bitmap,
167 ts->thread_id, get_order(1));
168 spin_unlock(&ts_bitmap_lock);
169
170 released_count++;
171 kfree(ts);
172 }
173
174 if (released_count)
175 pr_debug("Stopped %d thread set(s) (%d total threads)."
176 "\n", released_count, released_count * 2);
177}
178
/*
 * Trim the inactive thread-set pool back down toward the configured
 * baseline (TARGET_THREAD_SET_COUNT), stopping and freeing surplus
 * sets the same way iscsi_deallocate_thread_sets() does.
 *
 * NOTE(review): iscsit_global->inactive_ts is read here without
 * inactive_ts_lock; presumably a stale read only causes one extra loop
 * iteration (iscsi_get_ts_from_inactive_list() re-checks under the
 * lock) — confirm.
 */
static void iscsi_deallocate_extra_thread_sets(void)
{
	u32 orig_count, released_count = 0;
	struct iscsi_thread_set *ts = NULL;

	orig_count = TARGET_THREAD_SET_COUNT;

	while ((iscsit_global->inactive_ts + 1) > orig_count) {
		ts = iscsi_get_ts_from_inactive_list();
		if (!ts)
			break;

		spin_lock_bh(&ts->ts_state_lock);
		ts->status = ISCSI_THREAD_SET_DIE;
		spin_unlock_bh(&ts->ts_state_lock);

		/* SIGINT wakes the threads so kthread_stop() can finish. */
		if (ts->rx_thread) {
			send_sig(SIGINT, ts->rx_thread, 1);
			kthread_stop(ts->rx_thread);
		}
		if (ts->tx_thread) {
			send_sig(SIGINT, ts->tx_thread, 1);
			kthread_stop(ts->tx_thread);
		}
		/*
		 * Release this thread_id in the thread_set_bitmap
		 */
		spin_lock(&ts_bitmap_lock);
		bitmap_release_region(iscsit_global->ts_bitmap,
				ts->thread_id, get_order(1));
		spin_unlock(&ts_bitmap_lock);

		released_count++;
		kfree(ts);
	}

	if (released_count) {
		pr_debug("Stopped %d thread set(s) (%d total threads)."
			"\n", released_count, released_count * 2);
	}
}
220
/*
 * Bind @conn to the pre-spawned thread set @ts and put it into service.
 *
 * Moves @ts onto the active list, publishes the conn<->ts linkage under
 * ts_state_lock, wakes the RX thread, and blocks until the TX thread
 * completes rx_post_start_comp in iscsi_tx_thread_pre_handler().
 */
void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
{
	iscsi_add_ts_to_active_list(ts);

	spin_lock_bh(&ts->ts_state_lock);
	conn->thread_set = ts;
	ts->conn = conn;
	spin_unlock_bh(&ts->ts_state_lock);
	/*
	 * Start up the RX thread and wait on rx_post_start_comp.  The RX
	 * Thread will then do the same for the TX Thread in
	 * iscsi_rx_thread_pre_handler().
	 */
	complete(&ts->rx_start_comp);
	wait_for_completion(&ts->rx_post_start_comp);
}
237
238struct iscsi_thread_set *iscsi_get_thread_set(void)
239{
240 int allocate_ts = 0;
241 struct completion comp;
242 struct iscsi_thread_set *ts = NULL;
243 /*
244 * If no inactive thread set is available on the first call to
245 * iscsi_get_ts_from_inactive_list(), sleep for a second and
246 * try again. If still none are available after two attempts,
247 * allocate a set ourselves.
248 */
249get_set:
250 ts = iscsi_get_ts_from_inactive_list();
251 if (!ts) {
252 if (allocate_ts == 2)
253 iscsi_allocate_thread_sets(1);
254
255 init_completion(&comp);
256 wait_for_completion_timeout(&comp, 1 * HZ);
257
258 allocate_ts++;
259 goto get_set;
260 }
261
262 ts->delay_inactive = 1;
263 ts->signal_sent = 0;
264 ts->thread_count = 2;
265 init_completion(&ts->rx_restart_comp);
266 init_completion(&ts->tx_restart_comp);
267
268 return ts;
269}
270
271void iscsi_set_thread_clear(struct iscsi_conn *conn, u8 thread_clear)
272{
273 struct iscsi_thread_set *ts = NULL;
274
275 if (!conn->thread_set) {
276 pr_err("struct iscsi_conn->thread_set is NULL\n");
277 return;
278 }
279 ts = conn->thread_set;
280
281 spin_lock_bh(&ts->ts_state_lock);
282 ts->thread_clear &= ~thread_clear;
283
284 if ((thread_clear & ISCSI_CLEAR_RX_THREAD) &&
285 (ts->blocked_threads & ISCSI_BLOCK_RX_THREAD))
286 complete(&ts->rx_restart_comp);
287 else if ((thread_clear & ISCSI_CLEAR_TX_THREAD) &&
288 (ts->blocked_threads & ISCSI_BLOCK_TX_THREAD))
289 complete(&ts->tx_restart_comp);
290 spin_unlock_bh(&ts->ts_state_lock);
291}
292
293void iscsi_set_thread_set_signal(struct iscsi_conn *conn, u8 signal_sent)
294{
295 struct iscsi_thread_set *ts = NULL;
296
297 if (!conn->thread_set) {
298 pr_err("struct iscsi_conn->thread_set is NULL\n");
299 return;
300 }
301 ts = conn->thread_set;
302
303 spin_lock_bh(&ts->ts_state_lock);
304 ts->signal_sent |= signal_sent;
305 spin_unlock_bh(&ts->ts_state_lock);
306}
307
/*
 * Reset a connection's thread set so it can be returned to the pool.
 *
 * Runs in the context of either the set's own RX or TX thread, detected
 * by comparing current->comm against the iscsi_trx/iscsi_ttx names.  The
 * calling thread signals its peer (unless it was signalled already, per
 * ts->signal_sent) and blocks on the matching restart completion until
 * the peer parks itself in its pre-handler.
 *
 * Always returns 0; BUG()s if the conn or thread set pointer is NULL.
 */
int iscsi_release_thread_set(struct iscsi_conn *conn)
{
	int thread_called = 0;
	struct iscsi_thread_set *ts = NULL;

	if (!conn || !conn->thread_set) {
		pr_err("connection or thread set pointer is NULL\n");
		BUG();
	}
	ts = conn->thread_set;

	spin_lock_bh(&ts->ts_state_lock);
	ts->status = ISCSI_THREAD_SET_RESET;

	/* Determine which of the set's two threads we are running as. */
	if (!strncmp(current->comm, ISCSI_RX_THREAD_NAME,
			strlen(ISCSI_RX_THREAD_NAME)))
		thread_called = ISCSI_RX_THREAD;
	else if (!strncmp(current->comm, ISCSI_TX_THREAD_NAME,
			strlen(ISCSI_TX_THREAD_NAME)))
		thread_called = ISCSI_TX_THREAD;

	if (ts->rx_thread && (thread_called == ISCSI_TX_THREAD) &&
	   (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) {

		/* Signal the RX peer once, then wait for it to park. */
		if (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD)) {
			send_sig(SIGINT, ts->rx_thread, 1);
			ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
		}
		ts->blocked_threads |= ISCSI_BLOCK_RX_THREAD;
		/* Drop the lock while blocking; the peer needs it to park. */
		spin_unlock_bh(&ts->ts_state_lock);
		wait_for_completion(&ts->rx_restart_comp);
		spin_lock_bh(&ts->ts_state_lock);
		ts->blocked_threads &= ~ISCSI_BLOCK_RX_THREAD;
	}
	if (ts->tx_thread && (thread_called == ISCSI_RX_THREAD) &&
	   (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) {

		/* Same dance for the TX peer. */
		if (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD)) {
			send_sig(SIGINT, ts->tx_thread, 1);
			ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
		}
		ts->blocked_threads |= ISCSI_BLOCK_TX_THREAD;
		spin_unlock_bh(&ts->ts_state_lock);
		wait_for_completion(&ts->tx_restart_comp);
		spin_lock_bh(&ts->ts_state_lock);
		ts->blocked_threads &= ~ISCSI_BLOCK_TX_THREAD;
	}

	ts->conn = NULL;
	ts->status = ISCSI_THREAD_SET_FREE;
	spin_unlock_bh(&ts->ts_state_lock);

	return 0;
}
362
/*
 * Force an active thread set through reinstatement by signalling both of
 * its threads (each at most once, per ts->signal_sent).
 *
 * Returns 0 on success, or -1 if the connection has no thread set or the
 * set is not currently ISCSI_THREAD_SET_ACTIVE.
 */
int iscsi_thread_set_force_reinstatement(struct iscsi_conn *conn)
{
	struct iscsi_thread_set *ts;

	if (!conn->thread_set)
		return -1;
	ts = conn->thread_set;

	spin_lock_bh(&ts->ts_state_lock);
	if (ts->status != ISCSI_THREAD_SET_ACTIVE) {
		spin_unlock_bh(&ts->ts_state_lock);
		return -1;
	}

	if (ts->tx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD))) {
		send_sig(SIGINT, ts->tx_thread, 1);
		ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
	}
	if (ts->rx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD))) {
		send_sig(SIGINT, ts->rx_thread, 1);
		ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
	}
	spin_unlock_bh(&ts->ts_state_lock);

	return 0;
}
389
390static void iscsi_check_to_add_additional_sets(void)
391{
392 int thread_sets_add;
393
394 spin_lock(&inactive_ts_lock);
395 thread_sets_add = iscsit_global->inactive_ts;
396 spin_unlock(&inactive_ts_lock);
397 if (thread_sets_add == 1)
398 iscsi_allocate_thread_sets(1);
399}
400
401static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts)
402{
403 spin_lock_bh(&ts->ts_state_lock);
404 if ((ts->status == ISCSI_THREAD_SET_DIE) || signal_pending(current)) {
405 spin_unlock_bh(&ts->ts_state_lock);
406 return -1;
407 }
408 spin_unlock_bh(&ts->ts_state_lock);
409
410 return 0;
411}
412
/*
 * Park/startup handler run by the RX kthread at the top of its loop.
 *
 * Re-parks the thread set (moving it back to the inactive list when both
 * threads have re-entered), acknowledges a pending reset via
 * rx_restart_comp, then sleeps on rx_start_comp until
 * iscsi_activate_thread_set() hands the set a new connection.  On
 * wakeup it starts the TX thread and waits for its post-start ack.
 *
 * Returns the connection to service, or NULL if the thread should exit.
 */
struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
{
	int ret;

	spin_lock_bh(&ts->ts_state_lock);
	if (ts->create_threads) {
		/* First entry after kthread creation: go straight to sleep. */
		spin_unlock_bh(&ts->ts_state_lock);
		goto sleep;
	}

	flush_signals(current);

	/* Last of the two threads back in: return the set to the pool. */
	if (ts->delay_inactive && (--ts->thread_count == 0)) {
		spin_unlock_bh(&ts->ts_state_lock);
		iscsi_del_ts_from_active_list(ts);

		if (!iscsit_global->in_shutdown)
			iscsi_deallocate_extra_thread_sets();

		iscsi_add_ts_to_inactive_list(ts);
		spin_lock_bh(&ts->ts_state_lock);
	}

	/* Ack a reset so iscsi_release_thread_set() can stop waiting. */
	if ((ts->status == ISCSI_THREAD_SET_RESET) &&
	    (ts->thread_clear & ISCSI_CLEAR_RX_THREAD))
		complete(&ts->rx_restart_comp);

	ts->thread_clear &= ~ISCSI_CLEAR_RX_THREAD;
	spin_unlock_bh(&ts->ts_state_lock);
sleep:
	ret = wait_for_completion_interruptible(&ts->rx_start_comp);
	if (ret != 0)
		return NULL;

	if (iscsi_signal_thread_pre_handler(ts) < 0)
		return NULL;

	if (!ts->conn) {
		pr_err("struct iscsi_thread_set->conn is NULL for"
			" thread_id: %d, going back to sleep\n", ts->thread_id);
		goto sleep;
	}
	iscsi_check_to_add_additional_sets();
	/*
	 * The RX Thread starts up the TX Thread and sleeps.
	 */
	ts->thread_clear |= ISCSI_CLEAR_RX_THREAD;
	complete(&ts->tx_start_comp);
	wait_for_completion(&ts->tx_post_start_comp);

	return ts->conn;
}
465
/*
 * Park/startup handler run by the TX kthread at the top of its loop.
 *
 * Mirror of iscsi_rx_thread_pre_handler() for the TX side: re-parks the
 * set, acks a pending reset via tx_restart_comp, sleeps on tx_start_comp
 * until the RX thread starts it, then releases both post-start waiters
 * and marks the set active.
 *
 * Returns the connection to service, or NULL if the thread should exit.
 */
struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
{
	int ret;

	spin_lock_bh(&ts->ts_state_lock);
	if (ts->create_threads) {
		/* First entry after kthread creation: go straight to sleep. */
		spin_unlock_bh(&ts->ts_state_lock);
		goto sleep;
	}

	flush_signals(current);

	/* Last of the two threads back in: return the set to the pool. */
	if (ts->delay_inactive && (--ts->thread_count == 0)) {
		spin_unlock_bh(&ts->ts_state_lock);
		iscsi_del_ts_from_active_list(ts);

		if (!iscsit_global->in_shutdown)
			iscsi_deallocate_extra_thread_sets();

		iscsi_add_ts_to_inactive_list(ts);
		spin_lock_bh(&ts->ts_state_lock);
	}
	/* Ack a reset so iscsi_release_thread_set() can stop waiting. */
	if ((ts->status == ISCSI_THREAD_SET_RESET) &&
	    (ts->thread_clear & ISCSI_CLEAR_TX_THREAD))
		complete(&ts->tx_restart_comp);

	ts->thread_clear &= ~ISCSI_CLEAR_TX_THREAD;
	spin_unlock_bh(&ts->ts_state_lock);
sleep:
	ret = wait_for_completion_interruptible(&ts->tx_start_comp);
	if (ret != 0)
		return NULL;

	if (iscsi_signal_thread_pre_handler(ts) < 0)
		return NULL;

	if (!ts->conn) {
		pr_err("struct iscsi_thread_set->conn is NULL for "
			" thread_id: %d, going back to sleep\n",
			ts->thread_id);
		goto sleep;
	}

	iscsi_check_to_add_additional_sets();
	/*
	 * From the TX thread, up the tx_post_start_comp that the RX Thread is
	 * sleeping on in iscsi_rx_thread_pre_handler(), then up the
	 * rx_post_start_comp that iscsi_activate_thread_set() is sleeping on.
	 */
	ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
	complete(&ts->tx_post_start_comp);
	complete(&ts->rx_post_start_comp);

	spin_lock_bh(&ts->ts_state_lock);
	ts->status = ISCSI_THREAD_SET_ACTIVE;
	spin_unlock_bh(&ts->ts_state_lock);

	return ts->conn;
}
525
526int iscsi_thread_set_init(void)
527{
528 int size;
529
530 iscsit_global->ts_bitmap_count = ISCSI_TS_BITMAP_BITS;
531
532 size = BITS_TO_LONGS(iscsit_global->ts_bitmap_count) * sizeof(long);
533 iscsit_global->ts_bitmap = kzalloc(size, GFP_KERNEL);
534 if (!iscsit_global->ts_bitmap) {
535 pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
536 return -ENOMEM;
537 }
538
539 spin_lock_init(&active_ts_lock);
540 spin_lock_init(&inactive_ts_lock);
541 spin_lock_init(&ts_bitmap_lock);
542 INIT_LIST_HEAD(&active_ts_list);
543 INIT_LIST_HEAD(&inactive_ts_list);
544
545 return 0;
546}
547
/* Release the thread-id bitmap allocated by iscsi_thread_set_init(). */
void iscsi_thread_set_free(void)
{
	kfree(iscsit_global->ts_bitmap);
}
diff --git a/drivers/target/iscsi/iscsi_target_tq.h b/drivers/target/iscsi/iscsi_target_tq.h
new file mode 100644
index 000000000000..26e6a95ec203
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tq.h
@@ -0,0 +1,88 @@
#ifndef ISCSI_THREAD_QUEUE_H
#define ISCSI_THREAD_QUEUE_H

/*
 * Interface for managing thread sets: pre-spawned RX/TX kernel thread
 * pairs that are bound to iSCSI connections on demand.
 */
extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *);
extern void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *);
extern int iscsi_allocate_thread_sets(u32);
extern void iscsi_deallocate_thread_sets(void);
extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *);
extern struct iscsi_thread_set *iscsi_get_thread_set(void);
extern void iscsi_set_thread_clear(struct iscsi_conn *, u8);
extern void iscsi_set_thread_set_signal(struct iscsi_conn *, u8);
extern int iscsi_release_thread_set(struct iscsi_conn *);
extern struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *);
extern struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *);
extern int iscsi_thread_set_init(void);
extern void iscsi_thread_set_free(void);

/* Per-connection RX/TX kthread entry points. */
extern int iscsi_target_tx_thread(void *);
extern int iscsi_target_rx_thread(void *);

/* Baseline number of thread sets kept pre-allocated in the pool. */
#define TARGET_THREAD_SET_COUNT			4

/* Identifies which thread of a set the current task is. */
#define ISCSI_RX_THREAD                         1
#define ISCSI_TX_THREAD                         2
#define ISCSI_RX_THREAD_NAME			"iscsi_trx"
#define ISCSI_TX_THREAD_NAME			"iscsi_ttx"
/* Bit flags for iscsi_thread_set->blocked_threads. */
#define ISCSI_BLOCK_RX_THREAD			0x1
#define ISCSI_BLOCK_TX_THREAD			0x2
/* Bit flags for iscsi_thread_set->thread_clear. */
#define ISCSI_CLEAR_RX_THREAD			0x1
#define ISCSI_CLEAR_TX_THREAD			0x2
/* Bit flags for iscsi_thread_set->signal_sent. */
#define ISCSI_SIGNAL_RX_THREAD			0x1
#define ISCSI_SIGNAL_TX_THREAD			0x2

/* struct iscsi_thread_set->status */
#define ISCSI_THREAD_SET_FREE			1
#define ISCSI_THREAD_SET_ACTIVE			2
#define ISCSI_THREAD_SET_DIE			3
#define ISCSI_THREAD_SET_RESET			4
#define ISCSI_THREAD_SET_DEALLOCATE_THREADS	5

/* By default allow a maximum of 32K iSCSI connections */
#define ISCSI_TS_BITMAP_BITS			32768

struct iscsi_thread_set {
	/* flags used for blocking and restarting sets */
	int	blocked_threads;
	/* flag for creating threads */
	int	create_threads;
	/* flag for delaying re-add to the inactive list */
	int	delay_inactive;
	/* status for thread set */
	int	status;
	/* which threads have had signals sent */
	int	signal_sent;
	/* flag for which threads exited first */
	int	thread_clear;
	/* Active threads in the thread set */
	int	thread_count;
	/* Unique thread ID */
	u32	thread_id;
	/* pointer to connection if set is active */
	struct iscsi_conn	*conn;
	/* used for controlling ts state accesses */
	spinlock_t	ts_state_lock;
	/* Used for rx side post startup */
	struct completion	rx_post_start_comp;
	/* Used for tx side post startup */
	struct completion	tx_post_start_comp;
	/* used for restarting thread queue */
	struct completion	rx_restart_comp;
	/* used for restarting thread queue */
	struct completion	tx_restart_comp;
	/* used for normal unused blocking */
	struct completion	rx_start_comp;
	/* used for normal unused blocking */
	struct completion	tx_start_comp;
	/* OS descriptor for rx thread */
	struct task_struct	*rx_thread;
	/* OS descriptor for tx thread */
	struct task_struct	*tx_thread;
	/* list head for the active/inactive thread-set lists */
	struct list_head	ts_list;
};

#endif   /*** ISCSI_THREAD_QUEUE_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
new file mode 100644
index 000000000000..a1acb0167902
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -0,0 +1,1819 @@
1/*******************************************************************************
2 * This file contains the iSCSI Target specific utility functions.
3 *
4 * \u00a9 Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/list.h>
22#include <scsi/scsi_tcq.h>
23#include <scsi/iscsi_proto.h>
24#include <target/target_core_base.h>
25#include <target/target_core_transport.h>
26#include <target/target_core_tmr.h>
27#include <target/target_core_fabric_ops.h>
28#include <target/target_core_configfs.h>
29
30#include "iscsi_target_core.h"
31#include "iscsi_target_parameters.h"
32#include "iscsi_target_seq_pdu_list.h"
33#include "iscsi_target_datain_values.h"
34#include "iscsi_target_erl0.h"
35#include "iscsi_target_erl1.h"
36#include "iscsi_target_erl2.h"
37#include "iscsi_target_tpg.h"
38#include "iscsi_target_tq.h"
39#include "iscsi_target_util.h"
40#include "iscsi_target.h"
41
/*
 * Debug helper: hex-dump @len bytes of @buff via pr_debug(), 16 bytes
 * per output line, prefixed with the source line number.
 *
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and is safe inside un-braced if/else bodies (the bare-brace form was
 * not).  @len is parenthesized at each use; note it is still evaluated
 * more than once, so callers must not pass an expression with side
 * effects.
 */
#define PRINT_BUFF(buff, len)						\
do {									\
	int zzz;							\
									\
	pr_debug("%d:\n", __LINE__);					\
	for (zzz = 0; zzz < (len); zzz++) {				\
		if (zzz % 16 == 0) {					\
			if (zzz)					\
				pr_debug("\n");				\
			pr_debug("%4i: ", zzz);				\
		}							\
		pr_debug("%02x ", (unsigned char) (buff)[zzz]);		\
	}								\
	if (((len) + 1) % 16)						\
		pr_debug("\n");						\
} while (0)
58
59extern struct list_head g_tiqn_list;
60extern spinlock_t tiqn_lock;
61
/*
 * Allocate and queue a new R2T descriptor for @cmd, then schedule an
 * ISTATE_SEND_R2T request so the TX thread transmits it.
 *
 * Called with cmd->r2t_lock held.  The lock is dropped around
 * iscsit_add_cmd_to_immediate_queue() (which takes the conn queue lock
 * and wakes the TX thread) and re-acquired before returning, so the
 * caller's locking state is unchanged.
 *
 * Returns 0 on success or -1 on allocation failure.
 */
int iscsit_add_r2t_to_list(
	struct iscsi_cmd *cmd,
	u32 offset,
	u32 xfer_len,
	int recovery,
	u32 r2t_sn)
{
	struct iscsi_r2t *r2t;

	r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
	if (!r2t) {
		pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
		return -1;
	}
	INIT_LIST_HEAD(&r2t->r2t_list);

	r2t->recovery_r2t = recovery;
	/* r2t_sn == 0 means "assign the command's next R2T sequence number". */
	r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
	r2t->offset = offset;
	r2t->xfer_len = xfer_len;
	list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
	spin_unlock_bh(&cmd->r2t_lock);

	iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);

	spin_lock_bh(&cmd->r2t_lock);
	return 0;
}
93
94struct iscsi_r2t *iscsit_get_r2t_for_eos(
95 struct iscsi_cmd *cmd,
96 u32 offset,
97 u32 length)
98{
99 struct iscsi_r2t *r2t;
100
101 spin_lock_bh(&cmd->r2t_lock);
102 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
103 if ((r2t->offset <= offset) &&
104 (r2t->offset + r2t->xfer_len) >= (offset + length)) {
105 spin_unlock_bh(&cmd->r2t_lock);
106 return r2t;
107 }
108 }
109 spin_unlock_bh(&cmd->r2t_lock);
110
111 pr_err("Unable to locate R2T for Offset: %u, Length:"
112 " %u\n", offset, length);
113 return NULL;
114}
115
116struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
117{
118 struct iscsi_r2t *r2t;
119
120 spin_lock_bh(&cmd->r2t_lock);
121 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
122 if (!r2t->sent_r2t) {
123 spin_unlock_bh(&cmd->r2t_lock);
124 return r2t;
125 }
126 }
127 spin_unlock_bh(&cmd->r2t_lock);
128
129 pr_err("Unable to locate next R2T to send for ITT:"
130 " 0x%08x.\n", cmd->init_task_tag);
131 return NULL;
132}
133
/*
 * Unlink @r2t from its command's list and free it.
 * Called with cmd->r2t_lock held.
 */
void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
{
	list_del(&r2t->r2t_list);
	kmem_cache_free(lio_r2t_cache, r2t);
}
142
143void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
144{
145 struct iscsi_r2t *r2t, *r2t_tmp;
146
147 spin_lock_bh(&cmd->r2t_lock);
148 list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
149 iscsit_free_r2t(r2t, cmd);
150 spin_unlock_bh(&cmd->r2t_lock);
151}
152
153/*
154 * May be called from software interrupt (timer) context for allocating
155 * iSCSI NopINs.
156 */
157struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
158{
159 struct iscsi_cmd *cmd;
160
161 cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
162 if (!cmd) {
163 pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
164 return NULL;
165 }
166
167 cmd->conn = conn;
168 INIT_LIST_HEAD(&cmd->i_list);
169 INIT_LIST_HEAD(&cmd->datain_list);
170 INIT_LIST_HEAD(&cmd->cmd_r2t_list);
171 init_completion(&cmd->reject_comp);
172 spin_lock_init(&cmd->datain_lock);
173 spin_lock_init(&cmd->dataout_timeout_lock);
174 spin_lock_init(&cmd->istate_lock);
175 spin_lock_init(&cmd->error_lock);
176 spin_lock_init(&cmd->r2t_lock);
177
178 return cmd;
179}
180
181/*
182 * Called from iscsi_handle_scsi_cmd()
183 */
184struct iscsi_cmd *iscsit_allocate_se_cmd(
185 struct iscsi_conn *conn,
186 u32 data_length,
187 int data_direction,
188 int iscsi_task_attr)
189{
190 struct iscsi_cmd *cmd;
191 struct se_cmd *se_cmd;
192 int sam_task_attr;
193
194 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
195 if (!cmd)
196 return NULL;
197
198 cmd->data_direction = data_direction;
199 cmd->data_length = data_length;
200 /*
201 * Figure out the SAM Task Attribute for the incoming SCSI CDB
202 */
203 if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
204 (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
205 sam_task_attr = MSG_SIMPLE_TAG;
206 else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
207 sam_task_attr = MSG_ORDERED_TAG;
208 else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
209 sam_task_attr = MSG_HEAD_TAG;
210 else if (iscsi_task_attr == ISCSI_ATTR_ACA)
211 sam_task_attr = MSG_ACA_TAG;
212 else {
213 pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
214 " MSG_SIMPLE_TAG\n", iscsi_task_attr);
215 sam_task_attr = MSG_SIMPLE_TAG;
216 }
217
218 se_cmd = &cmd->se_cmd;
219 /*
220 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
221 */
222 transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
223 conn->sess->se_sess, data_length, data_direction,
224 sam_task_attr, &cmd->sense_buffer[0]);
225 return cmd;
226}
227
228struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
229 struct iscsi_conn *conn,
230 u8 function)
231{
232 struct iscsi_cmd *cmd;
233 struct se_cmd *se_cmd;
234 u8 tcm_function;
235
236 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
237 if (!cmd)
238 return NULL;
239
240 cmd->data_direction = DMA_NONE;
241
242 cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
243 if (!cmd->tmr_req) {
244 pr_err("Unable to allocate memory for"
245 " Task Management command!\n");
246 return NULL;
247 }
248 /*
249 * TASK_REASSIGN for ERL=2 / connection stays inside of
250 * LIO-Target $FABRIC_MOD
251 */
252 if (function == ISCSI_TM_FUNC_TASK_REASSIGN)
253 return cmd;
254
255 se_cmd = &cmd->se_cmd;
256 /*
257 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
258 */
259 transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
260 conn->sess->se_sess, 0, DMA_NONE,
261 MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
262
263 switch (function) {
264 case ISCSI_TM_FUNC_ABORT_TASK:
265 tcm_function = TMR_ABORT_TASK;
266 break;
267 case ISCSI_TM_FUNC_ABORT_TASK_SET:
268 tcm_function = TMR_ABORT_TASK_SET;
269 break;
270 case ISCSI_TM_FUNC_CLEAR_ACA:
271 tcm_function = TMR_CLEAR_ACA;
272 break;
273 case ISCSI_TM_FUNC_CLEAR_TASK_SET:
274 tcm_function = TMR_CLEAR_TASK_SET;
275 break;
276 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
277 tcm_function = TMR_LUN_RESET;
278 break;
279 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
280 tcm_function = TMR_TARGET_WARM_RESET;
281 break;
282 case ISCSI_TM_FUNC_TARGET_COLD_RESET:
283 tcm_function = TMR_TARGET_COLD_RESET;
284 break;
285 default:
286 pr_err("Unknown iSCSI TMR Function:"
287 " 0x%02x\n", function);
288 goto out;
289 }
290
291 se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
292 (void *)cmd->tmr_req, tcm_function);
293 if (!se_cmd->se_tmr_req)
294 goto out;
295
296 cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
297
298 return cmd;
299out:
300 iscsit_release_cmd(cmd);
301 if (se_cmd)
302 transport_free_se_cmd(se_cmd);
303 return NULL;
304}
305
306int iscsit_decide_list_to_build(
307 struct iscsi_cmd *cmd,
308 u32 immediate_data_length)
309{
310 struct iscsi_build_list bl;
311 struct iscsi_conn *conn = cmd->conn;
312 struct iscsi_session *sess = conn->sess;
313 struct iscsi_node_attrib *na;
314
315 if (sess->sess_ops->DataSequenceInOrder &&
316 sess->sess_ops->DataPDUInOrder)
317 return 0;
318
319 if (cmd->data_direction == DMA_NONE)
320 return 0;
321
322 na = iscsit_tpg_get_node_attrib(sess);
323 memset(&bl, 0, sizeof(struct iscsi_build_list));
324
325 if (cmd->data_direction == DMA_FROM_DEVICE) {
326 bl.data_direction = ISCSI_PDU_READ;
327 bl.type = PDULIST_NORMAL;
328 if (na->random_datain_pdu_offsets)
329 bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
330 if (na->random_datain_seq_offsets)
331 bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
332 } else {
333 bl.data_direction = ISCSI_PDU_WRITE;
334 bl.immediate_data_length = immediate_data_length;
335 if (na->random_r2t_offsets)
336 bl.randomize |= RANDOM_R2T_OFFSETS;
337
338 if (!cmd->immediate_data && !cmd->unsolicited_data)
339 bl.type = PDULIST_NORMAL;
340 else if (cmd->immediate_data && !cmd->unsolicited_data)
341 bl.type = PDULIST_IMMEDIATE;
342 else if (!cmd->immediate_data && cmd->unsolicited_data)
343 bl.type = PDULIST_UNSOLICITED;
344 else if (cmd->immediate_data && cmd->unsolicited_data)
345 bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
346 }
347
348 return iscsit_do_build_list(cmd, &bl);
349}
350
351struct iscsi_seq *iscsit_get_seq_holder_for_datain(
352 struct iscsi_cmd *cmd,
353 u32 seq_send_order)
354{
355 u32 i;
356
357 for (i = 0; i < cmd->seq_count; i++)
358 if (cmd->seq_list[i].seq_send_order == seq_send_order)
359 return &cmd->seq_list[i];
360
361 return NULL;
362}
363
364struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
365{
366 u32 i;
367
368 if (!cmd->seq_list) {
369 pr_err("struct iscsi_cmd->seq_list is NULL!\n");
370 return NULL;
371 }
372
373 for (i = 0; i < cmd->seq_count; i++) {
374 if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
375 continue;
376 if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
377 cmd->seq_send_order++;
378 return &cmd->seq_list[i];
379 }
380 }
381
382 return NULL;
383}
384
385struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
386 struct iscsi_cmd *cmd,
387 u32 r2t_sn)
388{
389 struct iscsi_r2t *r2t;
390
391 spin_lock_bh(&cmd->r2t_lock);
392 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
393 if (r2t->r2t_sn == r2t_sn) {
394 spin_unlock_bh(&cmd->r2t_lock);
395 return r2t;
396 }
397 }
398 spin_unlock_bh(&cmd->r2t_lock);
399
400 return NULL;
401}
402
/*
 * Classify a received CmdSN against the session's ExpCmdSN/MaxCmdSN
 * window using serial-number arithmetic (iscsi_sna_gt).
 *
 * Returns one of:
 *   CMDSN_ERROR_CANNOT_RECOVER - CmdSN beyond MaxCmdSN (protocol error)
 *   CMDSN_NORMAL_OPERATION     - CmdSN == ExpCmdSN; ExpCmdSN advanced
 *   CMDSN_HIGHER_THAN_EXP      - out-of-order, queue for later
 *   CMDSN_LOWER_THAN_EXP       - stale/duplicate, ignore
 *
 * Caller is expected to serialize CmdSN handling (see
 * iscsit_sequence_cmd(), which holds sess->cmdsn_mutex).
 */
static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
{
	int ret;

	/*
	 * This is the proper method of checking received CmdSN against
	 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
	 * of order CmdSNs due to multiple connection sessions and/or
	 * CRC failures.
	 */
	if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
		pr_err("Received CmdSN: 0x%08x is greater than"
			" MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
			sess->max_cmd_sn);
		ret = CMDSN_ERROR_CANNOT_RECOVER;

	} else if (cmdsn == sess->exp_cmd_sn) {
		sess->exp_cmd_sn++;
		pr_debug("Received CmdSN matches ExpCmdSN,"
			" incremented ExpCmdSN to: 0x%08x\n",
			sess->exp_cmd_sn);
		ret = CMDSN_NORMAL_OPERATION;

	} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
		pr_debug("Received CmdSN: 0x%08x is greater"
			" than ExpCmdSN: 0x%08x, not acknowledging.\n",
			cmdsn, sess->exp_cmd_sn);
		ret = CMDSN_HIGHER_THAN_EXP;

	} else {
		pr_err("Received CmdSN: 0x%08x is less than"
			" ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
			sess->exp_cmd_sn);
		ret = CMDSN_LOWER_THAN_EXP;
	}

	return ret;
}
441
/*
 * Commands may be received out of order if MC/S is in use.
 * Ensure they are executed in CmdSN order.
 *
 * Serialized by sess->cmdsn_mutex.  In-order commands execute
 * immediately (and drain any queued out-of-order successors); ahead-of-
 * window commands are queued; stale ones are scheduled for removal.
 */
int iscsit_sequence_cmd(
	struct iscsi_conn *conn,
	struct iscsi_cmd *cmd,
	u32 cmdsn)
{
	int ret;
	int cmdsn_ret;

	mutex_lock(&conn->sess->cmdsn_mutex);

	cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, cmdsn);
	switch (cmdsn_ret) {
	case CMDSN_NORMAL_OPERATION:
		ret = iscsit_execute_cmd(cmd, 0);
		/* Executing this cmd may unblock queued out-of-order ones. */
		if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
			iscsit_execute_ooo_cmdsns(conn->sess);
		break;
	case CMDSN_HIGHER_THAN_EXP:
		ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, cmdsn);
		break;
	case CMDSN_LOWER_THAN_EXP:
		/* Stale CmdSN: queue the command for removal. */
		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
		ret = cmdsn_ret;
		break;
	default:
		ret = cmdsn_ret;
		break;
	}
	mutex_unlock(&conn->sess->cmdsn_mutex);

	return ret;
}
479
/*
 * Validate an unsolicited DataOut PDU against the negotiated session
 * parameters before accepting its payload.
 *
 * Rejects (CHECK CONDITION + -1) when:
 *  - InitialR2T=Yes forbids unsolicited data entirely,
 *  - the accumulated burst would exceed FirstBurstLength, or
 *  - a final PDU closes the burst at a length that matches neither the
 *    total expected transfer nor FirstBurstLength.
 * Returns 0 when the PDU is acceptable.
 */
int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
{
	struct iscsi_conn *conn = cmd->conn;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	if (conn->sess->sess_ops->InitialR2T) {
		pr_err("Received unexpected unsolicited data"
			" while InitialR2T=Yes, protocol error.\n");
		transport_send_check_condition_and_sense(se_cmd,
				TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
		return -1;
	}

	if ((cmd->first_burst_len + payload_length) >
	     conn->sess->sess_ops->FirstBurstLength) {
		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
			" for this Unsolicited DataOut Burst.\n",
			(cmd->first_burst_len + payload_length),
				conn->sess->sess_ops->FirstBurstLength);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}

	/* Non-final PDUs need no closing-length check. */
	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
		return 0;

	if (((cmd->first_burst_len + payload_length) != cmd->data_length) &&
	    ((cmd->first_burst_len + payload_length) !=
	      conn->sess->sess_ops->FirstBurstLength)) {
		pr_err("Unsolicited non-immediate data received %u"
			" does not equal FirstBurstLength: %u, and does"
			" not equal ExpXferLen %u.\n",
			(cmd->first_burst_len + payload_length),
			conn->sess->sess_ops->FirstBurstLength, cmd->data_length);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}
	return 0;
}
523
524struct iscsi_cmd *iscsit_find_cmd_from_itt(
525 struct iscsi_conn *conn,
526 u32 init_task_tag)
527{
528 struct iscsi_cmd *cmd;
529
530 spin_lock_bh(&conn->cmd_lock);
531 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
532 if (cmd->init_task_tag == init_task_tag) {
533 spin_unlock_bh(&conn->cmd_lock);
534 return cmd;
535 }
536 }
537 spin_unlock_bh(&conn->cmd_lock);
538
539 pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
540 init_task_tag, conn->cid);
541 return NULL;
542}
543
544struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
545 struct iscsi_conn *conn,
546 u32 init_task_tag,
547 u32 length)
548{
549 struct iscsi_cmd *cmd;
550
551 spin_lock_bh(&conn->cmd_lock);
552 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
553 if (cmd->init_task_tag == init_task_tag) {
554 spin_unlock_bh(&conn->cmd_lock);
555 return cmd;
556 }
557 }
558 spin_unlock_bh(&conn->cmd_lock);
559
560 pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
561 " dumping payload\n", init_task_tag, conn->cid);
562 if (length)
563 iscsit_dump_data_payload(conn, length, 1);
564
565 return NULL;
566}
567
568struct iscsi_cmd *iscsit_find_cmd_from_ttt(
569 struct iscsi_conn *conn,
570 u32 targ_xfer_tag)
571{
572 struct iscsi_cmd *cmd = NULL;
573
574 spin_lock_bh(&conn->cmd_lock);
575 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
576 if (cmd->targ_xfer_tag == targ_xfer_tag) {
577 spin_unlock_bh(&conn->cmd_lock);
578 return cmd;
579 }
580 }
581 spin_unlock_bh(&conn->cmd_lock);
582
583 pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
584 targ_xfer_tag, conn->cid);
585 return NULL;
586}
587
/*
 * Search both connection-recovery command lists of @sess for the command
 * with the given Initiator Task Tag.
 *
 * Returns -2 if found on an INACTIVE recovery entry (command still
 * pending recovery), 0 if found on an ACTIVE recovery entry (ready for
 * reassignment), or -1 if not found.  On success *cmd_ptr and *cr_ptr
 * are set to the matching command and its recovery entry.
 */
int iscsit_find_cmd_for_recovery(
	struct iscsi_session *sess,
	struct iscsi_cmd **cmd_ptr,
	struct iscsi_conn_recovery **cr_ptr,
	u32 init_task_tag)
{
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_conn_recovery *cr;
	/*
	 * Scan through the inactive connection recovery list's command list.
	 * If init_task_tag matches the command is still pending recovery.
	 */
	spin_lock(&sess->cr_i_lock);
	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_i_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return -2;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_i_lock);
	/*
	 * Scan through the active connection recovery list's command list.
	 * If init_task_tag matches the command is ready to be reassigned.
	 */
	spin_lock(&sess->cr_a_lock);
	list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_a_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return 0;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_a_lock);

	return -1;
}
639
640void iscsit_add_cmd_to_immediate_queue(
641 struct iscsi_cmd *cmd,
642 struct iscsi_conn *conn,
643 u8 state)
644{
645 struct iscsi_queue_req *qr;
646
647 qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
648 if (!qr) {
649 pr_err("Unable to allocate memory for"
650 " struct iscsi_queue_req\n");
651 return;
652 }
653 INIT_LIST_HEAD(&qr->qr_list);
654 qr->cmd = cmd;
655 qr->state = state;
656
657 spin_lock_bh(&conn->immed_queue_lock);
658 list_add_tail(&qr->qr_list, &conn->immed_queue_list);
659 atomic_inc(&cmd->immed_queue_count);
660 atomic_set(&conn->check_immediate_queue, 1);
661 spin_unlock_bh(&conn->immed_queue_lock);
662
663 wake_up_process(conn->thread_set->tx_thread);
664}
665
666struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
667{
668 struct iscsi_queue_req *qr;
669
670 spin_lock_bh(&conn->immed_queue_lock);
671 if (list_empty(&conn->immed_queue_list)) {
672 spin_unlock_bh(&conn->immed_queue_lock);
673 return NULL;
674 }
675 list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
676 break;
677
678 list_del(&qr->qr_list);
679 if (qr->cmd)
680 atomic_dec(&qr->cmd->immed_queue_count);
681 spin_unlock_bh(&conn->immed_queue_lock);
682
683 return qr;
684}
685
/*
 * Remove and free every immediate-queue request belonging to @cmd on
 * @conn.  The immed_queue_count fast path skips the list walk when the
 * command has nothing queued; a non-zero residual count afterwards
 * indicates queue accounting went out of sync and is logged.
 */
static void iscsit_remove_cmd_from_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	if (!atomic_read(&cmd->immed_queue_count)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->immed_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	if (atomic_read(&cmd->immed_queue_count)) {
		pr_err("ITT: 0x%08x immed_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->immed_queue_count));
	}
}
714
715void iscsit_add_cmd_to_response_queue(
716 struct iscsi_cmd *cmd,
717 struct iscsi_conn *conn,
718 u8 state)
719{
720 struct iscsi_queue_req *qr;
721
722 qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
723 if (!qr) {
724 pr_err("Unable to allocate memory for"
725 " struct iscsi_queue_req\n");
726 return;
727 }
728 INIT_LIST_HEAD(&qr->qr_list);
729 qr->cmd = cmd;
730 qr->state = state;
731
732 spin_lock_bh(&conn->response_queue_lock);
733 list_add_tail(&qr->qr_list, &conn->response_queue_list);
734 atomic_inc(&cmd->response_queue_count);
735 spin_unlock_bh(&conn->response_queue_lock);
736
737 wake_up_process(conn->thread_set->tx_thread);
738}
739
740struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
741{
742 struct iscsi_queue_req *qr;
743
744 spin_lock_bh(&conn->response_queue_lock);
745 if (list_empty(&conn->response_queue_list)) {
746 spin_unlock_bh(&conn->response_queue_lock);
747 return NULL;
748 }
749
750 list_for_each_entry(qr, &conn->response_queue_list, qr_list)
751 break;
752
753 list_del(&qr->qr_list);
754 if (qr->cmd)
755 atomic_dec(&qr->cmd->response_queue_count);
756 spin_unlock_bh(&conn->response_queue_lock);
757
758 return qr;
759}
760
/*
 * Remove every response-queue entry referencing @cmd, decrementing
 * cmd->response_queue_count and freeing the request for each match.
 * Returns early when the count already shows no queued references.
 * A non-zero count after the sweep indicates inconsistent bookkeeping
 * and is logged.
 */
static void iscsit_remove_cmd_from_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->response_queue_lock);
	if (!atomic_read(&cmd->response_queue_count)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
				qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->response_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);

	/* Any remaining references mean a counting bug somewhere. */
	if (atomic_read(&cmd->response_queue_count)) {
		pr_err("ITT: 0x%08x response_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->response_queue_count));
	}
}
790
/*
 * Connection teardown helper: flush both per-connection queues
 * (immediate and response).  Each pending iscsi_queue_req is unlinked,
 * its command's queue count dropped, and the request returned to
 * lio_qr_cache.
 */
void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		list_del(&qr->qr_list);
		/* qr->cmd may be NULL for connection-level requests. */
		if (qr->cmd)
			atomic_dec(&qr->cmd->immed_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	spin_lock_bh(&conn->response_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
			qr_list) {
		list_del(&qr->qr_list);
		if (qr->cmd)
			atomic_dec(&qr->cmd->response_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);
}
816
/*
 * Final release of an iscsi_cmd: free its R2T and DataIN lists, the
 * per-command allocations, the pages backing the t_mem_sg scatterlist,
 * and any queue entries still referencing it, then return the command
 * to lio_cmd_cache.
 */
void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
	struct iscsi_conn *conn = cmd->conn;
	int i;

	iscsit_free_r2ts_from_list(cmd);
	iscsit_free_all_datain_reqs(cmd);

	/* kfree(NULL) is a no-op, so never-allocated members are safe. */
	kfree(cmd->buf_ptr);
	kfree(cmd->pdu_list);
	kfree(cmd->seq_list);
	kfree(cmd->tmr_req);
	kfree(cmd->iov_data);

	/* The command owns the pages behind its memory scatterlist. */
	for (i = 0; i < cmd->t_mem_sg_nents; i++)
		__free_page(sg_page(&cmd->t_mem_sg[i]));

	kfree(cmd->t_mem_sg);

	/* conn can be NULL for commands without an active connection. */
	if (conn) {
		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
		iscsit_remove_cmd_from_response_queue(cmd, conn);
	}

	kmem_cache_free(lio_cmd_cache, cmd);
}
843
/*
 * Wait for all session references to drain before teardown proceeds.
 *
 * Returns:
 *   0 - usage count was already zero, nothing to wait for
 *   1 - slept on session_waiting_on_uc_comp until the last reference
 *       was dropped by iscsit_dec_session_usage_count()
 *   2 - count non-zero but we are in interrupt context and must not
 *       sleep; the waiting flag is left set for the final decrement
 */
int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	if (sess->session_usage_count != 0) {
		/* Publish the waiting flag under the lock, then drop it
		 * before sleeping so the decrement path can complete us. */
		sess->session_waiting_on_uc = 1;
		spin_unlock_bh(&sess->session_usage_lock);
		if (in_interrupt())
			return 2;

		wait_for_completion(&sess->session_waiting_on_uc_comp);
		return 1;
	}
	spin_unlock_bh(&sess->session_usage_lock);

	return 0;
}
860
861void iscsit_dec_session_usage_count(struct iscsi_session *sess)
862{
863 spin_lock_bh(&sess->session_usage_lock);
864 sess->session_usage_count--;
865
866 if (!sess->session_usage_count && sess->session_waiting_on_uc)
867 complete(&sess->session_waiting_on_uc_comp);
868
869 spin_unlock_bh(&sess->session_usage_lock);
870}
871
/* Take a session reference under session_usage_lock. */
void iscsit_inc_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count++;
	spin_unlock_bh(&sess->session_usage_lock);
}
878
/*
 * Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker
 * array counts needed for sync and steering.
 *
 * Walks the payload length one marker interval at a time; each interval
 * reached adds 3 iovecs (data split plus two marker halves) and 2 marker
 * slots, which iscsit_do_[rx,tx]_data() then consume.  Always returns 0.
 *
 * NOTE(review): if the negotiated interval were 0 the loop below would
 * not terminate for length >= marker — presumably the negotiation layer
 * guarantees non-zero OFMarkInt/IFMarkInt; confirm.
 */
static int iscsit_determine_sync_and_steering_counts(
	struct iscsi_conn *conn,
	struct iscsi_data_count *count)
{
	u32 length = count->data_length;
	u32 marker, markint;

	count->sync_and_steering = 1;

	/* Distance to the next marker and the recurring interval, both
	 * converted from negotiated 32-bit words to bytes. */
	marker = (count->type == ISCSI_RX_DATA) ?
			conn->of_marker : conn->if_marker;
	markint = (count->type == ISCSI_RX_DATA) ?
			(conn->conn_ops->OFMarkInt * 4) :
			(conn->conn_ops->IFMarkInt * 4);
	count->ss_iov_count = count->iov_count;

	while (length > 0) {
		if (length >= marker) {
			count->ss_iov_count += 3;
			count->ss_marker_count += 2;

			length -= marker;
			marker = markint;
		} else
			length = 0;
	}

	return 0;
}
912
/*
 * Setup conn->if_marker and conn->of_marker values based upon
 * the initial marker-less interval. (see iSCSI v19 A.2)
 *
 * On entry of/if_marker hold the byte counts consumed during login;
 * they are rewritten to the distance (in bytes) from the start of
 * full-feature phase to the next marker.  Always returns 0.
 */
int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
{
	int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
	/*
	 * IFMarkInt and OFMarkInt are negotiated as 32-bit words.
	 */
	u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
	u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);

	if (conn->conn_ops->OFMarker) {
		/*
		 * Account for the first Login Command received not
		 * via iscsi_recv_msg().
		 */
		conn->of_marker += ISCSI_HDR_LEN;
		if (conn->of_marker <= OFMarkInt) {
			conn->of_marker = (OFMarkInt - conn->of_marker);
		} else {
			/* Login spanned more than one interval: skip past
			 * the markers that would have fallen inside it,
			 * each MARKER_SIZE bytes long. */
			login_ofmarker_count = (conn->of_marker / OFMarkInt);
			next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
					(login_ofmarker_count * MARKER_SIZE);
			conn->of_marker = (next_marker - conn->of_marker);
		}
		conn->of_marker_offset = 0;
		pr_debug("Setting OFMarker value to %u based on Initial"
			" Markerless Interval.\n", conn->of_marker);
	}

	if (conn->conn_ops->IFMarker) {
		if (conn->if_marker <= IFMarkInt) {
			conn->if_marker = (IFMarkInt - conn->if_marker);
		} else {
			login_ifmarker_count = (conn->if_marker / IFMarkInt);
			next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
					(login_ifmarker_count * MARKER_SIZE);
			conn->if_marker = (next_marker - conn->if_marker);
		}
		pr_debug("Setting IFMarker value to %u based on Initial"
			" Markerless Interval.\n", conn->if_marker);
	}

	return 0;
}
960
961struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
962{
963 struct iscsi_conn *conn;
964
965 spin_lock_bh(&sess->conn_lock);
966 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
967 if ((conn->cid == cid) &&
968 (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
969 iscsit_inc_conn_usage_count(conn);
970 spin_unlock_bh(&sess->conn_lock);
971 return conn;
972 }
973 }
974 spin_unlock_bh(&sess->conn_lock);
975
976 return NULL;
977}
978
979struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
980{
981 struct iscsi_conn *conn;
982
983 spin_lock_bh(&sess->conn_lock);
984 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
985 if (conn->cid == cid) {
986 iscsit_inc_conn_usage_count(conn);
987 spin_lock(&conn->state_lock);
988 atomic_set(&conn->connection_wait_rcfr, 1);
989 spin_unlock(&conn->state_lock);
990 spin_unlock_bh(&sess->conn_lock);
991 return conn;
992 }
993 }
994 spin_unlock_bh(&sess->conn_lock);
995
996 return NULL;
997}
998
/*
 * Block until all connection references are dropped.  The waiting flag
 * is published under conn_usage_lock and the lock released before
 * sleeping, so the final iscsit_dec_conn_usage_count() can complete us.
 */
void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	if (conn->conn_usage_count != 0) {
		conn->conn_waiting_on_uc = 1;
		spin_unlock_bh(&conn->conn_usage_lock);

		wait_for_completion(&conn->conn_waiting_on_uc_comp);
		return;
	}
	spin_unlock_bh(&conn->conn_usage_lock);
}
1011
1012void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
1013{
1014 spin_lock_bh(&conn->conn_usage_lock);
1015 conn->conn_usage_count--;
1016
1017 if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
1018 complete(&conn->conn_waiting_on_uc_comp);
1019
1020 spin_unlock_bh(&conn->conn_usage_lock);
1021}
1022
/* Take a connection reference under conn_usage_lock. */
void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count++;
	spin_unlock_bh(&conn->conn_usage_lock);
}
1029
/*
 * Allocate and queue an unsolicited NopIN PDU on the immediate queue.
 * When @want_response is set a real Target Transfer Tag is assigned
 * (skipping the reserved 0xFFFFFFFF value) and the NopIN response timer
 * is started; otherwise the reserved TTT is used.  Returns 0 on success,
 * -1 if command allocation fails.
 */
static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
{
	u8 state;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
	if (!cmd)
		return -1;

	cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
			ISTATE_SEND_NOPIN_NO_RESPONSE;
	/* Unsolicited NopIN carries the reserved ITT. */
	cmd->init_task_tag = 0xFFFFFFFF;
	spin_lock_bh(&conn->sess->ttt_lock);
	cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
			0xFFFFFFFF;
	/* 0xFFFFFFFF is reserved; if the counter wrapped onto it, take
	 * the next value instead. */
	if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
		cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
	spin_unlock_bh(&conn->sess->ttt_lock);

	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	if (want_response)
		iscsit_start_nopin_response_timer(conn);
	iscsit_add_cmd_to_immediate_queue(cmd, conn, state);

	return 0;
}
1060
/*
 * Timer callback: the initiator did not answer a NopIN within
 * nopin_response_timeout, so record the failure in the tiqn's
 * session-error stats and force connection reinstatement.  A connection
 * usage reference is held across the callback; ISCSI_TF_STOP signals a
 * racing iscsit_stop_nopin_response_timer(), in which case we bail out
 * without failing the connection.
 */
static void iscsit_handle_nopin_response_timeout(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *) data;

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}

	pr_debug("Did not receive response to NOPIN on CID: %hu on"
		" SID: %u, failing connection.\n", conn->cid,
			conn->sess->sid);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	/* Update per-tiqn connection-timeout error statistics. */
	{
	struct iscsi_portal_group *tpg = conn->sess->tpg;
	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

	if (tiqn) {
		spin_lock_bh(&tiqn->sess_err_stats.lock);
		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
			(void *)conn->sess->sess_ops->InitiatorName);
		tiqn->sess_err_stats.last_sess_failure_type =
				ISCSI_SESS_ERR_CXN_TIMEOUT;
		tiqn->sess_err_stats.cxn_timeout_errors++;
		conn->sess->conn_timeout_errors++;
		spin_unlock_bh(&tiqn->sess_err_stats.lock);
	}
	}

	iscsit_cause_connection_reinstatement(conn, 0);
	iscsit_dec_conn_usage_count(conn);
}
1099
1100void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
1101{
1102 struct iscsi_session *sess = conn->sess;
1103 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1104
1105 spin_lock_bh(&conn->nopin_timer_lock);
1106 if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
1107 spin_unlock_bh(&conn->nopin_timer_lock);
1108 return;
1109 }
1110
1111 mod_timer(&conn->nopin_response_timer,
1112 (get_jiffies_64() + na->nopin_response_timeout * HZ));
1113 spin_unlock_bh(&conn->nopin_timer_lock);
1114}
1115
/*
 * Arm the NopIN response timer (deadline for the initiator's reply).
 * Takes conn->nopin_timer_lock itself — the pre-existing "called with
 * lock held" comment here was stale; the lock is acquired below.
 * No-op if the timer is already running.
 */
void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	init_timer(&conn->nopin_response_timer);
	conn->nopin_response_timer.expires =
		(get_jiffies_64() + na->nopin_response_timeout * HZ);
	conn->nopin_response_timer.data = (unsigned long)conn;
	conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
	conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_response_timer);

	pr_debug("Started NOPIN Response Timer on CID: %d to %u"
		" seconds\n", conn->cid, na->nopin_response_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}
1143
/*
 * Cancel the NopIN response timer.  The ISCSI_TF_STOP flag is set
 * under the lock first so a concurrently-firing handler bails out,
 * then del_timer_sync() waits for any in-flight handler; RUNNING is
 * cleared afterwards.  This ordering is load-bearing — do not reorder.
 */
void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_response_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}
1160
/*
 * Timer callback: the NopIN keepalive interval expired, so send a NopIN
 * that requests a response.  A connection usage reference is held across
 * the callback; ISCSI_TF_STOP signals a racing iscsit_stop_nopin_timer().
 */
static void iscsit_handle_nopin_timeout(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *) data;

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	iscsit_add_nopin(conn, 1);
	iscsit_dec_conn_usage_count(conn);
}
1179
/*
 * Called with conn->nopin_timer_lock held.
 *
 * Arm the NopIN keepalive timer unless keepalives are disabled
 * (na->nopin_timeout == 0) or the timer is already running.
 */
void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
	 */
	if (!na->nopin_timeout)
		return;

	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
		return;

	init_timer(&conn->nopin_timer);
	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
	conn->nopin_timer.data = (unsigned long)conn;
	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_timer);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
		" interval\n", conn->cid, na->nopin_timeout);
}
1207
1208void iscsit_start_nopin_timer(struct iscsi_conn *conn)
1209{
1210 struct iscsi_session *sess = conn->sess;
1211 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1212 /*
1213 * NOPIN timeout is disabled..
1214 */
1215 if (!na->nopin_timeout)
1216 return;
1217
1218 spin_lock_bh(&conn->nopin_timer_lock);
1219 if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
1220 spin_unlock_bh(&conn->nopin_timer_lock);
1221 return;
1222 }
1223
1224 init_timer(&conn->nopin_timer);
1225 conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
1226 conn->nopin_timer.data = (unsigned long)conn;
1227 conn->nopin_timer.function = iscsit_handle_nopin_timeout;
1228 conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
1229 conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
1230 add_timer(&conn->nopin_timer);
1231
1232 pr_debug("Started NOPIN Timer on CID: %d at %u second"
1233 " interval\n", conn->cid, na->nopin_timeout);
1234 spin_unlock_bh(&conn->nopin_timer_lock);
1235}
1236
/*
 * Cancel the NopIN keepalive timer.  Same STOP-flag handshake as
 * iscsit_stop_nopin_response_timer(): set ISCSI_TF_STOP under the lock
 * so a firing handler bails out, del_timer_sync() outside the lock,
 * then clear RUNNING.  Statement order is load-bearing.
 */
void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}
1253
/*
 * Transmit cmd->tx_size bytes over @conn using either the data iovecs
 * (@use_misc == 0) or the misc iovecs.  Returns 0 and zeroes
 * cmd->tx_size on full transmission, -1 on any failure other than
 * -EAGAIN, which is retried from the top.
 *
 * NOTE(review): the -EAGAIN retry restarts the entire transfer from
 * the first iovec — presumably tx_data() never sends a partial buffer
 * before returning -EAGAIN on this blocking socket; confirm.
 */
int iscsit_send_tx_data(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	int use_misc)
{
	int tx_sent, tx_size;
	u32 iov_count;
	struct kvec *iov;

send_data:
	tx_size = cmd->tx_size;

	if (!use_misc) {
		iov = &cmd->iov_data[0];
		iov_count = cmd->iov_data_count;
	} else {
		iov = &cmd->iov_misc[0];
		iov_count = cmd->iov_misc_count;
	}

	tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
	if (tx_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_data;
		} else
			return -1;
	}
	cmd->tx_size = 0;

	return 0;
}
1286
/*
 * Zero-copy transmit of a DataIN payload: send the PDU header (plus
 * header digest when negotiated) via tx_data(), then push each page of
 * the command's scatterlist with sendpage(), and finally send the pad
 * bytes and data digest from the tail of cmd->iov_data.  Each stage
 * retries on -EAGAIN via its goto label; any other short transfer
 * returns -1.  Returns 0 when everything was sent.
 */
int iscsit_fe_sendpage_sg(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct scatterlist *sg = cmd->first_data_sg;
	struct kvec iov;
	u32 tx_hdr_size, data_len;
	u32 offset = cmd->first_data_sg_off;
	int tx_sent;

send_hdr:
	tx_hdr_size = ISCSI_HDR_LEN;
	if (conn->conn_ops->HeaderDigest)
		tx_hdr_size += ISCSI_CRC_LEN;

	iov.iov_base = cmd->pdu;
	iov.iov_len = tx_hdr_size;

	tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
	if (tx_hdr_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_hdr;
		}
		return -1;
	}

	/* Payload length excludes the header, padding and data digest. */
	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
	if (conn->conn_ops->DataDigest)
		data_len -= ISCSI_CRC_LEN;

	/*
	 * Perform sendpage() for each page in the scatterlist
	 */
	while (data_len) {
		/* First entry may start mid-page (first_data_sg_off). */
		u32 space = (sg->length - offset);
		u32 sub_len = min_t(u32, data_len, space);
send_pg:
		tx_sent = conn->sock->ops->sendpage(conn->sock,
				sg_page(sg), sg->offset + offset, sub_len, 0);
		if (tx_sent != sub_len) {
			if (tx_sent == -EAGAIN) {
				pr_err("tcp_sendpage() returned"
					" -EAGAIN\n");
				goto send_pg;
			}

			pr_err("tcp_sendpage() failure: %d\n",
				tx_sent);
			return -1;
		}

		data_len -= sub_len;
		offset = 0;
		sg = sg_next(sg);
	}

send_padding:
	if (cmd->padding) {
		/* Pad bytes live in the last data iovec. */
		struct kvec *iov_p =
			&cmd->iov_data[cmd->iov_data_count-1];

		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
		if (cmd->padding != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_padding;
			}
			return -1;
		}
	}

send_datacrc:
	if (conn->conn_ops->DataDigest) {
		/* Data digest iovec sits one past the last data iovec. */
		struct kvec *iov_d =
			&cmd->iov_data[cmd->iov_data_count];

		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
		if (ISCSI_CRC_LEN != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_datacrc;
			}
			return -1;
		}
	}

	return 0;
}
1376
/*
 * This function is used for mainly sending a ISCSI_TARG_LOGIN_RSP PDU
 * back to the Initiator when an exception condition occurs with the
 * errors set in status_class and status_detail.
 *
 * Parameters: iSCSI Connection, Status Class, Status Detail.
 * Returns: 0 on success, -1 on error.
 */
int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
{
	u8 iscsi_hdr[ISCSI_HDR_LEN];
	int err;
	struct kvec iov;
	struct iscsi_login_rsp *hdr;

	/* Record the (usually failed) login attempt in the tiqn stats. */
	iscsit_collect_login_stats(conn, status_class, status_detail);

	memset(&iov, 0, sizeof(struct kvec));
	memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN);

	/* Build a minimal Login Response PDU on the stack. */
	hdr = (struct iscsi_login_rsp *)&iscsi_hdr;
	hdr->opcode = ISCSI_OP_LOGIN_RSP;
	hdr->status_class = status_class;
	hdr->status_detail = status_detail;
	hdr->itt = cpu_to_be32(conn->login_itt);

	iov.iov_base = &iscsi_hdr;
	iov.iov_len = ISCSI_HDR_LEN;

	PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN);

	err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
	if (err != ISCSI_HDR_LEN) {
		pr_err("tx_data returned less than expected\n");
		return -1;
	}

	return 0;
}
1416
1417void iscsit_print_session_params(struct iscsi_session *sess)
1418{
1419 struct iscsi_conn *conn;
1420
1421 pr_debug("-----------------------------[Session Params for"
1422 " SID: %u]-----------------------------\n", sess->sid);
1423 spin_lock_bh(&sess->conn_lock);
1424 list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
1425 iscsi_dump_conn_ops(conn->conn_ops);
1426 spin_unlock_bh(&sess->conn_lock);
1427
1428 iscsi_dump_sess_ops(sess->sess_ops);
1429}
1430
/*
 * Low-level receive path.  Without sync-and-steering this is a plain
 * kernel_recvmsg() loop over count->iov.  With it, the caller's iovec
 * list is re-sliced so that every of_marker interval receives the data
 * up to the marker followed by the two 32-bit marker halves (captured
 * into rx_marker_val[]); the marker bytes are added to the expected
 * total and subtracted again before returning.  Returns the number of
 * payload bytes received, or <= 0 on socket error.
 *
 * NOTE(review): iov[] and rx_marker_val[] are VLAs sized by the counts
 * from iscsit_determine_sync_and_steering_counts() — presumably bounded
 * by negotiated limits so kernel stack usage stays safe; confirm.
 */
static int iscsit_do_rx_data(
	struct iscsi_conn *conn,
	struct iscsi_data_count *count)
{
	int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
	u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0;
	struct kvec iov[count->ss_iov_count], *iov_p;
	struct msghdr msg;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&msg, 0, sizeof(struct msghdr));

	if (count->sync_and_steering) {
		int size = 0;
		u32 i, orig_iov_count = 0;
		u32 orig_iov_len = 0, orig_iov_loc = 0;
		u32 iov_count = 0, per_iov_bytes = 0;
		u32 *rx_marker, old_rx_marker = 0;
		struct kvec *iov_record;

		memset(&rx_marker_val, 0,
				count->ss_marker_count * sizeof(u32));
		memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));

		iov_record = count->iov;
		orig_iov_count = count->iov_count;
		/* of_marker tracks bytes remaining to the next marker. */
		rx_marker = &conn->of_marker;

		i = 0;
		size = data;
		orig_iov_len = iov_record[orig_iov_loc].iov_len;
		while (size > 0) {
			pr_debug("rx_data: #1 orig_iov_len %u,"
			" orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
			pr_debug("rx_data: #2 rx_marker %u, size"
				" %u\n", *rx_marker, size);

			if (orig_iov_len >= *rx_marker) {
				/* Marker falls inside this iovec: emit the
				 * data up to it, then two marker halves. */
				iov[iov_count].iov_len = *rx_marker;
				iov[iov_count++].iov_base =
					(iov_record[orig_iov_loc].iov_base +
						per_iov_bytes);

				iov[iov_count].iov_len = (MARKER_SIZE / 2);
				iov[iov_count++].iov_base =
					&rx_marker_val[rx_marker_iov++];
				iov[iov_count].iov_len = (MARKER_SIZE / 2);
				iov[iov_count++].iov_base =
					&rx_marker_val[rx_marker_iov++];
				old_rx_marker = *rx_marker;

				/*
				 * OFMarkInt is in 32-bit words.
				 */
				*rx_marker = (conn->conn_ops->OFMarkInt * 4);
				size -= old_rx_marker;
				orig_iov_len -= old_rx_marker;
				per_iov_bytes += old_rx_marker;

				pr_debug("rx_data: #3 new_rx_marker"
					" %u, size %u\n", *rx_marker, size);
			} else {
				/* No marker in this iovec: pass it through
				 * and move to the next one. */
				iov[iov_count].iov_len = orig_iov_len;
				iov[iov_count++].iov_base =
					(iov_record[orig_iov_loc].iov_base +
						per_iov_bytes);

				per_iov_bytes = 0;
				*rx_marker -= orig_iov_len;
				size -= orig_iov_len;

				if (size)
					orig_iov_len =
					iov_record[++orig_iov_loc].iov_len;

				pr_debug("rx_data: #4 new_rx_marker"
					" %u, size %u\n", *rx_marker, size);
			}
		}
		/* Expect the marker bytes on the wire as well. */
		data += (rx_marker_iov * (MARKER_SIZE / 2));

		iov_p = &iov[0];
		iov_len = iov_count;

		if (iov_count > count->ss_iov_count) {
			pr_err("iov_count: %d, count->ss_iov_count:"
				" %d\n", iov_count, count->ss_iov_count);
			return -1;
		}
		if (rx_marker_iov > count->ss_marker_count) {
			pr_err("rx_marker_iov: %d, count->ss_marker"
				"_count: %d\n", rx_marker_iov,
				count->ss_marker_count);
			return -1;
		}
	} else {
		iov_p = count->iov;
		iov_len = count->iov_count;
	}

	while (total_rx < data) {
		rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
					(data - total_rx), MSG_WAITALL);
		if (rx_loop <= 0) {
			pr_debug("rx_loop: %d total_rx: %d\n",
				rx_loop, total_rx);
			return rx_loop;
		}
		total_rx += rx_loop;
		pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
				rx_loop, total_rx, data);
	}

	if (count->sync_and_steering) {
		int j;
		/* Last received marker value becomes the new offset. */
		for (j = 0; j < rx_marker_iov; j++) {
			pr_debug("rx_data: #5 j: %d, offset: %d\n",
				j, rx_marker_val[j]);
			conn->of_marker_offset = rx_marker_val[j];
		}
		/* Report payload bytes only, excluding marker bytes. */
		total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
	}

	return total_rx;
}
1558
/*
 * Low-level transmit path, mirror image of iscsit_do_rx_data().
 * Without sync-and-steering this is a plain kernel_sendmsg() loop over
 * count->iov.  With it, the iovec list is re-sliced at every if_marker
 * interval to inject the two 32-bit marker halves (values staged in
 * tx_marker_val[], each holding the bytes remaining after the marker);
 * the marker bytes are added to the expected total and subtracted
 * before returning.  Returns payload bytes sent, or <= 0 on error.
 * Unlike the rx path, a non-positive data_length is rejected up front.
 *
 * NOTE(review): iov[] and tx_marker_val[] are VLAs sized by the counts
 * from iscsit_determine_sync_and_steering_counts() — presumably bounded
 * by negotiated limits so kernel stack usage stays safe; confirm.
 */
static int iscsit_do_tx_data(
	struct iscsi_conn *conn,
	struct iscsi_data_count *count)
{
	int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
	u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0;
	struct kvec iov[count->ss_iov_count], *iov_p;
	struct msghdr msg;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	if (data <= 0) {
		pr_err("Data length is: %d\n", data);
		return -1;
	}

	memset(&msg, 0, sizeof(struct msghdr));

	if (count->sync_and_steering) {
		int size = 0;
		u32 i, orig_iov_count = 0;
		u32 orig_iov_len = 0, orig_iov_loc = 0;
		u32 iov_count = 0, per_iov_bytes = 0;
		u32 *tx_marker, old_tx_marker = 0;
		struct kvec *iov_record;

		memset(&tx_marker_val, 0,
				count->ss_marker_count * sizeof(u32));
		memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));

		iov_record = count->iov;
		orig_iov_count = count->iov_count;
		/* if_marker tracks bytes remaining to the next marker. */
		tx_marker = &conn->if_marker;

		i = 0;
		size = data;
		orig_iov_len = iov_record[orig_iov_loc].iov_len;
		while (size > 0) {
			pr_debug("tx_data: #1 orig_iov_len %u,"
			" orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
			pr_debug("tx_data: #2 tx_marker %u, size"
				" %u\n", *tx_marker, size);

			if (orig_iov_len >= *tx_marker) {
				/* Marker falls inside this iovec: emit the
				 * data up to it, then the marker value
				 * (bytes remaining) as two halves. */
				iov[iov_count].iov_len = *tx_marker;
				iov[iov_count++].iov_base =
					(iov_record[orig_iov_loc].iov_base +
						per_iov_bytes);

				tx_marker_val[tx_marker_iov] =
						(size - *tx_marker);
				iov[iov_count].iov_len = (MARKER_SIZE / 2);
				iov[iov_count++].iov_base =
					&tx_marker_val[tx_marker_iov++];
				iov[iov_count].iov_len = (MARKER_SIZE / 2);
				iov[iov_count++].iov_base =
					&tx_marker_val[tx_marker_iov++];
				old_tx_marker = *tx_marker;

				/*
				 * IFMarkInt is in 32-bit words.
				 */
				*tx_marker = (conn->conn_ops->IFMarkInt * 4);
				size -= old_tx_marker;
				orig_iov_len -= old_tx_marker;
				per_iov_bytes += old_tx_marker;

				pr_debug("tx_data: #3 new_tx_marker"
					" %u, size %u\n", *tx_marker, size);
				pr_debug("tx_data: #4 offset %u\n",
					tx_marker_val[tx_marker_iov-1]);
			} else {
				/* No marker in this iovec: pass it through
				 * and move to the next one. */
				iov[iov_count].iov_len = orig_iov_len;
				iov[iov_count++].iov_base
					= (iov_record[orig_iov_loc].iov_base +
						per_iov_bytes);

				per_iov_bytes = 0;
				*tx_marker -= orig_iov_len;
				size -= orig_iov_len;

				if (size)
					orig_iov_len =
					iov_record[++orig_iov_loc].iov_len;

				pr_debug("tx_data: #5 new_tx_marker"
					" %u, size %u\n", *tx_marker, size);
			}
		}

		/* Send the marker bytes on the wire as well. */
		data += (tx_marker_iov * (MARKER_SIZE / 2));

		iov_p = &iov[0];
		iov_len = iov_count;

		if (iov_count > count->ss_iov_count) {
			pr_err("iov_count: %d, count->ss_iov_count:"
				" %d\n", iov_count, count->ss_iov_count);
			return -1;
		}
		if (tx_marker_iov > count->ss_marker_count) {
			pr_err("tx_marker_iov: %d, count->ss_marker"
				"_count: %d\n", tx_marker_iov,
				count->ss_marker_count);
			return -1;
		}
	} else {
		iov_p = count->iov;
		iov_len = count->iov_count;
	}

	while (total_tx < data) {
		tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
					(data - total_tx));
		if (tx_loop <= 0) {
			pr_debug("tx_loop: %d total_tx %d\n",
				tx_loop, total_tx);
			return tx_loop;
		}
		total_tx += tx_loop;
		pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
					tx_loop, total_tx, data);
	}

	/* Report payload bytes only, excluding marker bytes. */
	if (count->sync_and_steering)
		total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));

	return total_tx;
}
1689
1690int rx_data(
1691 struct iscsi_conn *conn,
1692 struct kvec *iov,
1693 int iov_count,
1694 int data)
1695{
1696 struct iscsi_data_count c;
1697
1698 if (!conn || !conn->sock || !conn->conn_ops)
1699 return -1;
1700
1701 memset(&c, 0, sizeof(struct iscsi_data_count));
1702 c.iov = iov;
1703 c.iov_count = iov_count;
1704 c.data_length = data;
1705 c.type = ISCSI_RX_DATA;
1706
1707 if (conn->conn_ops->OFMarker &&
1708 (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
1709 if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
1710 return -1;
1711 }
1712
1713 return iscsit_do_rx_data(conn, &c);
1714}
1715
1716int tx_data(
1717 struct iscsi_conn *conn,
1718 struct kvec *iov,
1719 int iov_count,
1720 int data)
1721{
1722 struct iscsi_data_count c;
1723
1724 if (!conn || !conn->sock || !conn->conn_ops)
1725 return -1;
1726
1727 memset(&c, 0, sizeof(struct iscsi_data_count));
1728 c.iov = iov;
1729 c.iov_count = iov_count;
1730 c.data_length = data;
1731 c.type = ISCSI_TX_DATA;
1732
1733 if (conn->conn_ops->IFMarker &&
1734 (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
1735 if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
1736 return -1;
1737 }
1738
1739 return iscsit_do_tx_data(conn, &c);
1740}
1741
/*
 * Record a login attempt's outcome in the tiqn's login statistics:
 * bump the counter matching (status_class, status_detail) and, for
 * failures, remember the initiator name, IP and time of the last
 * failure.  Duplicate failures from the same IP within a short window
 * are suppressed.
 */
void iscsit_collect_login_stats(
	struct iscsi_conn *conn,
	u8 status_class,
	u8 status_detail)
{
	struct iscsi_param *intrname = NULL;
	struct iscsi_tiqn *tiqn;
	struct iscsi_login_stats *ls;

	tiqn = iscsit_snmp_get_tiqn(conn);
	if (!tiqn)
		return;

	ls = &tiqn->login_stats;

	spin_lock(&ls->lock);
	/* NOTE(review): the window below compares against 10 *jiffies*,
	 * i.e. tick-rate dependent — presumably a short dedup window was
	 * intended (10/HZ of a second at face value); confirm units. */
	if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
	    ((get_jiffies_64() - ls->last_fail_time) < 10)) {
		/* We already have the failure info for this login */
		spin_unlock(&ls->lock);
		return;
	}

	if (status_class == ISCSI_STATUS_CLS_SUCCESS)
		ls->accepts++;
	else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
		ls->redirects++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
		 (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
		ls->authenticate_fails++;
		ls->last_fail_type =  ISCSI_LOGIN_FAIL_AUTHENTICATE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
		 (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
		ls->authorize_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		 (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
		ls->negotiate_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
	} else {
		ls->other_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
	}

	/* Save initiator name, ip address and time, if it is a failed login */
	if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
		if (conn->param_list)
			intrname = iscsi_find_param_from_key(INITIATORNAME,
							     conn->param_list);
		/* NOTE(review): unbounded strcpy — presumably
		 * last_intr_fail_name is sized for the negotiated
		 * InitiatorName maximum; confirm. */
		strcpy(ls->last_intr_fail_name,
		       (intrname ? intrname->value : "Unknown"));

		ls->last_intr_fail_ip_family = conn->sock->sk->sk_family;
		snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
				"%s", conn->login_ip);
		ls->last_fail_time = get_jiffies_64();
	}

	spin_unlock(&ls->lock);
}
1803
1804struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
1805{
1806 struct iscsi_portal_group *tpg;
1807
1808 if (!conn || !conn->sess)
1809 return NULL;
1810
1811 tpg = conn->sess->tpg;
1812 if (!tpg)
1813 return NULL;
1814
1815 if (!tpg->tpg_tiqn)
1816 return NULL;
1817
1818 return tpg->tpg_tiqn;
1819}
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
new file mode 100644
index 000000000000..2cd49d607bda
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -0,0 +1,60 @@
1#ifndef ISCSI_TARGET_UTIL_H
2#define ISCSI_TARGET_UTIL_H
3
4#define MARKER_SIZE 8
5
6extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32);
7extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
8extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
9extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
10extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
11extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
12extern struct iscsi_cmd *iscsit_allocate_se_cmd(struct iscsi_conn *, u32, int, int);
13extern struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(struct iscsi_conn *, u8);
14extern int iscsit_decide_list_to_build(struct iscsi_cmd *, u32);
15extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
16extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
17extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
18int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, u32 cmdsn);
19extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
20extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, u32);
21extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
22 u32, u32);
23extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
24extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd **,
25 struct iscsi_conn_recovery **, u32);
26extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
27extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
28extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
29extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
30extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
31extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
32extern void iscsit_release_cmd(struct iscsi_cmd *);
33extern int iscsit_check_session_usage_count(struct iscsi_session *);
34extern void iscsit_dec_session_usage_count(struct iscsi_session *);
35extern void iscsit_inc_session_usage_count(struct iscsi_session *);
36extern int iscsit_set_sync_and_steering_values(struct iscsi_conn *);
37extern struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *, u16);
38extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *, u16);
39extern void iscsit_check_conn_usage_count(struct iscsi_conn *);
40extern void iscsit_dec_conn_usage_count(struct iscsi_conn *);
41extern void iscsit_inc_conn_usage_count(struct iscsi_conn *);
42extern void iscsit_mod_nopin_response_timer(struct iscsi_conn *);
43extern void iscsit_start_nopin_response_timer(struct iscsi_conn *);
44extern void iscsit_stop_nopin_response_timer(struct iscsi_conn *);
45extern void __iscsit_start_nopin_timer(struct iscsi_conn *);
46extern void iscsit_start_nopin_timer(struct iscsi_conn *);
47extern void iscsit_stop_nopin_timer(struct iscsi_conn *);
48extern int iscsit_send_tx_data(struct iscsi_cmd *, struct iscsi_conn *, int);
49extern int iscsit_fe_sendpage_sg(struct iscsi_cmd *, struct iscsi_conn *);
50extern int iscsit_tx_login_rsp(struct iscsi_conn *, u8, u8);
51extern void iscsit_print_session_params(struct iscsi_session *);
52extern int iscsit_print_dev_to_proc(char *, char **, off_t, int);
53extern int iscsit_print_sessions_to_proc(char *, char **, off_t, int);
54extern int iscsit_print_tpg_to_proc(char *, char **, off_t, int);
55extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
56extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
57extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
58extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
59
60#endif /*** ISCSI_TARGET_UTIL_H ***/
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 46352d658e35..c75a01a1c475 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -4052,17 +4052,16 @@ static int transport_allocate_data_tasks(
4052 struct se_task *task; 4052 struct se_task *task;
4053 struct se_device *dev = cmd->se_dev; 4053 struct se_device *dev = cmd->se_dev;
4054 unsigned long flags; 4054 unsigned long flags;
4055 sector_t sectors;
4056 int task_count, i, ret; 4055 int task_count, i, ret;
4057 sector_t dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; 4056 sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
4058 u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; 4057 u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
4059 struct scatterlist *sg; 4058 struct scatterlist *sg;
4060 struct scatterlist *cmd_sg; 4059 struct scatterlist *cmd_sg;
4061 4060
4062 WARN_ON(cmd->data_length % sector_size); 4061 WARN_ON(cmd->data_length % sector_size);
4063 sectors = DIV_ROUND_UP(cmd->data_length, sector_size); 4062 sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
4064 task_count = DIV_ROUND_UP(sectors, dev_max_sectors); 4063 task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
4065 4064
4066 cmd_sg = sgl; 4065 cmd_sg = sgl;
4067 for (i = 0; i < task_count; i++) { 4066 for (i = 0; i < task_count; i++) {
4068 unsigned int task_size; 4067 unsigned int task_size;
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c
index 03c285bb2f18..3a997760ec32 100644
--- a/drivers/tty/bfin_jtag_comm.c
+++ b/drivers/tty/bfin_jtag_comm.c
@@ -25,7 +25,7 @@
25#include <linux/tty.h> 25#include <linux/tty.h>
26#include <linux/tty_driver.h> 26#include <linux/tty_driver.h>
27#include <linux/tty_flip.h> 27#include <linux/tty_flip.h>
28#include <asm/atomic.h> 28#include <linux/atomic.h>
29 29
30#define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); }) 30#define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); })
31 31
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 13043e8d37fe..6a1241c7f841 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -83,7 +83,7 @@
83#include <linux/wait.h> 83#include <linux/wait.h>
84#include <linux/pci.h> 84#include <linux/pci.h>
85#include <linux/uaccess.h> 85#include <linux/uaccess.h>
86#include <asm/atomic.h> 86#include <linux/atomic.h>
87#include <asm/unaligned.h> 87#include <asm/unaligned.h>
88#include <linux/bitops.h> 88#include <linux/bitops.h>
89#include <linux/spinlock.h> 89#include <linux/spinlock.h>
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 57421d776329..ddc487a2d42f 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -48,7 +48,7 @@
48#include <linux/sysrq.h> 48#include <linux/sysrq.h>
49#include <linux/tty.h> 49#include <linux/tty.h>
50 50
51#include <asm/atomic.h> 51#include <linux/atomic.h>
52#include <asm/bootinfo.h> 52#include <asm/bootinfo.h>
53#include <asm/io.h> 53#include <asm/io.h>
54#include <asm/system.h> 54#include <asm/system.h>
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 22fe801cce31..827db7654594 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -45,10 +45,11 @@
45#include <linux/delay.h> 45#include <linux/delay.h>
46#include <linux/rational.h> 46#include <linux/rational.h>
47#include <linux/slab.h> 47#include <linux/slab.h>
48#include <linux/of.h>
49#include <linux/of_device.h>
48 50
49#include <asm/io.h> 51#include <asm/io.h>
50#include <asm/irq.h> 52#include <asm/irq.h>
51#include <mach/hardware.h>
52#include <mach/imx-uart.h> 53#include <mach/imx-uart.h>
53 54
54/* Register definitions */ 55/* Register definitions */
@@ -66,8 +67,9 @@
66#define UBIR 0xa4 /* BRM Incremental Register */ 67#define UBIR 0xa4 /* BRM Incremental Register */
67#define UBMR 0xa8 /* BRM Modulator Register */ 68#define UBMR 0xa8 /* BRM Modulator Register */
68#define UBRC 0xac /* Baud Rate Count Register */ 69#define UBRC 0xac /* Baud Rate Count Register */
69#define MX2_ONEMS 0xb0 /* One Millisecond register */ 70#define IMX21_ONEMS 0xb0 /* One Millisecond register */
70#define UTS (cpu_is_mx1() ? 0xd0 : 0xb4) /* UART Test Register */ 71#define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */
72#define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/
71 73
72/* UART Control Register Bit Fields.*/ 74/* UART Control Register Bit Fields.*/
73#define URXD_CHARRDY (1<<15) 75#define URXD_CHARRDY (1<<15)
@@ -87,7 +89,7 @@
87#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */ 89#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
88#define UCR1_SNDBRK (1<<4) /* Send break */ 90#define UCR1_SNDBRK (1<<4) /* Send break */
89#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */ 91#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */
90#define MX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, mx1 only */ 92#define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
91#define UCR1_DOZE (1<<1) /* Doze */ 93#define UCR1_DOZE (1<<1) /* Doze */
92#define UCR1_UARTEN (1<<0) /* UART enabled */ 94#define UCR1_UARTEN (1<<0) /* UART enabled */
93#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */ 95#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */
@@ -113,9 +115,7 @@
113#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */ 115#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */
114#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */ 116#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */
115#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */ 117#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */
116#define MX1_UCR3_REF25 (1<<3) /* Ref freq 25 MHz, only on mx1 */ 118#define IMX21_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select */
117#define MX1_UCR3_REF30 (1<<2) /* Ref Freq 30 MHz, only on mx1 */
118#define MX2_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select, on mx2/mx3 */
119#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */ 119#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
120#define UCR3_BPEN (1<<0) /* Preset registers enable */ 120#define UCR3_BPEN (1<<0) /* Preset registers enable */
121#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */ 121#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
@@ -181,6 +181,18 @@
181 181
182#define UART_NR 8 182#define UART_NR 8
183 183
184/* i.mx21 type uart runs on all i.mx except i.mx1 */
185enum imx_uart_type {
186 IMX1_UART,
187 IMX21_UART,
188};
189
190/* device type dependent stuff */
191struct imx_uart_data {
192 unsigned uts_reg;
193 enum imx_uart_type devtype;
194};
195
184struct imx_port { 196struct imx_port {
185 struct uart_port port; 197 struct uart_port port;
186 struct timer_list timer; 198 struct timer_list timer;
@@ -192,6 +204,7 @@ struct imx_port {
192 unsigned int irda_inv_tx:1; 204 unsigned int irda_inv_tx:1;
193 unsigned short trcv_delay; /* transceiver delay */ 205 unsigned short trcv_delay; /* transceiver delay */
194 struct clk *clk; 206 struct clk *clk;
207 struct imx_uart_data *devdata;
195}; 208};
196 209
197#ifdef CONFIG_IRDA 210#ifdef CONFIG_IRDA
@@ -200,6 +213,52 @@ struct imx_port {
200#define USE_IRDA(sport) (0) 213#define USE_IRDA(sport) (0)
201#endif 214#endif
202 215
216static struct imx_uart_data imx_uart_devdata[] = {
217 [IMX1_UART] = {
218 .uts_reg = IMX1_UTS,
219 .devtype = IMX1_UART,
220 },
221 [IMX21_UART] = {
222 .uts_reg = IMX21_UTS,
223 .devtype = IMX21_UART,
224 },
225};
226
227static struct platform_device_id imx_uart_devtype[] = {
228 {
229 .name = "imx1-uart",
230 .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX1_UART],
231 }, {
232 .name = "imx21-uart",
233 .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART],
234 }, {
235 /* sentinel */
236 }
237};
238MODULE_DEVICE_TABLE(platform, imx_uart_devtype);
239
240static struct of_device_id imx_uart_dt_ids[] = {
241 { .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
242 { .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
243 { /* sentinel */ }
244};
245MODULE_DEVICE_TABLE(of, imx_uart_dt_ids);
246
247static inline unsigned uts_reg(struct imx_port *sport)
248{
249 return sport->devdata->uts_reg;
250}
251
252static inline int is_imx1_uart(struct imx_port *sport)
253{
254 return sport->devdata->devtype == IMX1_UART;
255}
256
257static inline int is_imx21_uart(struct imx_port *sport)
258{
259 return sport->devdata->devtype == IMX21_UART;
260}
261
203/* 262/*
204 * Handle any change of modem status signal since we were last called. 263 * Handle any change of modem status signal since we were last called.
205 */ 264 */
@@ -326,7 +385,8 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
326 struct circ_buf *xmit = &sport->port.state->xmit; 385 struct circ_buf *xmit = &sport->port.state->xmit;
327 386
328 while (!uart_circ_empty(xmit) && 387 while (!uart_circ_empty(xmit) &&
329 !(readl(sport->port.membase + UTS) & UTS_TXFULL)) { 388 !(readl(sport->port.membase + uts_reg(sport))
389 & UTS_TXFULL)) {
330 /* send xmit->buf[xmit->tail] 390 /* send xmit->buf[xmit->tail]
331 * out the port here */ 391 * out the port here */
332 writel(xmit->buf[xmit->tail], sport->port.membase + URTX0); 392 writel(xmit->buf[xmit->tail], sport->port.membase + URTX0);
@@ -373,7 +433,7 @@ static void imx_start_tx(struct uart_port *port)
373 writel(temp, sport->port.membase + UCR4); 433 writel(temp, sport->port.membase + UCR4);
374 } 434 }
375 435
376 if (readl(sport->port.membase + UTS) & UTS_TXEMPTY) 436 if (readl(sport->port.membase + uts_reg(sport)) & UTS_TXEMPTY)
377 imx_transmit_buffer(sport); 437 imx_transmit_buffer(sport);
378} 438}
379 439
@@ -689,9 +749,9 @@ static int imx_startup(struct uart_port *port)
689 } 749 }
690 } 750 }
691 751
692 if (!cpu_is_mx1()) { 752 if (is_imx21_uart(sport)) {
693 temp = readl(sport->port.membase + UCR3); 753 temp = readl(sport->port.membase + UCR3);
694 temp |= MX2_UCR3_RXDMUXSEL; 754 temp |= IMX21_UCR3_RXDMUXSEL;
695 writel(temp, sport->port.membase + UCR3); 755 writel(temp, sport->port.membase + UCR3);
696 } 756 }
697 757
@@ -923,9 +983,9 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
923 writel(num, sport->port.membase + UBIR); 983 writel(num, sport->port.membase + UBIR);
924 writel(denom, sport->port.membase + UBMR); 984 writel(denom, sport->port.membase + UBMR);
925 985
926 if (!cpu_is_mx1()) 986 if (is_imx21_uart(sport))
927 writel(sport->port.uartclk / div / 1000, 987 writel(sport->port.uartclk / div / 1000,
928 sport->port.membase + MX2_ONEMS); 988 sport->port.membase + IMX21_ONEMS);
929 989
930 writel(old_ucr1, sport->port.membase + UCR1); 990 writel(old_ucr1, sport->port.membase + UCR1);
931 991
@@ -1041,7 +1101,7 @@ static void imx_console_putchar(struct uart_port *port, int ch)
1041{ 1101{
1042 struct imx_port *sport = (struct imx_port *)port; 1102 struct imx_port *sport = (struct imx_port *)port;
1043 1103
1044 while (readl(sport->port.membase + UTS) & UTS_TXFULL) 1104 while (readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)
1045 barrier(); 1105 barrier();
1046 1106
1047 writel(ch, sport->port.membase + URTX0); 1107 writel(ch, sport->port.membase + URTX0);
@@ -1062,8 +1122,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
1062 ucr1 = old_ucr1 = readl(sport->port.membase + UCR1); 1122 ucr1 = old_ucr1 = readl(sport->port.membase + UCR1);
1063 old_ucr2 = readl(sport->port.membase + UCR2); 1123 old_ucr2 = readl(sport->port.membase + UCR2);
1064 1124
1065 if (cpu_is_mx1()) 1125 if (is_imx1_uart(sport))
1066 ucr1 |= MX1_UCR1_UARTCLKEN; 1126 ucr1 |= IMX1_UCR1_UARTCLKEN;
1067 ucr1 |= UCR1_UARTEN; 1127 ucr1 |= UCR1_UARTEN;
1068 ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN); 1128 ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN);
1069 1129
@@ -1222,6 +1282,63 @@ static int serial_imx_resume(struct platform_device *dev)
1222 return 0; 1282 return 0;
1223} 1283}
1224 1284
1285#ifdef CONFIG_OF
1286static int serial_imx_probe_dt(struct imx_port *sport,
1287 struct platform_device *pdev)
1288{
1289 struct device_node *np = pdev->dev.of_node;
1290 const struct of_device_id *of_id =
1291 of_match_device(imx_uart_dt_ids, &pdev->dev);
1292 int ret;
1293
1294 if (!np)
1295 return -ENODEV;
1296
1297 ret = of_alias_get_id(np, "serial");
1298 if (ret < 0) {
1299 pr_err("%s: failed to get alias id, errno %d\n",
1300 __func__, ret);
1301 return -ENODEV;
1302 } else {
1303 sport->port.line = ret;
1304 }
1305
1306 if (of_get_property(np, "fsl,uart-has-rtscts", NULL))
1307 sport->have_rtscts = 1;
1308
1309 if (of_get_property(np, "fsl,irda-mode", NULL))
1310 sport->use_irda = 1;
1311
1312 sport->devdata = of_id->data;
1313
1314 return 0;
1315}
1316#else
1317static inline int serial_imx_probe_dt(struct imx_port *sport,
1318 struct platform_device *pdev)
1319{
1320 return -ENODEV;
1321}
1322#endif
1323
1324static void serial_imx_probe_pdata(struct imx_port *sport,
1325 struct platform_device *pdev)
1326{
1327 struct imxuart_platform_data *pdata = pdev->dev.platform_data;
1328
1329 sport->port.line = pdev->id;
1330 sport->devdata = (struct imx_uart_data *) pdev->id_entry->driver_data;
1331
1332 if (!pdata)
1333 return;
1334
1335 if (pdata->flags & IMXUART_HAVE_RTSCTS)
1336 sport->have_rtscts = 1;
1337
1338 if (pdata->flags & IMXUART_IRDA)
1339 sport->use_irda = 1;
1340}
1341
1225static int serial_imx_probe(struct platform_device *pdev) 1342static int serial_imx_probe(struct platform_device *pdev)
1226{ 1343{
1227 struct imx_port *sport; 1344 struct imx_port *sport;
@@ -1234,6 +1351,10 @@ static int serial_imx_probe(struct platform_device *pdev)
1234 if (!sport) 1351 if (!sport)
1235 return -ENOMEM; 1352 return -ENOMEM;
1236 1353
1354 ret = serial_imx_probe_dt(sport, pdev);
1355 if (ret == -ENODEV)
1356 serial_imx_probe_pdata(sport, pdev);
1357
1237 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1358 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1238 if (!res) { 1359 if (!res) {
1239 ret = -ENODEV; 1360 ret = -ENODEV;
@@ -1258,7 +1379,6 @@ static int serial_imx_probe(struct platform_device *pdev)
1258 sport->port.fifosize = 32; 1379 sport->port.fifosize = 32;
1259 sport->port.ops = &imx_pops; 1380 sport->port.ops = &imx_pops;
1260 sport->port.flags = UPF_BOOT_AUTOCONF; 1381 sport->port.flags = UPF_BOOT_AUTOCONF;
1261 sport->port.line = pdev->id;
1262 init_timer(&sport->timer); 1382 init_timer(&sport->timer);
1263 sport->timer.function = imx_timeout; 1383 sport->timer.function = imx_timeout;
1264 sport->timer.data = (unsigned long)sport; 1384 sport->timer.data = (unsigned long)sport;
@@ -1272,17 +1392,9 @@ static int serial_imx_probe(struct platform_device *pdev)
1272 1392
1273 sport->port.uartclk = clk_get_rate(sport->clk); 1393 sport->port.uartclk = clk_get_rate(sport->clk);
1274 1394
1275 imx_ports[pdev->id] = sport; 1395 imx_ports[sport->port.line] = sport;
1276 1396
1277 pdata = pdev->dev.platform_data; 1397 pdata = pdev->dev.platform_data;
1278 if (pdata && (pdata->flags & IMXUART_HAVE_RTSCTS))
1279 sport->have_rtscts = 1;
1280
1281#ifdef CONFIG_IRDA
1282 if (pdata && (pdata->flags & IMXUART_IRDA))
1283 sport->use_irda = 1;
1284#endif
1285
1286 if (pdata && pdata->init) { 1398 if (pdata && pdata->init) {
1287 ret = pdata->init(pdev); 1399 ret = pdata->init(pdev);
1288 if (ret) 1400 if (ret)
@@ -1340,9 +1452,11 @@ static struct platform_driver serial_imx_driver = {
1340 1452
1341 .suspend = serial_imx_suspend, 1453 .suspend = serial_imx_suspend,
1342 .resume = serial_imx_resume, 1454 .resume = serial_imx_resume,
1455 .id_table = imx_uart_devtype,
1343 .driver = { 1456 .driver = {
1344 .name = "imx-uart", 1457 .name = "imx-uart",
1345 .owner = THIS_MODULE, 1458 .owner = THIS_MODULE,
1459 .of_match_table = imx_uart_dt_ids,
1346 }, 1460 },
1347}; 1461};
1348 1462
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index ea2340b814e9..6bc2e3f876f4 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -39,7 +39,7 @@
39#include <linux/tty.h> 39#include <linux/tty.h>
40#include <linux/types.h> 40#include <linux/types.h>
41 41
42#include <asm/atomic.h> 42#include <linux/atomic.h>
43#include <asm/io.h> 43#include <asm/io.h>
44#include <asm/war.h> 44#include <asm/war.h>
45 45
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 1a7fd3e70315..0aebd7121b56 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -65,7 +65,7 @@
65#include <linux/tty.h> 65#include <linux/tty.h>
66#include <linux/types.h> 66#include <linux/types.h>
67 67
68#include <asm/atomic.h> 68#include <linux/atomic.h>
69#include <asm/system.h> 69#include <asm/system.h>
70 70
71#include <asm/dec/interrupts.h> 71#include <asm/dec/interrupts.h>
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index 02a02700b51d..a9a4eade7e80 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -12,7 +12,7 @@
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/device.h> 14#include <linux/device.h>
15#include <asm/atomic.h> 15#include <linux/atomic.h>
16 16
17#include "u_audio.h" 17#include "u_audio.h"
18 18
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 8f3eae90919f..3ea4666be3d0 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -29,7 +29,7 @@
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
31 31
32#include <asm/atomic.h> 32#include <linux/atomic.h>
33 33
34#include "u_ether.h" 34#include "u_ether.h"
35#include "rndis.h" 35#include "rndis.h"
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
index f7395ac5dc17..aa0ad34e0f1f 100644
--- a/drivers/usb/gadget/uvc_queue.c
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -19,7 +19,7 @@
19#include <linux/videodev2.h> 19#include <linux/videodev2.h>
20#include <linux/vmalloc.h> 20#include <linux/vmalloc.h>
21#include <linux/wait.h> 21#include <linux/wait.h>
22#include <asm/atomic.h> 22#include <linux/atomic.h>
23 23
24#include "uvc.h" 24#include "uvc.h"
25 25
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index 5e807f083bc8..52f8f9e513af 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -124,24 +124,12 @@ uvc_v4l2_open(struct file *file)
124 struct video_device *vdev = video_devdata(file); 124 struct video_device *vdev = video_devdata(file);
125 struct uvc_device *uvc = video_get_drvdata(vdev); 125 struct uvc_device *uvc = video_get_drvdata(vdev);
126 struct uvc_file_handle *handle; 126 struct uvc_file_handle *handle;
127 int ret;
128 127
129 handle = kzalloc(sizeof(*handle), GFP_KERNEL); 128 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
130 if (handle == NULL) 129 if (handle == NULL)
131 return -ENOMEM; 130 return -ENOMEM;
132 131
133 ret = v4l2_fh_init(&handle->vfh, vdev); 132 v4l2_fh_init(&handle->vfh, vdev);
134 if (ret < 0)
135 goto error;
136
137 ret = v4l2_event_init(&handle->vfh);
138 if (ret < 0)
139 goto error;
140
141 ret = v4l2_event_alloc(&handle->vfh, 8);
142 if (ret < 0)
143 goto error;
144
145 v4l2_fh_add(&handle->vfh); 133 v4l2_fh_add(&handle->vfh);
146 134
147 handle->device = &uvc->video; 135 handle->device = &uvc->video;
@@ -149,10 +137,6 @@ uvc_v4l2_open(struct file *file)
149 137
150 uvc_function_connect(uvc); 138 uvc_function_connect(uvc);
151 return 0; 139 return 0;
152
153error:
154 v4l2_fh_exit(&handle->vfh);
155 return ret;
156} 140}
157 141
158static int 142static int
@@ -314,7 +298,7 @@ uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
314 if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST) 298 if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
315 return -EINVAL; 299 return -EINVAL;
316 300
317 return v4l2_event_subscribe(&handle->vfh, arg); 301 return v4l2_event_subscribe(&handle->vfh, arg, 2);
318 } 302 }
319 303
320 case VIDIOC_UNSUBSCRIBE_EVENT: 304 case VIDIOC_UNSUBSCRIBE_EVENT:
@@ -354,7 +338,7 @@ uvc_v4l2_poll(struct file *file, poll_table *wait)
354 struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data); 338 struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
355 unsigned int mask = 0; 339 unsigned int mask = 0;
356 340
357 poll_wait(file, &handle->vfh.events->wait, wait); 341 poll_wait(file, &handle->vfh.wait, wait);
358 if (v4l2_event_pending(&handle->vfh)) 342 if (v4l2_event_pending(&handle->vfh))
359 mask |= POLLPRI; 343 mask |= POLLPRI;
360 344
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index a0037961e5bd..27e209a7222f 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -131,7 +131,7 @@
131#include <linux/usb.h> 131#include <linux/usb.h>
132#include <linux/proc_fs.h> 132#include <linux/proc_fs.h>
133 133
134#include <asm/atomic.h> 134#include <linux/atomic.h>
135#include <linux/blkdev.h> 135#include <linux/blkdev.h>
136#include "../../scsi/scsi.h" 136#include "../../scsi/scsi.h"
137#include <scsi/scsi_host.h> 137#include <scsi/scsi_host.h>
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 68ab460a735c..ac0d75a9005a 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -29,7 +29,7 @@
29#include <linux/backlight.h> 29#include <linux/backlight.h>
30#include <linux/timer.h> 30#include <linux/timer.h>
31#include <linux/workqueue.h> 31#include <linux/workqueue.h>
32#include <asm/atomic.h> 32#include <linux/atomic.h>
33 33
34#define APPLE_VENDOR_ID 0x05AC 34#define APPLE_VENDOR_ID 0x05AC
35 35
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 406893e4422b..a34430f55fb7 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -16,6 +16,7 @@
16 */ 16 */
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/scatterlist.h>
19#include "./common.h" 20#include "./common.h"
20#include "./pipe.h" 21#include "./pipe.h"
21 22
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index b0a7a9e909a4..1a49ca9c8ea5 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -34,7 +34,7 @@
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/spinlock.h> 35#include <linux/spinlock.h>
36#include <linux/uaccess.h> 36#include <linux/uaccess.h>
37#include <asm/atomic.h> 37#include <linux/atomic.h>
38#include <linux/usb.h> 38#include <linux/usb.h>
39#include <linux/usb/serial.h> 39#include <linux/usb/serial.h>
40 40
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index ca80171f42c6..2acc7f504c51 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -58,7 +58,7 @@
58 * destination address. 58 * destination address.
59 */ 59 */
60#include <linux/init.h> 60#include <linux/init.h>
61#include <asm/atomic.h> 61#include <linux/atomic.h>
62#include <linux/bitmap.h> 62#include <linux/bitmap.h>
63#include <linux/slab.h> 63#include <linux/slab.h>
64 64
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 14c9abf0d800..a801e2821d03 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -11,7 +11,7 @@
11#include <linux/uio.h> 11#include <linux/uio.h>
12#include <linux/virtio_config.h> 12#include <linux/virtio_config.h>
13#include <linux/virtio_ring.h> 13#include <linux/virtio_ring.h>
14#include <asm/atomic.h> 14#include <linux/atomic.h>
15 15
16/* This is for zerocopy, used buffer len is set to 1 when lower device DMA 16/* This is for zerocopy, used buffer len is set to 1 when lower device DMA
17 * done */ 17 * done */
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 019dbd3f12b2..b048417247e8 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -24,7 +24,7 @@
24#include <linux/backlight.h> 24#include <linux/backlight.h>
25#include <linux/gpio.h> 25#include <linux/gpio.h>
26#include <video/sh_mobile_lcdc.h> 26#include <video/sh_mobile_lcdc.h>
27#include <asm/atomic.h> 27#include <linux/atomic.h>
28 28
29#include "sh_mobile_lcdcfb.h" 29#include "sh_mobile_lcdcfb.h"
30#include "sh_mobile_meram.h" 30#include "sh_mobile_meram.h"
diff --git a/drivers/video/vermilion/vermilion.h b/drivers/video/vermilion/vermilion.h
index 7491abfcf1fc..43d11ec197fc 100644
--- a/drivers/video/vermilion/vermilion.h
+++ b/drivers/video/vermilion/vermilion.h
@@ -31,7 +31,7 @@
31 31
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <asm/atomic.h> 34#include <linux/atomic.h>
35#include <linux/mutex.h> 35#include <linux/mutex.h>
36 36
37#define VML_DEVICE_GPU 0x5002 37#define VML_DEVICE_GPU 0x5002
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index 1550431ccb6a..334d1ccf9c92 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -20,7 +20,7 @@
20 */ 20 */
21 21
22#include <asm/types.h> 22#include <asm/types.h>
23#include <asm/atomic.h> 23#include <linux/atomic.h>
24#include <asm/io.h> 24#include <asm/io.h>
25 25
26#include <linux/delay.h> 26#include <linux/delay.h>
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 17726a05a0a6..402928b135d1 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -86,6 +86,11 @@ static struct w1_family w1_therm_family_DS1822 = {
86 .fops = &w1_therm_fops, 86 .fops = &w1_therm_fops,
87}; 87};
88 88
89static struct w1_family w1_therm_family_DS28EA00 = {
90 .fid = W1_THERM_DS28EA00,
91 .fops = &w1_therm_fops,
92};
93
89struct w1_therm_family_converter 94struct w1_therm_family_converter
90{ 95{
91 u8 broken; 96 u8 broken;
@@ -111,6 +116,10 @@ static struct w1_therm_family_converter w1_therm_families[] = {
111 .f = &w1_therm_family_DS18B20, 116 .f = &w1_therm_family_DS18B20,
112 .convert = w1_DS18B20_convert_temp 117 .convert = w1_DS18B20_convert_temp
113 }, 118 },
119 {
120 .f = &w1_therm_family_DS28EA00,
121 .convert = w1_DS18B20_convert_temp
122 },
114}; 123};
115 124
116static inline int w1_DS18B20_convert_temp(u8 rom[9]) 125static inline int w1_DS18B20_convert_temp(u8 rom[9])
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 10606c822756..6c136c19e982 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -33,7 +33,7 @@
33#include <linux/kthread.h> 33#include <linux/kthread.h>
34#include <linux/freezer.h> 34#include <linux/freezer.h>
35 35
36#include <asm/atomic.h> 36#include <linux/atomic.h>
37 37
38#include "w1.h" 38#include "w1.h"
39#include "w1_log.h" 39#include "w1_log.h"
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 97479ae70b9c..98a1ac0f4693 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -24,7 +24,7 @@
24 24
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/device.h> 26#include <linux/device.h>
27#include <asm/atomic.h> 27#include <linux/atomic.h>
28 28
29#define W1_FAMILY_DEFAULT 0 29#define W1_FAMILY_DEFAULT 0
30#define W1_FAMILY_SMEM_01 0x01 30#define W1_FAMILY_SMEM_01 0x01
@@ -38,6 +38,7 @@
38#define W1_EEPROM_DS2431 0x2D 38#define W1_EEPROM_DS2431 0x2D
39#define W1_FAMILY_DS2760 0x30 39#define W1_FAMILY_DS2760 0x30
40#define W1_FAMILY_DS2780 0x32 40#define W1_FAMILY_DS2780 0x32
41#define W1_THERM_DS28EA00 0x42
41 42
42#define MAXNAMELEN 32 43#define MAXNAMELEN 32
43 44
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 21d816e9dfa5..f441726ddf2b 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -28,6 +28,17 @@ menuconfig WATCHDOG
28 28
29if WATCHDOG 29if WATCHDOG
30 30
31config WATCHDOG_CORE
32 bool "WatchDog Timer Driver Core"
33 ---help---
34 Say Y here if you want to use the new watchdog timer driver core.
35 This driver provides a framework for all watchdog timer drivers
36 and gives them the /dev/watchdog interface (and later also the
37 sysfs interface).
38
39 To compile this driver as a module, choose M here: the module will
40 be called watchdog.
41
31config WATCHDOG_NOWAYOUT 42config WATCHDOG_NOWAYOUT
32 bool "Disable watchdog shutdown on close" 43 bool "Disable watchdog shutdown on close"
33 help 44 help
@@ -186,6 +197,15 @@ config SA1100_WATCHDOG
186 To compile this driver as a module, choose M here: the 197 To compile this driver as a module, choose M here: the
187 module will be called sa1100_wdt. 198 module will be called sa1100_wdt.
188 199
200config DW_WATCHDOG
201 tristate "Synopsys DesignWare watchdog"
202 depends on ARM && HAVE_CLK
203 help
204 Say Y here if to include support for the Synopsys DesignWare
205 watchdog timer found in many ARM chips.
206 To compile this driver as a module, choose M here: the
207 module will be called dw_wdt.
208
189config MPCORE_WATCHDOG 209config MPCORE_WATCHDOG
190 tristate "MPcore watchdog" 210 tristate "MPcore watchdog"
191 depends on HAVE_ARM_TWD 211 depends on HAVE_ARM_TWD
@@ -321,7 +341,7 @@ config MAX63XX_WATCHDOG
321 341
322config IMX2_WDT 342config IMX2_WDT
323 tristate "IMX2+ Watchdog" 343 tristate "IMX2+ Watchdog"
324 depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 || ARCH_MX5 344 depends on IMX_HAVE_PLATFORM_IMX2_WDT
325 help 345 help
326 This is the driver for the hardware watchdog 346 This is the driver for the hardware watchdog
327 on the Freescale IMX2 and later processors. 347 on the Freescale IMX2 and later processors.
@@ -879,6 +899,20 @@ config M54xx_WATCHDOG
879 To compile this driver as a module, choose M here: the 899 To compile this driver as a module, choose M here: the
880 module will be called m54xx_wdt. 900 module will be called m54xx_wdt.
881 901
902# MicroBlaze Architecture
903
904config XILINX_WATCHDOG
905 tristate "Xilinx Watchdog timer"
906 depends on MICROBLAZE
907 ---help---
908 Watchdog driver for the xps_timebase_wdt ip core.
909
910 IMPORTANT: The xps_timebase_wdt parent must have the property
911 "clock-frequency" at device tree.
912
913 To compile this driver as a module, choose M here: the
914 module will be called of_xilinx_wdt.
915
882# MIPS Architecture 916# MIPS Architecture
883 917
884config ATH79_WDT 918config ATH79_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index ed26f7094e47..55bd5740e910 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -2,6 +2,10 @@
2# Makefile for the WatchDog device drivers. 2# Makefile for the WatchDog device drivers.
3# 3#
4 4
5# The WatchDog Timer Driver Core.
6watchdog-objs += watchdog_core.o watchdog_dev.o
7obj-$(CONFIG_WATCHDOG_CORE) += watchdog.o
8
5# Only one watchdog can succeed. We probe the ISA/PCI/USB based 9# Only one watchdog can succeed. We probe the ISA/PCI/USB based
6# watchdog-cards first, then the architecture specific watchdog 10# watchdog-cards first, then the architecture specific watchdog
7# drivers and then the architecture independent "softdog" driver. 11# drivers and then the architecture independent "softdog" driver.
@@ -37,6 +41,7 @@ obj-$(CONFIG_IXP4XX_WATCHDOG) += ixp4xx_wdt.o
37obj-$(CONFIG_KS8695_WATCHDOG) += ks8695_wdt.o 41obj-$(CONFIG_KS8695_WATCHDOG) += ks8695_wdt.o
38obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o 42obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o
39obj-$(CONFIG_SA1100_WATCHDOG) += sa1100_wdt.o 43obj-$(CONFIG_SA1100_WATCHDOG) += sa1100_wdt.o
44obj-$(CONFIG_DW_WATCHDOG) += dw_wdt.o
40obj-$(CONFIG_MPCORE_WATCHDOG) += mpcore_wdt.o 45obj-$(CONFIG_MPCORE_WATCHDOG) += mpcore_wdt.o
41obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o 46obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
42obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o 47obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
@@ -109,6 +114,9 @@ obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
109# M68K Architecture 114# M68K Architecture
110obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o 115obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o
111 116
117# MicroBlaze Architecture
118obj-$(CONFIG_XILINX_WATCHDOG) += of_xilinx_wdt.o
119
112# MIPS Architecture 120# MIPS Architecture
113obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o 121obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
114obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o 122obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index eac26021e8da..87445b2d72a7 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -31,7 +31,7 @@
31#include <linux/bitops.h> 31#include <linux/bitops.h>
32#include <linux/uaccess.h> 32#include <linux/uaccess.h>
33 33
34#include <mach/at91_wdt.h> 34#include "at91sam9_wdt.h"
35 35
36#define DRV_NAME "AT91SAM9 Watchdog" 36#define DRV_NAME "AT91SAM9 Watchdog"
37 37
@@ -284,27 +284,8 @@ static int __exit at91wdt_remove(struct platform_device *pdev)
284 return res; 284 return res;
285} 285}
286 286
287#ifdef CONFIG_PM
288
289static int at91wdt_suspend(struct platform_device *pdev, pm_message_t message)
290{
291 return 0;
292}
293
294static int at91wdt_resume(struct platform_device *pdev)
295{
296 return 0;
297}
298
299#else
300#define at91wdt_suspend NULL
301#define at91wdt_resume NULL
302#endif
303
304static struct platform_driver at91wdt_driver = { 287static struct platform_driver at91wdt_driver = {
305 .remove = __exit_p(at91wdt_remove), 288 .remove = __exit_p(at91wdt_remove),
306 .suspend = at91wdt_suspend,
307 .resume = at91wdt_resume,
308 .driver = { 289 .driver = {
309 .name = "at91_wdt", 290 .name = "at91_wdt",
310 .owner = THIS_MODULE, 291 .owner = THIS_MODULE,
diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h
new file mode 100644
index 000000000000..757f9cab5c82
--- /dev/null
+++ b/drivers/watchdog/at91sam9_wdt.h
@@ -0,0 +1,37 @@
1/*
2 * drivers/watchdog/at91sam9_wdt.h
3 *
4 * Copyright (C) 2007 Andrew Victor
5 * Copyright (C) 2007 Atmel Corporation.
6 *
7 * Watchdog Timer (WDT) - System peripherals regsters.
8 * Based on AT91SAM9261 datasheet revision D.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#ifndef AT91_WDT_H
17#define AT91_WDT_H
18
19#define AT91_WDT_CR (AT91_WDT + 0x00) /* Watchdog Control Register */
20#define AT91_WDT_WDRSTT (1 << 0) /* Restart */
21#define AT91_WDT_KEY (0xa5 << 24) /* KEY Password */
22
23#define AT91_WDT_MR (AT91_WDT + 0x04) /* Watchdog Mode Register */
24#define AT91_WDT_WDV (0xfff << 0) /* Counter Value */
25#define AT91_WDT_WDFIEN (1 << 12) /* Fault Interrupt Enable */
26#define AT91_WDT_WDRSTEN (1 << 13) /* Reset Processor */
27#define AT91_WDT_WDRPROC (1 << 14) /* Timer Restart */
28#define AT91_WDT_WDDIS (1 << 15) /* Watchdog Disable */
29#define AT91_WDT_WDD (0xfff << 16) /* Delta Value */
30#define AT91_WDT_WDDBGHLT (1 << 28) /* Debug Halt */
31#define AT91_WDT_WDIDLEHLT (1 << 29) /* Idle Halt */
32
33#define AT91_WDT_SR (AT91_WDT + 0x08) /* Watchdog Status Register */
34#define AT91_WDT_WDUNF (1 << 0) /* Watchdog Underflow */
35#define AT91_WDT_WDERR (1 << 1) /* Watchdog Error */
36
37#endif
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
new file mode 100644
index 000000000000..f10f8c0abba4
--- /dev/null
+++ b/drivers/watchdog/dw_wdt.c
@@ -0,0 +1,376 @@
1/*
2 * Copyright 2010-2011 Picochip Ltd., Jamie Iles
3 * http://www.picochip.com
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 *
10 * This file implements a driver for the Synopsys DesignWare watchdog device
11 * in the many ARM subsystems. The watchdog has 16 different timeout periods
12 * and these are a function of the input clock frequency.
13 *
14 * The DesignWare watchdog cannot be stopped once it has been started so we
15 * use a software timer to implement a ping that will keep the watchdog alive.
16 * If we receive an expected close for the watchdog then we keep the timer
17 * running, otherwise the timer is stopped and the watchdog will expire.
18 */
19#define pr_fmt(fmt) "dw_wdt: " fmt
20
21#include <linux/bitops.h>
22#include <linux/clk.h>
23#include <linux/device.h>
24#include <linux/err.h>
25#include <linux/fs.h>
26#include <linux/io.h>
27#include <linux/kernel.h>
28#include <linux/miscdevice.h>
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <linux/pm.h>
32#include <linux/platform_device.h>
33#include <linux/spinlock.h>
34#include <linux/timer.h>
35#include <linux/uaccess.h>
36#include <linux/watchdog.h>
37
38#define WDOG_CONTROL_REG_OFFSET 0x00
39#define WDOG_CONTROL_REG_WDT_EN_MASK 0x01
40#define WDOG_TIMEOUT_RANGE_REG_OFFSET 0x04
41#define WDOG_CURRENT_COUNT_REG_OFFSET 0x08
42#define WDOG_COUNTER_RESTART_REG_OFFSET 0x0c
43#define WDOG_COUNTER_RESTART_KICK_VALUE 0x76
44
45/* The maximum TOP (timeout period) value that can be set in the watchdog. */
46#define DW_WDT_MAX_TOP 15
47
48static int nowayout = WATCHDOG_NOWAYOUT;
49module_param(nowayout, int, 0);
50MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
51 "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
52
53#define WDT_TIMEOUT (HZ / 2)
54
55static struct {
56 spinlock_t lock;
57 void __iomem *regs;
58 struct clk *clk;
59 unsigned long in_use;
60 unsigned long next_heartbeat;
61 struct timer_list timer;
62 int expect_close;
63} dw_wdt;
64
65static inline int dw_wdt_is_enabled(void)
66{
67 return readl(dw_wdt.regs + WDOG_CONTROL_REG_OFFSET) &
68 WDOG_CONTROL_REG_WDT_EN_MASK;
69}
70
71static inline int dw_wdt_top_in_seconds(unsigned top)
72{
73 /*
74 * There are 16 possible timeout values in 0..15 where the number of
75 * cycles is 2 ^ (16 + i) and the watchdog counts down.
76 */
77 return (1 << (16 + top)) / clk_get_rate(dw_wdt.clk);
78}
79
80static int dw_wdt_get_top(void)
81{
82 int top = readl(dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF;
83
84 return dw_wdt_top_in_seconds(top);
85}
86
87static inline void dw_wdt_set_next_heartbeat(void)
88{
89 dw_wdt.next_heartbeat = jiffies + dw_wdt_get_top() * HZ;
90}
91
92static int dw_wdt_set_top(unsigned top_s)
93{
94 int i, top_val = DW_WDT_MAX_TOP;
95
96 /*
97 * Iterate over the timeout values until we find the closest match. We
98 * always look for >=.
99 */
100 for (i = 0; i <= DW_WDT_MAX_TOP; ++i)
101 if (dw_wdt_top_in_seconds(i) >= top_s) {
102 top_val = i;
103 break;
104 }
105
106 /* Set the new value in the watchdog. */
107 writel(top_val, dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
108
109 dw_wdt_set_next_heartbeat();
110
111 return dw_wdt_top_in_seconds(top_val);
112}
113
114static void dw_wdt_keepalive(void)
115{
116 writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs +
117 WDOG_COUNTER_RESTART_REG_OFFSET);
118}
119
120static void dw_wdt_ping(unsigned long data)
121{
122 if (time_before(jiffies, dw_wdt.next_heartbeat) ||
123 (!nowayout && !dw_wdt.in_use)) {
124 dw_wdt_keepalive();
125 mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);
126 } else
127 pr_crit("keepalive missed, machine will reset\n");
128}
129
130static int dw_wdt_open(struct inode *inode, struct file *filp)
131{
132 if (test_and_set_bit(0, &dw_wdt.in_use))
133 return -EBUSY;
134
135 /* Make sure we don't get unloaded. */
136 __module_get(THIS_MODULE);
137
138 spin_lock(&dw_wdt.lock);
139 if (!dw_wdt_is_enabled()) {
140 /*
141 * The watchdog is not currently enabled. Set the timeout to
142 * the maximum and then start it.
143 */
144 dw_wdt_set_top(DW_WDT_MAX_TOP);
145 writel(WDOG_CONTROL_REG_WDT_EN_MASK,
146 dw_wdt.regs + WDOG_CONTROL_REG_OFFSET);
147 }
148
149 dw_wdt_set_next_heartbeat();
150
151 spin_unlock(&dw_wdt.lock);
152
153 return nonseekable_open(inode, filp);
154}
155
156ssize_t dw_wdt_write(struct file *filp, const char __user *buf, size_t len,
157 loff_t *offset)
158{
159 if (!len)
160 return 0;
161
162 if (!nowayout) {
163 size_t i;
164
165 dw_wdt.expect_close = 0;
166
167 for (i = 0; i < len; ++i) {
168 char c;
169
170 if (get_user(c, buf + i))
171 return -EFAULT;
172
173 if (c == 'V') {
174 dw_wdt.expect_close = 1;
175 break;
176 }
177 }
178 }
179
180 dw_wdt_set_next_heartbeat();
181 mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);
182
183 return len;
184}
185
186static u32 dw_wdt_time_left(void)
187{
188 return readl(dw_wdt.regs + WDOG_CURRENT_COUNT_REG_OFFSET) /
189 clk_get_rate(dw_wdt.clk);
190}
191
192static const struct watchdog_info dw_wdt_ident = {
193 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
194 WDIOF_MAGICCLOSE,
195 .identity = "Synopsys DesignWare Watchdog",
196};
197
198static long dw_wdt_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
199{
200 unsigned long val;
201 int timeout;
202
203 switch (cmd) {
204 case WDIOC_GETSUPPORT:
205 return copy_to_user((struct watchdog_info *)arg, &dw_wdt_ident,
206 sizeof(dw_wdt_ident)) ? -EFAULT : 0;
207
208 case WDIOC_GETSTATUS:
209 case WDIOC_GETBOOTSTATUS:
210 return put_user(0, (int *)arg);
211
212 case WDIOC_KEEPALIVE:
213 dw_wdt_set_next_heartbeat();
214 return 0;
215
216 case WDIOC_SETTIMEOUT:
217 if (get_user(val, (int __user *)arg))
218 return -EFAULT;
219 timeout = dw_wdt_set_top(val);
220 return put_user(timeout , (int __user *)arg);
221
222 case WDIOC_GETTIMEOUT:
223 return put_user(dw_wdt_get_top(), (int __user *)arg);
224
225 case WDIOC_GETTIMELEFT:
226 /* Get the time left until expiry. */
227 if (get_user(val, (int __user *)arg))
228 return -EFAULT;
229 return put_user(dw_wdt_time_left(), (int __user *)arg);
230
231 default:
232 return -ENOTTY;
233 }
234}
235
236static int dw_wdt_release(struct inode *inode, struct file *filp)
237{
238 clear_bit(0, &dw_wdt.in_use);
239
240 if (!dw_wdt.expect_close) {
241 del_timer(&dw_wdt.timer);
242
243 if (!nowayout)
244 pr_crit("unexpected close, system will reboot soon\n");
245 else
246 pr_crit("watchdog cannot be disabled, system will reboot soon\n");
247 }
248
249 dw_wdt.expect_close = 0;
250
251 return 0;
252}
253
254#ifdef CONFIG_PM
255static int dw_wdt_suspend(struct device *dev)
256{
257 clk_disable(dw_wdt.clk);
258
259 return 0;
260}
261
262static int dw_wdt_resume(struct device *dev)
263{
264 int err = clk_enable(dw_wdt.clk);
265
266 if (err)
267 return err;
268
269 dw_wdt_keepalive();
270
271 return 0;
272}
273
274static const struct dev_pm_ops dw_wdt_pm_ops = {
275 .suspend = dw_wdt_suspend,
276 .resume = dw_wdt_resume,
277};
278#endif /* CONFIG_PM */
279
280static const struct file_operations wdt_fops = {
281 .owner = THIS_MODULE,
282 .llseek = no_llseek,
283 .open = dw_wdt_open,
284 .write = dw_wdt_write,
285 .unlocked_ioctl = dw_wdt_ioctl,
286 .release = dw_wdt_release
287};
288
289static struct miscdevice dw_wdt_miscdev = {
290 .fops = &wdt_fops,
291 .name = "watchdog",
292 .minor = WATCHDOG_MINOR,
293};
294
295static int __devinit dw_wdt_drv_probe(struct platform_device *pdev)
296{
297 int ret;
298 struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
299
300 if (!mem)
301 return -EINVAL;
302
303 if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
304 "dw_wdt"))
305 return -ENOMEM;
306
307 dw_wdt.regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
308 if (!dw_wdt.regs)
309 return -ENOMEM;
310
311 dw_wdt.clk = clk_get(&pdev->dev, NULL);
312 if (IS_ERR(dw_wdt.clk))
313 return PTR_ERR(dw_wdt.clk);
314
315 ret = clk_enable(dw_wdt.clk);
316 if (ret)
317 goto out_put_clk;
318
319 spin_lock_init(&dw_wdt.lock);
320
321 ret = misc_register(&dw_wdt_miscdev);
322 if (ret)
323 goto out_disable_clk;
324
325 dw_wdt_set_next_heartbeat();
326 setup_timer(&dw_wdt.timer, dw_wdt_ping, 0);
327 mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);
328
329 return 0;
330
331out_disable_clk:
332 clk_disable(dw_wdt.clk);
333out_put_clk:
334 clk_put(dw_wdt.clk);
335
336 return ret;
337}
338
339static int __devexit dw_wdt_drv_remove(struct platform_device *pdev)
340{
341 misc_deregister(&dw_wdt_miscdev);
342
343 clk_disable(dw_wdt.clk);
344 clk_put(dw_wdt.clk);
345
346 return 0;
347}
348
349static struct platform_driver dw_wdt_driver = {
350 .probe = dw_wdt_drv_probe,
351 .remove = __devexit_p(dw_wdt_drv_remove),
352 .driver = {
353 .name = "dw_wdt",
354 .owner = THIS_MODULE,
355#ifdef CONFIG_PM
356 .pm = &dw_wdt_pm_ops,
357#endif /* CONFIG_PM */
358 },
359};
360
361static int __init dw_wdt_watchdog_init(void)
362{
363 return platform_driver_register(&dw_wdt_driver);
364}
365module_init(dw_wdt_watchdog_init);
366
367static void __exit dw_wdt_watchdog_exit(void)
368{
369 platform_driver_unregister(&dw_wdt_driver);
370}
371module_exit(dw_wdt_watchdog_exit);
372
373MODULE_AUTHOR("Jamie Iles");
374MODULE_DESCRIPTION("Synopsys DesignWare Watchdog Driver");
375MODULE_LICENSE("GPL");
376MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 8cb26855bfed..410fba45378d 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -36,7 +36,7 @@
36#include <asm/cacheflush.h> 36#include <asm/cacheflush.h>
37#endif /* CONFIG_HPWDT_NMI_DECODING */ 37#endif /* CONFIG_HPWDT_NMI_DECODING */
38 38
39#define HPWDT_VERSION "1.2.0" 39#define HPWDT_VERSION "1.3.0"
40#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128) 40#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
41#define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000) 41#define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000)
42#define HPWDT_MAX_TIMER TICKS_TO_SECS(65535) 42#define HPWDT_MAX_TIMER TICKS_TO_SECS(65535)
@@ -87,6 +87,19 @@ struct smbios_cru64_info {
87}; 87};
88#define SMBIOS_CRU64_INFORMATION 212 88#define SMBIOS_CRU64_INFORMATION 212
89 89
90/* type 219 */
91struct smbios_proliant_info {
92 u8 type;
93 u8 byte_length;
94 u16 handle;
95 u32 power_features;
96 u32 omega_features;
97 u32 reserved;
98 u32 misc_features;
99};
100#define SMBIOS_ICRU_INFORMATION 219
101
102
90struct cmn_registers { 103struct cmn_registers {
91 union { 104 union {
92 struct { 105 struct {
@@ -132,6 +145,7 @@ struct cmn_registers {
132static unsigned int hpwdt_nmi_decoding; 145static unsigned int hpwdt_nmi_decoding;
133static unsigned int allow_kdump; 146static unsigned int allow_kdump;
134static unsigned int priority; /* hpwdt at end of die_notify list */ 147static unsigned int priority; /* hpwdt at end of die_notify list */
148static unsigned int is_icru;
135static DEFINE_SPINLOCK(rom_lock); 149static DEFINE_SPINLOCK(rom_lock);
136static void *cru_rom_addr; 150static void *cru_rom_addr;
137static struct cmn_registers cmn_regs; 151static struct cmn_registers cmn_regs;
@@ -476,19 +490,22 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
476 goto out; 490 goto out;
477 491
478 spin_lock_irqsave(&rom_lock, rom_pl); 492 spin_lock_irqsave(&rom_lock, rom_pl);
479 if (!die_nmi_called) 493 if (!die_nmi_called && !is_icru)
480 asminline_call(&cmn_regs, cru_rom_addr); 494 asminline_call(&cmn_regs, cru_rom_addr);
481 die_nmi_called = 1; 495 die_nmi_called = 1;
482 spin_unlock_irqrestore(&rom_lock, rom_pl); 496 spin_unlock_irqrestore(&rom_lock, rom_pl);
483 if (cmn_regs.u1.ral == 0) { 497 if (!is_icru) {
484 printk(KERN_WARNING "hpwdt: An NMI occurred, " 498 if (cmn_regs.u1.ral == 0) {
485 "but unable to determine source.\n"); 499 printk(KERN_WARNING "hpwdt: An NMI occurred, "
486 } else { 500 "but unable to determine source.\n");
487 if (allow_kdump) 501 }
488 hpwdt_stop();
489 panic("An NMI occurred, please see the Integrated "
490 "Management Log for details.\n");
491 } 502 }
503
504 if (allow_kdump)
505 hpwdt_stop();
506 panic("An NMI occurred, please see the Integrated "
507 "Management Log for details.\n");
508
492out: 509out:
493 return NOTIFY_OK; 510 return NOTIFY_OK;
494} 511}
@@ -659,30 +676,63 @@ static void __devinit hpwdt_check_nmi_decoding(struct pci_dev *dev)
659} 676}
660#endif /* CONFIG_X86_LOCAL_APIC */ 677#endif /* CONFIG_X86_LOCAL_APIC */
661 678
679/*
680 * dmi_find_icru
681 *
682 * Routine Description:
683 * This function checks whether or not we are on an iCRU-based server.
684 * This check is independent of architecture and needs to be made for
685 * any ProLiant system.
686 */
687static void __devinit dmi_find_icru(const struct dmi_header *dm, void *dummy)
688{
689 struct smbios_proliant_info *smbios_proliant_ptr;
690
691 if (dm->type == SMBIOS_ICRU_INFORMATION) {
692 smbios_proliant_ptr = (struct smbios_proliant_info *) dm;
693 if (smbios_proliant_ptr->misc_features & 0x01)
694 is_icru = 1;
695 }
696}
697
662static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev) 698static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
663{ 699{
664 int retval; 700 int retval;
665 701
666 /* 702 /*
667 * We need to map the ROM to get the CRU service. 703 * On typical CRU-based systems we need to map that service in
668 * For 32 bit Operating Systems we need to go through the 32 Bit 704 * the BIOS. For 32 bit Operating Systems we need to go through
669 * BIOS Service Directory 705 * the 32 Bit BIOS Service Directory. For 64 bit Operating
670 * For 64 bit Operating Systems we get that service through SMBIOS. 706 * Systems we get that service through SMBIOS.
707 *
708 * On systems that support the new iCRU service all we need to
709 * do is call dmi_walk to get the supported flag value and skip
710 * the old cru detect code.
671 */ 711 */
672 retval = detect_cru_service(); 712 dmi_walk(dmi_find_icru, NULL);
673 if (retval < 0) { 713 if (!is_icru) {
674 dev_warn(&dev->dev, 714
675 "Unable to detect the %d Bit CRU Service.\n", 715 /*
676 HPWDT_ARCH); 716 * We need to map the ROM to get the CRU service.
677 return retval; 717 * For 32 bit Operating Systems we need to go through the 32 Bit
678 } 718 * BIOS Service Directory
719 * For 64 bit Operating Systems we get that service through SMBIOS.
720 */
721 retval = detect_cru_service();
722 if (retval < 0) {
723 dev_warn(&dev->dev,
724 "Unable to detect the %d Bit CRU Service.\n",
725 HPWDT_ARCH);
726 return retval;
727 }
679 728
680 /* 729 /*
681 * We know this is the only CRU call we need to make so lets keep as 730 * We know this is the only CRU call we need to make so lets keep as
682 * few instructions as possible once the NMI comes in. 731 * few instructions as possible once the NMI comes in.
683 */ 732 */
684 cmn_regs.u1.rah = 0x0D; 733 cmn_regs.u1.rah = 0x0D;
685 cmn_regs.u1.ral = 0x02; 734 cmn_regs.u1.ral = 0x02;
735 }
686 736
687 /* 737 /*
688 * If the priority is set to 1, then we will be put first on the 738 * If the priority is set to 1, then we will be put first on the
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 5fd020da7c55..751a591684da 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -120,72 +120,12 @@ enum iTCO_chipsets {
120 TCO_3420, /* 3420 */ 120 TCO_3420, /* 3420 */
121 TCO_3450, /* 3450 */ 121 TCO_3450, /* 3450 */
122 TCO_EP80579, /* EP80579 */ 122 TCO_EP80579, /* EP80579 */
123 TCO_CPT1, /* Cougar Point */ 123 TCO_CPT, /* Cougar Point */
124 TCO_CPT2, /* Cougar Point Desktop */ 124 TCO_CPTD, /* Cougar Point Desktop */
125 TCO_CPT3, /* Cougar Point Mobile */ 125 TCO_CPTM, /* Cougar Point Mobile */
126 TCO_CPT4, /* Cougar Point */ 126 TCO_PBG, /* Patsburg */
127 TCO_CPT5, /* Cougar Point */
128 TCO_CPT6, /* Cougar Point */
129 TCO_CPT7, /* Cougar Point */
130 TCO_CPT8, /* Cougar Point */
131 TCO_CPT9, /* Cougar Point */
132 TCO_CPT10, /* Cougar Point */
133 TCO_CPT11, /* Cougar Point */
134 TCO_CPT12, /* Cougar Point */
135 TCO_CPT13, /* Cougar Point */
136 TCO_CPT14, /* Cougar Point */
137 TCO_CPT15, /* Cougar Point */
138 TCO_CPT16, /* Cougar Point */
139 TCO_CPT17, /* Cougar Point */
140 TCO_CPT18, /* Cougar Point */
141 TCO_CPT19, /* Cougar Point */
142 TCO_CPT20, /* Cougar Point */
143 TCO_CPT21, /* Cougar Point */
144 TCO_CPT22, /* Cougar Point */
145 TCO_CPT23, /* Cougar Point */
146 TCO_CPT24, /* Cougar Point */
147 TCO_CPT25, /* Cougar Point */
148 TCO_CPT26, /* Cougar Point */
149 TCO_CPT27, /* Cougar Point */
150 TCO_CPT28, /* Cougar Point */
151 TCO_CPT29, /* Cougar Point */
152 TCO_CPT30, /* Cougar Point */
153 TCO_CPT31, /* Cougar Point */
154 TCO_PBG1, /* Patsburg */
155 TCO_PBG2, /* Patsburg */
156 TCO_DH89XXCC, /* DH89xxCC */ 127 TCO_DH89XXCC, /* DH89xxCC */
157 TCO_PPT0, /* Panther Point */ 128 TCO_PPT, /* Panther Point */
158 TCO_PPT1, /* Panther Point */
159 TCO_PPT2, /* Panther Point */
160 TCO_PPT3, /* Panther Point */
161 TCO_PPT4, /* Panther Point */
162 TCO_PPT5, /* Panther Point */
163 TCO_PPT6, /* Panther Point */
164 TCO_PPT7, /* Panther Point */
165 TCO_PPT8, /* Panther Point */
166 TCO_PPT9, /* Panther Point */
167 TCO_PPT10, /* Panther Point */
168 TCO_PPT11, /* Panther Point */
169 TCO_PPT12, /* Panther Point */
170 TCO_PPT13, /* Panther Point */
171 TCO_PPT14, /* Panther Point */
172 TCO_PPT15, /* Panther Point */
173 TCO_PPT16, /* Panther Point */
174 TCO_PPT17, /* Panther Point */
175 TCO_PPT18, /* Panther Point */
176 TCO_PPT19, /* Panther Point */
177 TCO_PPT20, /* Panther Point */
178 TCO_PPT21, /* Panther Point */
179 TCO_PPT22, /* Panther Point */
180 TCO_PPT23, /* Panther Point */
181 TCO_PPT24, /* Panther Point */
182 TCO_PPT25, /* Panther Point */
183 TCO_PPT26, /* Panther Point */
184 TCO_PPT27, /* Panther Point */
185 TCO_PPT28, /* Panther Point */
186 TCO_PPT29, /* Panther Point */
187 TCO_PPT30, /* Panther Point */
188 TCO_PPT31, /* Panther Point */
189}; 129};
190 130
191static struct { 131static struct {
@@ -244,83 +184,14 @@ static struct {
244 {"3450", 2}, 184 {"3450", 2},
245 {"EP80579", 2}, 185 {"EP80579", 2},
246 {"Cougar Point", 2}, 186 {"Cougar Point", 2},
247 {"Cougar Point", 2}, 187 {"Cougar Point Desktop", 2},
248 {"Cougar Point", 2}, 188 {"Cougar Point Mobile", 2},
249 {"Cougar Point", 2},
250 {"Cougar Point", 2},
251 {"Cougar Point", 2},
252 {"Cougar Point", 2},
253 {"Cougar Point", 2},
254 {"Cougar Point", 2},
255 {"Cougar Point", 2},
256 {"Cougar Point", 2},
257 {"Cougar Point", 2},
258 {"Cougar Point", 2},
259 {"Cougar Point", 2},
260 {"Cougar Point", 2},
261 {"Cougar Point", 2},
262 {"Cougar Point", 2},
263 {"Cougar Point", 2},
264 {"Cougar Point", 2},
265 {"Cougar Point", 2},
266 {"Cougar Point", 2},
267 {"Cougar Point", 2},
268 {"Cougar Point", 2},
269 {"Cougar Point", 2},
270 {"Cougar Point", 2},
271 {"Cougar Point", 2},
272 {"Cougar Point", 2},
273 {"Cougar Point", 2},
274 {"Cougar Point", 2},
275 {"Cougar Point", 2},
276 {"Cougar Point", 2},
277 {"Patsburg", 2},
278 {"Patsburg", 2}, 189 {"Patsburg", 2},
279 {"DH89xxCC", 2}, 190 {"DH89xxCC", 2},
280 {"Panther Point", 2}, 191 {"Panther Point", 2},
281 {"Panther Point", 2},
282 {"Panther Point", 2},
283 {"Panther Point", 2},
284 {"Panther Point", 2},
285 {"Panther Point", 2},
286 {"Panther Point", 2},
287 {"Panther Point", 2},
288 {"Panther Point", 2},
289 {"Panther Point", 2},
290 {"Panther Point", 2},
291 {"Panther Point", 2},
292 {"Panther Point", 2},
293 {"Panther Point", 2},
294 {"Panther Point", 2},
295 {"Panther Point", 2},
296 {"Panther Point", 2},
297 {"Panther Point", 2},
298 {"Panther Point", 2},
299 {"Panther Point", 2},
300 {"Panther Point", 2},
301 {"Panther Point", 2},
302 {"Panther Point", 2},
303 {"Panther Point", 2},
304 {"Panther Point", 2},
305 {"Panther Point", 2},
306 {"Panther Point", 2},
307 {"Panther Point", 2},
308 {"Panther Point", 2},
309 {"Panther Point", 2},
310 {"Panther Point", 2},
311 {"Panther Point", 2},
312 {NULL, 0} 192 {NULL, 0}
313}; 193};
314 194
315#define ITCO_PCI_DEVICE(dev, data) \
316 .vendor = PCI_VENDOR_ID_INTEL, \
317 .device = dev, \
318 .subvendor = PCI_ANY_ID, \
319 .subdevice = PCI_ANY_ID, \
320 .class = 0, \
321 .class_mask = 0, \
322 .driver_data = data
323
324/* 195/*
325 * This data only exists for exporting the supported PCI ids 196 * This data only exists for exporting the supported PCI ids
326 * via MODULE_DEVICE_TABLE. We do not actually register a 197 * via MODULE_DEVICE_TABLE. We do not actually register a
@@ -328,138 +199,138 @@ static struct {
328 * functions that probably will be registered by other drivers. 199 * functions that probably will be registered by other drivers.
329 */ 200 */
330static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = { 201static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
331 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AA_0, TCO_ICH)}, 202 { PCI_VDEVICE(INTEL, 0x2410), TCO_ICH},
332 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AB_0, TCO_ICH0)}, 203 { PCI_VDEVICE(INTEL, 0x2420), TCO_ICH0},
333 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801BA_0, TCO_ICH2)}, 204 { PCI_VDEVICE(INTEL, 0x2440), TCO_ICH2},
334 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801BA_10, TCO_ICH2M)}, 205 { PCI_VDEVICE(INTEL, 0x244c), TCO_ICH2M},
335 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801CA_0, TCO_ICH3)}, 206 { PCI_VDEVICE(INTEL, 0x2480), TCO_ICH3},
336 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801CA_12, TCO_ICH3M)}, 207 { PCI_VDEVICE(INTEL, 0x248c), TCO_ICH3M},
337 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801DB_0, TCO_ICH4)}, 208 { PCI_VDEVICE(INTEL, 0x24c0), TCO_ICH4},
338 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801DB_12, TCO_ICH4M)}, 209 { PCI_VDEVICE(INTEL, 0x24cc), TCO_ICH4M},
339 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801E_0, TCO_CICH)}, 210 { PCI_VDEVICE(INTEL, 0x2450), TCO_CICH},
340 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801EB_0, TCO_ICH5)}, 211 { PCI_VDEVICE(INTEL, 0x24d0), TCO_ICH5},
341 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB_1, TCO_6300ESB)}, 212 { PCI_VDEVICE(INTEL, 0x25a1), TCO_6300ESB},
342 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_0, TCO_ICH6)}, 213 { PCI_VDEVICE(INTEL, 0x2640), TCO_ICH6},
343 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_1, TCO_ICH6M)}, 214 { PCI_VDEVICE(INTEL, 0x2641), TCO_ICH6M},
344 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_2, TCO_ICH6W)}, 215 { PCI_VDEVICE(INTEL, 0x2642), TCO_ICH6W},
345 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB2_0, TCO_631XESB)}, 216 { PCI_VDEVICE(INTEL, 0x2670), TCO_631XESB},
346 { ITCO_PCI_DEVICE(0x2671, TCO_631XESB)}, 217 { PCI_VDEVICE(INTEL, 0x2671), TCO_631XESB},
347 { ITCO_PCI_DEVICE(0x2672, TCO_631XESB)}, 218 { PCI_VDEVICE(INTEL, 0x2672), TCO_631XESB},
348 { ITCO_PCI_DEVICE(0x2673, TCO_631XESB)}, 219 { PCI_VDEVICE(INTEL, 0x2673), TCO_631XESB},
349 { ITCO_PCI_DEVICE(0x2674, TCO_631XESB)}, 220 { PCI_VDEVICE(INTEL, 0x2674), TCO_631XESB},
350 { ITCO_PCI_DEVICE(0x2675, TCO_631XESB)}, 221 { PCI_VDEVICE(INTEL, 0x2675), TCO_631XESB},
351 { ITCO_PCI_DEVICE(0x2676, TCO_631XESB)}, 222 { PCI_VDEVICE(INTEL, 0x2676), TCO_631XESB},
352 { ITCO_PCI_DEVICE(0x2677, TCO_631XESB)}, 223 { PCI_VDEVICE(INTEL, 0x2677), TCO_631XESB},
353 { ITCO_PCI_DEVICE(0x2678, TCO_631XESB)}, 224 { PCI_VDEVICE(INTEL, 0x2678), TCO_631XESB},
354 { ITCO_PCI_DEVICE(0x2679, TCO_631XESB)}, 225 { PCI_VDEVICE(INTEL, 0x2679), TCO_631XESB},
355 { ITCO_PCI_DEVICE(0x267a, TCO_631XESB)}, 226 { PCI_VDEVICE(INTEL, 0x267a), TCO_631XESB},
356 { ITCO_PCI_DEVICE(0x267b, TCO_631XESB)}, 227 { PCI_VDEVICE(INTEL, 0x267b), TCO_631XESB},
357 { ITCO_PCI_DEVICE(0x267c, TCO_631XESB)}, 228 { PCI_VDEVICE(INTEL, 0x267c), TCO_631XESB},
358 { ITCO_PCI_DEVICE(0x267d, TCO_631XESB)}, 229 { PCI_VDEVICE(INTEL, 0x267d), TCO_631XESB},
359 { ITCO_PCI_DEVICE(0x267e, TCO_631XESB)}, 230 { PCI_VDEVICE(INTEL, 0x267e), TCO_631XESB},
360 { ITCO_PCI_DEVICE(0x267f, TCO_631XESB)}, 231 { PCI_VDEVICE(INTEL, 0x267f), TCO_631XESB},
361 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_0, TCO_ICH7)}, 232 { PCI_VDEVICE(INTEL, 0x27b8), TCO_ICH7},
362 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_30, TCO_ICH7DH)}, 233 { PCI_VDEVICE(INTEL, 0x27b0), TCO_ICH7DH},
363 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_1, TCO_ICH7M)}, 234 { PCI_VDEVICE(INTEL, 0x27b9), TCO_ICH7M},
364 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_31, TCO_ICH7MDH)}, 235 { PCI_VDEVICE(INTEL, 0x27bd), TCO_ICH7MDH},
365 { ITCO_PCI_DEVICE(0x27bc, TCO_NM10)}, 236 { PCI_VDEVICE(INTEL, 0x27bc), TCO_NM10},
366 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_0, TCO_ICH8)}, 237 { PCI_VDEVICE(INTEL, 0x2810), TCO_ICH8},
367 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_2, TCO_ICH8DH)}, 238 { PCI_VDEVICE(INTEL, 0x2812), TCO_ICH8DH},
368 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_3, TCO_ICH8DO)}, 239 { PCI_VDEVICE(INTEL, 0x2814), TCO_ICH8DO},
369 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_4, TCO_ICH8M)}, 240 { PCI_VDEVICE(INTEL, 0x2815), TCO_ICH8M},
370 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_1, TCO_ICH8ME)}, 241 { PCI_VDEVICE(INTEL, 0x2811), TCO_ICH8ME},
371 { ITCO_PCI_DEVICE(0x2918, TCO_ICH9)}, 242 { PCI_VDEVICE(INTEL, 0x2918), TCO_ICH9},
372 { ITCO_PCI_DEVICE(0x2916, TCO_ICH9R)}, 243 { PCI_VDEVICE(INTEL, 0x2916), TCO_ICH9R},
373 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_2, TCO_ICH9DH)}, 244 { PCI_VDEVICE(INTEL, 0x2912), TCO_ICH9DH},
374 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_4, TCO_ICH9DO)}, 245 { PCI_VDEVICE(INTEL, 0x2914), TCO_ICH9DO},
375 { ITCO_PCI_DEVICE(0x2919, TCO_ICH9M)}, 246 { PCI_VDEVICE(INTEL, 0x2919), TCO_ICH9M},
376 { ITCO_PCI_DEVICE(0x2917, TCO_ICH9ME)}, 247 { PCI_VDEVICE(INTEL, 0x2917), TCO_ICH9ME},
377 { ITCO_PCI_DEVICE(0x3a18, TCO_ICH10)}, 248 { PCI_VDEVICE(INTEL, 0x3a18), TCO_ICH10},
378 { ITCO_PCI_DEVICE(0x3a16, TCO_ICH10R)}, 249 { PCI_VDEVICE(INTEL, 0x3a16), TCO_ICH10R},
379 { ITCO_PCI_DEVICE(0x3a1a, TCO_ICH10D)}, 250 { PCI_VDEVICE(INTEL, 0x3a1a), TCO_ICH10D},
380 { ITCO_PCI_DEVICE(0x3a14, TCO_ICH10DO)}, 251 { PCI_VDEVICE(INTEL, 0x3a14), TCO_ICH10DO},
381 { ITCO_PCI_DEVICE(0x3b00, TCO_PCH)}, 252 { PCI_VDEVICE(INTEL, 0x3b00), TCO_PCH},
382 { ITCO_PCI_DEVICE(0x3b01, TCO_PCHM)}, 253 { PCI_VDEVICE(INTEL, 0x3b01), TCO_PCHM},
383 { ITCO_PCI_DEVICE(0x3b02, TCO_P55)}, 254 { PCI_VDEVICE(INTEL, 0x3b02), TCO_P55},
384 { ITCO_PCI_DEVICE(0x3b03, TCO_PM55)}, 255 { PCI_VDEVICE(INTEL, 0x3b03), TCO_PM55},
385 { ITCO_PCI_DEVICE(0x3b06, TCO_H55)}, 256 { PCI_VDEVICE(INTEL, 0x3b06), TCO_H55},
386 { ITCO_PCI_DEVICE(0x3b07, TCO_QM57)}, 257 { PCI_VDEVICE(INTEL, 0x3b07), TCO_QM57},
387 { ITCO_PCI_DEVICE(0x3b08, TCO_H57)}, 258 { PCI_VDEVICE(INTEL, 0x3b08), TCO_H57},
388 { ITCO_PCI_DEVICE(0x3b09, TCO_HM55)}, 259 { PCI_VDEVICE(INTEL, 0x3b09), TCO_HM55},
389 { ITCO_PCI_DEVICE(0x3b0a, TCO_Q57)}, 260 { PCI_VDEVICE(INTEL, 0x3b0a), TCO_Q57},
390 { ITCO_PCI_DEVICE(0x3b0b, TCO_HM57)}, 261 { PCI_VDEVICE(INTEL, 0x3b0b), TCO_HM57},
391 { ITCO_PCI_DEVICE(0x3b0d, TCO_PCHMSFF)}, 262 { PCI_VDEVICE(INTEL, 0x3b0d), TCO_PCHMSFF},
392 { ITCO_PCI_DEVICE(0x3b0f, TCO_QS57)}, 263 { PCI_VDEVICE(INTEL, 0x3b0f), TCO_QS57},
393 { ITCO_PCI_DEVICE(0x3b12, TCO_3400)}, 264 { PCI_VDEVICE(INTEL, 0x3b12), TCO_3400},
394 { ITCO_PCI_DEVICE(0x3b14, TCO_3420)}, 265 { PCI_VDEVICE(INTEL, 0x3b14), TCO_3420},
395 { ITCO_PCI_DEVICE(0x3b16, TCO_3450)}, 266 { PCI_VDEVICE(INTEL, 0x3b16), TCO_3450},
396 { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)}, 267 { PCI_VDEVICE(INTEL, 0x5031), TCO_EP80579},
397 { ITCO_PCI_DEVICE(0x1c41, TCO_CPT1)}, 268 { PCI_VDEVICE(INTEL, 0x1c41), TCO_CPT},
398 { ITCO_PCI_DEVICE(0x1c42, TCO_CPT2)}, 269 { PCI_VDEVICE(INTEL, 0x1c42), TCO_CPTD},
399 { ITCO_PCI_DEVICE(0x1c43, TCO_CPT3)}, 270 { PCI_VDEVICE(INTEL, 0x1c43), TCO_CPTM},
400 { ITCO_PCI_DEVICE(0x1c44, TCO_CPT4)}, 271 { PCI_VDEVICE(INTEL, 0x1c44), TCO_CPT},
401 { ITCO_PCI_DEVICE(0x1c45, TCO_CPT5)}, 272 { PCI_VDEVICE(INTEL, 0x1c45), TCO_CPT},
402 { ITCO_PCI_DEVICE(0x1c46, TCO_CPT6)}, 273 { PCI_VDEVICE(INTEL, 0x1c46), TCO_CPT},
403 { ITCO_PCI_DEVICE(0x1c47, TCO_CPT7)}, 274 { PCI_VDEVICE(INTEL, 0x1c47), TCO_CPT},
404 { ITCO_PCI_DEVICE(0x1c48, TCO_CPT8)}, 275 { PCI_VDEVICE(INTEL, 0x1c48), TCO_CPT},
405 { ITCO_PCI_DEVICE(0x1c49, TCO_CPT9)}, 276 { PCI_VDEVICE(INTEL, 0x1c49), TCO_CPT},
406 { ITCO_PCI_DEVICE(0x1c4a, TCO_CPT10)}, 277 { PCI_VDEVICE(INTEL, 0x1c4a), TCO_CPT},
407 { ITCO_PCI_DEVICE(0x1c4b, TCO_CPT11)}, 278 { PCI_VDEVICE(INTEL, 0x1c4b), TCO_CPT},
408 { ITCO_PCI_DEVICE(0x1c4c, TCO_CPT12)}, 279 { PCI_VDEVICE(INTEL, 0x1c4c), TCO_CPT},
409 { ITCO_PCI_DEVICE(0x1c4d, TCO_CPT13)}, 280 { PCI_VDEVICE(INTEL, 0x1c4d), TCO_CPT},
410 { ITCO_PCI_DEVICE(0x1c4e, TCO_CPT14)}, 281 { PCI_VDEVICE(INTEL, 0x1c4e), TCO_CPT},
411 { ITCO_PCI_DEVICE(0x1c4f, TCO_CPT15)}, 282 { PCI_VDEVICE(INTEL, 0x1c4f), TCO_CPT},
412 { ITCO_PCI_DEVICE(0x1c50, TCO_CPT16)}, 283 { PCI_VDEVICE(INTEL, 0x1c50), TCO_CPT},
413 { ITCO_PCI_DEVICE(0x1c51, TCO_CPT17)}, 284 { PCI_VDEVICE(INTEL, 0x1c51), TCO_CPT},
414 { ITCO_PCI_DEVICE(0x1c52, TCO_CPT18)}, 285 { PCI_VDEVICE(INTEL, 0x1c52), TCO_CPT},
415 { ITCO_PCI_DEVICE(0x1c53, TCO_CPT19)}, 286 { PCI_VDEVICE(INTEL, 0x1c53), TCO_CPT},
416 { ITCO_PCI_DEVICE(0x1c54, TCO_CPT20)}, 287 { PCI_VDEVICE(INTEL, 0x1c54), TCO_CPT},
417 { ITCO_PCI_DEVICE(0x1c55, TCO_CPT21)}, 288 { PCI_VDEVICE(INTEL, 0x1c55), TCO_CPT},
418 { ITCO_PCI_DEVICE(0x1c56, TCO_CPT22)}, 289 { PCI_VDEVICE(INTEL, 0x1c56), TCO_CPT},
419 { ITCO_PCI_DEVICE(0x1c57, TCO_CPT23)}, 290 { PCI_VDEVICE(INTEL, 0x1c57), TCO_CPT},
420 { ITCO_PCI_DEVICE(0x1c58, TCO_CPT24)}, 291 { PCI_VDEVICE(INTEL, 0x1c58), TCO_CPT},
421 { ITCO_PCI_DEVICE(0x1c59, TCO_CPT25)}, 292 { PCI_VDEVICE(INTEL, 0x1c59), TCO_CPT},
422 { ITCO_PCI_DEVICE(0x1c5a, TCO_CPT26)}, 293 { PCI_VDEVICE(INTEL, 0x1c5a), TCO_CPT},
423 { ITCO_PCI_DEVICE(0x1c5b, TCO_CPT27)}, 294 { PCI_VDEVICE(INTEL, 0x1c5b), TCO_CPT},
424 { ITCO_PCI_DEVICE(0x1c5c, TCO_CPT28)}, 295 { PCI_VDEVICE(INTEL, 0x1c5c), TCO_CPT},
425 { ITCO_PCI_DEVICE(0x1c5d, TCO_CPT29)}, 296 { PCI_VDEVICE(INTEL, 0x1c5d), TCO_CPT},
426 { ITCO_PCI_DEVICE(0x1c5e, TCO_CPT30)}, 297 { PCI_VDEVICE(INTEL, 0x1c5e), TCO_CPT},
427 { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)}, 298 { PCI_VDEVICE(INTEL, 0x1c5f), TCO_CPT},
428 { ITCO_PCI_DEVICE(0x1d40, TCO_PBG1)}, 299 { PCI_VDEVICE(INTEL, 0x1d40), TCO_PBG},
429 { ITCO_PCI_DEVICE(0x1d41, TCO_PBG2)}, 300 { PCI_VDEVICE(INTEL, 0x1d41), TCO_PBG},
430 { ITCO_PCI_DEVICE(0x2310, TCO_DH89XXCC)}, 301 { PCI_VDEVICE(INTEL, 0x2310), TCO_DH89XXCC},
431 { ITCO_PCI_DEVICE(0x1e40, TCO_PPT0)}, 302 { PCI_VDEVICE(INTEL, 0x1e40), TCO_PPT},
432 { ITCO_PCI_DEVICE(0x1e41, TCO_PPT1)}, 303 { PCI_VDEVICE(INTEL, 0x1e41), TCO_PPT},
433 { ITCO_PCI_DEVICE(0x1e42, TCO_PPT2)}, 304 { PCI_VDEVICE(INTEL, 0x1e42), TCO_PPT},
434 { ITCO_PCI_DEVICE(0x1e43, TCO_PPT3)}, 305 { PCI_VDEVICE(INTEL, 0x1e43), TCO_PPT},
435 { ITCO_PCI_DEVICE(0x1e44, TCO_PPT4)}, 306 { PCI_VDEVICE(INTEL, 0x1e44), TCO_PPT},
436 { ITCO_PCI_DEVICE(0x1e45, TCO_PPT5)}, 307 { PCI_VDEVICE(INTEL, 0x1e45), TCO_PPT},
437 { ITCO_PCI_DEVICE(0x1e46, TCO_PPT6)}, 308 { PCI_VDEVICE(INTEL, 0x1e46), TCO_PPT},
438 { ITCO_PCI_DEVICE(0x1e47, TCO_PPT7)}, 309 { PCI_VDEVICE(INTEL, 0x1e47), TCO_PPT},
439 { ITCO_PCI_DEVICE(0x1e48, TCO_PPT8)}, 310 { PCI_VDEVICE(INTEL, 0x1e48), TCO_PPT},
440 { ITCO_PCI_DEVICE(0x1e49, TCO_PPT9)}, 311 { PCI_VDEVICE(INTEL, 0x1e49), TCO_PPT},
441 { ITCO_PCI_DEVICE(0x1e4a, TCO_PPT10)}, 312 { PCI_VDEVICE(INTEL, 0x1e4a), TCO_PPT},
442 { ITCO_PCI_DEVICE(0x1e4b, TCO_PPT11)}, 313 { PCI_VDEVICE(INTEL, 0x1e4b), TCO_PPT},
443 { ITCO_PCI_DEVICE(0x1e4c, TCO_PPT12)}, 314 { PCI_VDEVICE(INTEL, 0x1e4c), TCO_PPT},
444 { ITCO_PCI_DEVICE(0x1e4d, TCO_PPT13)}, 315 { PCI_VDEVICE(INTEL, 0x1e4d), TCO_PPT},
445 { ITCO_PCI_DEVICE(0x1e4e, TCO_PPT14)}, 316 { PCI_VDEVICE(INTEL, 0x1e4e), TCO_PPT},
446 { ITCO_PCI_DEVICE(0x1e4f, TCO_PPT15)}, 317 { PCI_VDEVICE(INTEL, 0x1e4f), TCO_PPT},
447 { ITCO_PCI_DEVICE(0x1e50, TCO_PPT16)}, 318 { PCI_VDEVICE(INTEL, 0x1e50), TCO_PPT},
448 { ITCO_PCI_DEVICE(0x1e51, TCO_PPT17)}, 319 { PCI_VDEVICE(INTEL, 0x1e51), TCO_PPT},
449 { ITCO_PCI_DEVICE(0x1e52, TCO_PPT18)}, 320 { PCI_VDEVICE(INTEL, 0x1e52), TCO_PPT},
450 { ITCO_PCI_DEVICE(0x1e53, TCO_PPT19)}, 321 { PCI_VDEVICE(INTEL, 0x1e53), TCO_PPT},
451 { ITCO_PCI_DEVICE(0x1e54, TCO_PPT20)}, 322 { PCI_VDEVICE(INTEL, 0x1e54), TCO_PPT},
452 { ITCO_PCI_DEVICE(0x1e55, TCO_PPT21)}, 323 { PCI_VDEVICE(INTEL, 0x1e55), TCO_PPT},
453 { ITCO_PCI_DEVICE(0x1e56, TCO_PPT22)}, 324 { PCI_VDEVICE(INTEL, 0x1e56), TCO_PPT},
454 { ITCO_PCI_DEVICE(0x1e57, TCO_PPT23)}, 325 { PCI_VDEVICE(INTEL, 0x1e57), TCO_PPT},
455 { ITCO_PCI_DEVICE(0x1e58, TCO_PPT24)}, 326 { PCI_VDEVICE(INTEL, 0x1e58), TCO_PPT},
456 { ITCO_PCI_DEVICE(0x1e59, TCO_PPT25)}, 327 { PCI_VDEVICE(INTEL, 0x1e59), TCO_PPT},
457 { ITCO_PCI_DEVICE(0x1e5a, TCO_PPT26)}, 328 { PCI_VDEVICE(INTEL, 0x1e5a), TCO_PPT},
458 { ITCO_PCI_DEVICE(0x1e5b, TCO_PPT27)}, 329 { PCI_VDEVICE(INTEL, 0x1e5b), TCO_PPT},
459 { ITCO_PCI_DEVICE(0x1e5c, TCO_PPT28)}, 330 { PCI_VDEVICE(INTEL, 0x1e5c), TCO_PPT},
460 { ITCO_PCI_DEVICE(0x1e5d, TCO_PPT29)}, 331 { PCI_VDEVICE(INTEL, 0x1e5d), TCO_PPT},
461 { ITCO_PCI_DEVICE(0x1e5e, TCO_PPT30)}, 332 { PCI_VDEVICE(INTEL, 0x1e5e), TCO_PPT},
462 { ITCO_PCI_DEVICE(0x1e5f, TCO_PPT31)}, 333 { PCI_VDEVICE(INTEL, 0x1e5f), TCO_PPT},
463 { 0, }, /* End of list */ 334 { 0, }, /* End of list */
464}; 335};
465MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); 336MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
@@ -1052,15 +923,10 @@ static void iTCO_wdt_shutdown(struct platform_device *dev)
1052 iTCO_wdt_stop(); 923 iTCO_wdt_stop();
1053} 924}
1054 925
1055#define iTCO_wdt_suspend NULL
1056#define iTCO_wdt_resume NULL
1057
1058static struct platform_driver iTCO_wdt_driver = { 926static struct platform_driver iTCO_wdt_driver = {
1059 .probe = iTCO_wdt_probe, 927 .probe = iTCO_wdt_probe,
1060 .remove = __devexit_p(iTCO_wdt_remove), 928 .remove = __devexit_p(iTCO_wdt_remove),
1061 .shutdown = iTCO_wdt_shutdown, 929 .shutdown = iTCO_wdt_shutdown,
1062 .suspend = iTCO_wdt_suspend,
1063 .resume = iTCO_wdt_resume,
1064 .driver = { 930 .driver = {
1065 .owner = THIS_MODULE, 931 .owner = THIS_MODULE,
1066 .name = DRV_NAME, 932 .name = DRV_NAME,
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 86f7cac1026c..b8ef2c6dca7c 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -329,12 +329,18 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
329 } 329 }
330} 330}
331 331
332static const struct of_device_id imx2_wdt_dt_ids[] = {
333 { .compatible = "fsl,imx21-wdt", },
334 { /* sentinel */ }
335};
336
332static struct platform_driver imx2_wdt_driver = { 337static struct platform_driver imx2_wdt_driver = {
333 .remove = __exit_p(imx2_wdt_remove), 338 .remove = __exit_p(imx2_wdt_remove),
334 .shutdown = imx2_wdt_shutdown, 339 .shutdown = imx2_wdt_shutdown,
335 .driver = { 340 .driver = {
336 .name = DRIVER_NAME, 341 .name = DRIVER_NAME,
337 .owner = THIS_MODULE, 342 .owner = THIS_MODULE,
343 .of_match_table = imx2_wdt_dt_ids,
338 }, 344 },
339}; 345};
340 346
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
index ba4386066a42..1abdc0454c54 100644
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -43,7 +43,7 @@
43#include <linux/signal.h> 43#include <linux/signal.h>
44#include <linux/sfi.h> 44#include <linux/sfi.h>
45#include <asm/irq.h> 45#include <asm/irq.h>
46#include <asm/atomic.h> 46#include <linux/atomic.h>
47#include <asm/intel_scu_ipc.h> 47#include <asm/intel_scu_ipc.h>
48#include <asm/apb_timer.h> 48#include <asm/apb_timer.h>
49#include <asm/mrst.h> 49#include <asm/mrst.h>
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index 6143f52ba6b8..8d2d8502d3e8 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -28,10 +28,10 @@
28#include <linux/notifier.h> 28#include <linux/notifier.h>
29#include <linux/reboot.h> 29#include <linux/reboot.h>
30#include <linux/fs.h> 30#include <linux/fs.h>
31#include <linux/pci.h>
32#include <linux/spinlock.h> 31#include <linux/spinlock.h>
33#include <linux/uaccess.h> 32#include <linux/uaccess.h>
34#include <linux/io.h> 33#include <linux/io.h>
34#include <linux/ioport.h>
35 35
36#define NAME "it8712f_wdt" 36#define NAME "it8712f_wdt"
37 37
@@ -51,7 +51,6 @@ MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
51 51
52static unsigned long wdt_open; 52static unsigned long wdt_open;
53static unsigned expect_close; 53static unsigned expect_close;
54static spinlock_t io_lock;
55static unsigned char revision; 54static unsigned char revision;
56 55
57/* Dog Food address - We use the game port address */ 56/* Dog Food address - We use the game port address */
@@ -121,20 +120,26 @@ static inline void superio_select(int ldn)
121 outb(ldn, VAL); 120 outb(ldn, VAL);
122} 121}
123 122
124static inline void superio_enter(void) 123static inline int superio_enter(void)
125{ 124{
126 spin_lock(&io_lock); 125 /*
126 * Try to reserve REG and REG + 1 for exclusive access.
127 */
128 if (!request_muxed_region(REG, 2, NAME))
129 return -EBUSY;
130
127 outb(0x87, REG); 131 outb(0x87, REG);
128 outb(0x01, REG); 132 outb(0x01, REG);
129 outb(0x55, REG); 133 outb(0x55, REG);
130 outb(0x55, REG); 134 outb(0x55, REG);
135 return 0;
131} 136}
132 137
133static inline void superio_exit(void) 138static inline void superio_exit(void)
134{ 139{
135 outb(0x02, REG); 140 outb(0x02, REG);
136 outb(0x02, VAL); 141 outb(0x02, VAL);
137 spin_unlock(&io_lock); 142 release_region(REG, 2);
138} 143}
139 144
140static inline void it8712f_wdt_ping(void) 145static inline void it8712f_wdt_ping(void)
@@ -173,10 +178,13 @@ static int it8712f_wdt_get_status(void)
173 return 0; 178 return 0;
174} 179}
175 180
176static void it8712f_wdt_enable(void) 181static int it8712f_wdt_enable(void)
177{ 182{
183 int ret = superio_enter();
184 if (ret)
185 return ret;
186
178 printk(KERN_DEBUG NAME ": enabling watchdog timer\n"); 187 printk(KERN_DEBUG NAME ": enabling watchdog timer\n");
179 superio_enter();
180 superio_select(LDN_GPIO); 188 superio_select(LDN_GPIO);
181 189
182 superio_outb(wdt_control_reg, WDT_CONTROL); 190 superio_outb(wdt_control_reg, WDT_CONTROL);
@@ -186,13 +194,17 @@ static void it8712f_wdt_enable(void)
186 superio_exit(); 194 superio_exit();
187 195
188 it8712f_wdt_ping(); 196 it8712f_wdt_ping();
197
198 return 0;
189} 199}
190 200
191static void it8712f_wdt_disable(void) 201static int it8712f_wdt_disable(void)
192{ 202{
193 printk(KERN_DEBUG NAME ": disabling watchdog timer\n"); 203 int ret = superio_enter();
204 if (ret)
205 return ret;
194 206
195 superio_enter(); 207 printk(KERN_DEBUG NAME ": disabling watchdog timer\n");
196 superio_select(LDN_GPIO); 208 superio_select(LDN_GPIO);
197 209
198 superio_outb(0, WDT_CONFIG); 210 superio_outb(0, WDT_CONFIG);
@@ -202,6 +214,7 @@ static void it8712f_wdt_disable(void)
202 superio_outb(0, WDT_TIMEOUT); 214 superio_outb(0, WDT_TIMEOUT);
203 215
204 superio_exit(); 216 superio_exit();
217 return 0;
205} 218}
206 219
207static int it8712f_wdt_notify(struct notifier_block *this, 220static int it8712f_wdt_notify(struct notifier_block *this,
@@ -252,6 +265,7 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
252 WDIOF_MAGICCLOSE, 265 WDIOF_MAGICCLOSE,
253 }; 266 };
254 int value; 267 int value;
268 int ret;
255 269
256 switch (cmd) { 270 switch (cmd) {
257 case WDIOC_GETSUPPORT: 271 case WDIOC_GETSUPPORT:
@@ -259,7 +273,9 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
259 return -EFAULT; 273 return -EFAULT;
260 return 0; 274 return 0;
261 case WDIOC_GETSTATUS: 275 case WDIOC_GETSTATUS:
262 superio_enter(); 276 ret = superio_enter();
277 if (ret)
278 return ret;
263 superio_select(LDN_GPIO); 279 superio_select(LDN_GPIO);
264 280
265 value = it8712f_wdt_get_status(); 281 value = it8712f_wdt_get_status();
@@ -280,7 +296,9 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
280 if (value > (max_units * 60)) 296 if (value > (max_units * 60))
281 return -EINVAL; 297 return -EINVAL;
282 margin = value; 298 margin = value;
283 superio_enter(); 299 ret = superio_enter();
300 if (ret)
301 return ret;
284 superio_select(LDN_GPIO); 302 superio_select(LDN_GPIO);
285 303
286 it8712f_wdt_update_margin(); 304 it8712f_wdt_update_margin();
@@ -299,10 +317,14 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
299 317
300static int it8712f_wdt_open(struct inode *inode, struct file *file) 318static int it8712f_wdt_open(struct inode *inode, struct file *file)
301{ 319{
320 int ret;
302 /* only allow one at a time */ 321 /* only allow one at a time */
303 if (test_and_set_bit(0, &wdt_open)) 322 if (test_and_set_bit(0, &wdt_open))
304 return -EBUSY; 323 return -EBUSY;
305 it8712f_wdt_enable(); 324
325 ret = it8712f_wdt_enable();
326 if (ret)
327 return ret;
306 return nonseekable_open(inode, file); 328 return nonseekable_open(inode, file);
307} 329}
308 330
@@ -313,7 +335,8 @@ static int it8712f_wdt_release(struct inode *inode, struct file *file)
313 ": watchdog device closed unexpectedly, will not" 335 ": watchdog device closed unexpectedly, will not"
314 " disable the watchdog timer\n"); 336 " disable the watchdog timer\n");
315 } else if (!nowayout) { 337 } else if (!nowayout) {
316 it8712f_wdt_disable(); 338 if (it8712f_wdt_disable())
339 printk(KERN_WARNING NAME "Watchdog disable failed\n");
317 } 340 }
318 expect_close = 0; 341 expect_close = 0;
319 clear_bit(0, &wdt_open); 342 clear_bit(0, &wdt_open);
@@ -340,8 +363,10 @@ static int __init it8712f_wdt_find(unsigned short *address)
340{ 363{
341 int err = -ENODEV; 364 int err = -ENODEV;
342 int chip_type; 365 int chip_type;
366 int ret = superio_enter();
367 if (ret)
368 return ret;
343 369
344 superio_enter();
345 chip_type = superio_inw(DEVID); 370 chip_type = superio_inw(DEVID);
346 if (chip_type != IT8712F_DEVID) 371 if (chip_type != IT8712F_DEVID)
347 goto exit; 372 goto exit;
@@ -382,8 +407,6 @@ static int __init it8712f_wdt_init(void)
382{ 407{
383 int err = 0; 408 int err = 0;
384 409
385 spin_lock_init(&io_lock);
386
387 if (it8712f_wdt_find(&address)) 410 if (it8712f_wdt_find(&address))
388 return -ENODEV; 411 return -ENODEV;
389 412
@@ -392,7 +415,11 @@ static int __init it8712f_wdt_init(void)
392 return -EBUSY; 415 return -EBUSY;
393 } 416 }
394 417
395 it8712f_wdt_disable(); 418 err = it8712f_wdt_disable();
419 if (err) {
420 printk(KERN_ERR NAME ": unable to disable watchdog timer.\n");
421 goto out;
422 }
396 423
397 err = register_reboot_notifier(&it8712f_wdt_notifier); 424 err = register_reboot_notifier(&it8712f_wdt_notifier);
398 if (err) { 425 if (err) {
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index b1bc72f9a209..a2d9a1266a23 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -137,7 +137,6 @@
137 137
138static unsigned int base, gpact, ciract, max_units, chip_type; 138static unsigned int base, gpact, ciract, max_units, chip_type;
139static unsigned long wdt_status; 139static unsigned long wdt_status;
140static DEFINE_SPINLOCK(spinlock);
141 140
142static int nogameport = DEFAULT_NOGAMEPORT; 141static int nogameport = DEFAULT_NOGAMEPORT;
143static int exclusive = DEFAULT_EXCLUSIVE; 142static int exclusive = DEFAULT_EXCLUSIVE;
@@ -163,18 +162,26 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started, default="
163 162
164/* Superio Chip */ 163/* Superio Chip */
165 164
166static inline void superio_enter(void) 165static inline int superio_enter(void)
167{ 166{
167 /*
168 * Try to reserve REG and REG + 1 for exclusive access.
169 */
170 if (!request_muxed_region(REG, 2, WATCHDOG_NAME))
171 return -EBUSY;
172
168 outb(0x87, REG); 173 outb(0x87, REG);
169 outb(0x01, REG); 174 outb(0x01, REG);
170 outb(0x55, REG); 175 outb(0x55, REG);
171 outb(0x55, REG); 176 outb(0x55, REG);
177 return 0;
172} 178}
173 179
174static inline void superio_exit(void) 180static inline void superio_exit(void)
175{ 181{
176 outb(0x02, REG); 182 outb(0x02, REG);
177 outb(0x02, VAL); 183 outb(0x02, VAL);
184 release_region(REG, 2);
178} 185}
179 186
180static inline void superio_select(int ldn) 187static inline void superio_select(int ldn)
@@ -255,12 +262,11 @@ static void wdt_keepalive(void)
255 set_bit(WDTS_KEEPALIVE, &wdt_status); 262 set_bit(WDTS_KEEPALIVE, &wdt_status);
256} 263}
257 264
258static void wdt_start(void) 265static int wdt_start(void)
259{ 266{
260 unsigned long flags; 267 int ret = superio_enter();
261 268 if (ret)
262 spin_lock_irqsave(&spinlock, flags); 269 return ret;
263 superio_enter();
264 270
265 superio_select(GPIO); 271 superio_select(GPIO);
266 if (test_bit(WDTS_USE_GP, &wdt_status)) 272 if (test_bit(WDTS_USE_GP, &wdt_status))
@@ -270,15 +276,15 @@ static void wdt_start(void)
270 wdt_update_timeout(); 276 wdt_update_timeout();
271 277
272 superio_exit(); 278 superio_exit();
273 spin_unlock_irqrestore(&spinlock, flags); 279
280 return 0;
274} 281}
275 282
276static void wdt_stop(void) 283static int wdt_stop(void)
277{ 284{
278 unsigned long flags; 285 int ret = superio_enter();
279 286 if (ret)
280 spin_lock_irqsave(&spinlock, flags); 287 return ret;
281 superio_enter();
282 288
283 superio_select(GPIO); 289 superio_select(GPIO);
284 superio_outb(0x00, WDTCTRL); 290 superio_outb(0x00, WDTCTRL);
@@ -288,7 +294,7 @@ static void wdt_stop(void)
288 superio_outb(0x00, WDTVALMSB); 294 superio_outb(0x00, WDTVALMSB);
289 295
290 superio_exit(); 296 superio_exit();
291 spin_unlock_irqrestore(&spinlock, flags); 297 return 0;
292} 298}
293 299
294/** 300/**
@@ -303,8 +309,6 @@ static void wdt_stop(void)
303 309
304static int wdt_set_timeout(int t) 310static int wdt_set_timeout(int t)
305{ 311{
306 unsigned long flags;
307
308 if (t < 1 || t > max_units * 60) 312 if (t < 1 || t > max_units * 60)
309 return -EINVAL; 313 return -EINVAL;
310 314
@@ -313,14 +317,15 @@ static int wdt_set_timeout(int t)
313 else 317 else
314 timeout = t; 318 timeout = t;
315 319
316 spin_lock_irqsave(&spinlock, flags);
317 if (test_bit(WDTS_TIMER_RUN, &wdt_status)) { 320 if (test_bit(WDTS_TIMER_RUN, &wdt_status)) {
318 superio_enter(); 321 int ret = superio_enter();
322 if (ret)
323 return ret;
324
319 superio_select(GPIO); 325 superio_select(GPIO);
320 wdt_update_timeout(); 326 wdt_update_timeout();
321 superio_exit(); 327 superio_exit();
322 } 328 }
323 spin_unlock_irqrestore(&spinlock, flags);
324 return 0; 329 return 0;
325} 330}
326 331
@@ -339,12 +344,12 @@ static int wdt_set_timeout(int t)
339 344
340static int wdt_get_status(int *status) 345static int wdt_get_status(int *status)
341{ 346{
342 unsigned long flags;
343
344 *status = 0; 347 *status = 0;
345 if (testmode) { 348 if (testmode) {
346 spin_lock_irqsave(&spinlock, flags); 349 int ret = superio_enter();
347 superio_enter(); 350 if (ret)
351 return ret;
352
348 superio_select(GPIO); 353 superio_select(GPIO);
349 if (superio_inb(WDTCTRL) & WDT_ZERO) { 354 if (superio_inb(WDTCTRL) & WDT_ZERO) {
350 superio_outb(0x00, WDTCTRL); 355 superio_outb(0x00, WDTCTRL);
@@ -353,7 +358,6 @@ static int wdt_get_status(int *status)
353 } 358 }
354 359
355 superio_exit(); 360 superio_exit();
356 spin_unlock_irqrestore(&spinlock, flags);
357 } 361 }
358 if (test_and_clear_bit(WDTS_KEEPALIVE, &wdt_status)) 362 if (test_and_clear_bit(WDTS_KEEPALIVE, &wdt_status))
359 *status |= WDIOF_KEEPALIVEPING; 363 *status |= WDIOF_KEEPALIVEPING;
@@ -379,9 +383,17 @@ static int wdt_open(struct inode *inode, struct file *file)
379 if (exclusive && test_and_set_bit(WDTS_DEV_OPEN, &wdt_status)) 383 if (exclusive && test_and_set_bit(WDTS_DEV_OPEN, &wdt_status))
380 return -EBUSY; 384 return -EBUSY;
381 if (!test_and_set_bit(WDTS_TIMER_RUN, &wdt_status)) { 385 if (!test_and_set_bit(WDTS_TIMER_RUN, &wdt_status)) {
386 int ret;
382 if (nowayout && !test_and_set_bit(WDTS_LOCKED, &wdt_status)) 387 if (nowayout && !test_and_set_bit(WDTS_LOCKED, &wdt_status))
383 __module_get(THIS_MODULE); 388 __module_get(THIS_MODULE);
384 wdt_start(); 389
390 ret = wdt_start();
391 if (ret) {
392 clear_bit(WDTS_LOCKED, &wdt_status);
393 clear_bit(WDTS_TIMER_RUN, &wdt_status);
394 clear_bit(WDTS_DEV_OPEN, &wdt_status);
395 return ret;
396 }
385 } 397 }
386 return nonseekable_open(inode, file); 398 return nonseekable_open(inode, file);
387} 399}
@@ -403,7 +415,16 @@ static int wdt_release(struct inode *inode, struct file *file)
403{ 415{
404 if (test_bit(WDTS_TIMER_RUN, &wdt_status)) { 416 if (test_bit(WDTS_TIMER_RUN, &wdt_status)) {
405 if (test_and_clear_bit(WDTS_EXPECTED, &wdt_status)) { 417 if (test_and_clear_bit(WDTS_EXPECTED, &wdt_status)) {
406 wdt_stop(); 418 int ret = wdt_stop();
419 if (ret) {
420 /*
421 * Stop failed. Just keep the watchdog alive
422 * and hope nothing bad happens.
423 */
424 set_bit(WDTS_EXPECTED, &wdt_status);
425 wdt_keepalive();
426 return ret;
427 }
407 clear_bit(WDTS_TIMER_RUN, &wdt_status); 428 clear_bit(WDTS_TIMER_RUN, &wdt_status);
408 } else { 429 } else {
409 wdt_keepalive(); 430 wdt_keepalive();
@@ -484,7 +505,9 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
484 &ident, sizeof(ident)) ? -EFAULT : 0; 505 &ident, sizeof(ident)) ? -EFAULT : 0;
485 506
486 case WDIOC_GETSTATUS: 507 case WDIOC_GETSTATUS:
487 wdt_get_status(&status); 508 rc = wdt_get_status(&status);
509 if (rc)
510 return rc;
488 return put_user(status, uarg.i); 511 return put_user(status, uarg.i);
489 512
490 case WDIOC_GETBOOTSTATUS: 513 case WDIOC_GETBOOTSTATUS:
@@ -500,14 +523,22 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
500 523
501 switch (new_options) { 524 switch (new_options) {
502 case WDIOS_DISABLECARD: 525 case WDIOS_DISABLECARD:
503 if (test_bit(WDTS_TIMER_RUN, &wdt_status)) 526 if (test_bit(WDTS_TIMER_RUN, &wdt_status)) {
504 wdt_stop(); 527 rc = wdt_stop();
528 if (rc)
529 return rc;
530 }
505 clear_bit(WDTS_TIMER_RUN, &wdt_status); 531 clear_bit(WDTS_TIMER_RUN, &wdt_status);
506 return 0; 532 return 0;
507 533
508 case WDIOS_ENABLECARD: 534 case WDIOS_ENABLECARD:
509 if (!test_and_set_bit(WDTS_TIMER_RUN, &wdt_status)) 535 if (!test_and_set_bit(WDTS_TIMER_RUN, &wdt_status)) {
510 wdt_start(); 536 rc = wdt_start();
537 if (rc) {
538 clear_bit(WDTS_TIMER_RUN, &wdt_status);
539 return rc;
540 }
541 }
511 return 0; 542 return 0;
512 543
513 default: 544 default:
@@ -560,16 +591,17 @@ static int __init it87_wdt_init(void)
560 int rc = 0; 591 int rc = 0;
561 int try_gameport = !nogameport; 592 int try_gameport = !nogameport;
562 u8 chip_rev; 593 u8 chip_rev;
563 unsigned long flags; 594 int gp_rreq_fail = 0;
564 595
565 wdt_status = 0; 596 wdt_status = 0;
566 597
567 spin_lock_irqsave(&spinlock, flags); 598 rc = superio_enter();
568 superio_enter(); 599 if (rc)
600 return rc;
601
569 chip_type = superio_inw(CHIPID); 602 chip_type = superio_inw(CHIPID);
570 chip_rev = superio_inb(CHIPREV) & 0x0f; 603 chip_rev = superio_inb(CHIPREV) & 0x0f;
571 superio_exit(); 604 superio_exit();
572 spin_unlock_irqrestore(&spinlock, flags);
573 605
574 switch (chip_type) { 606 switch (chip_type) {
575 case IT8702_ID: 607 case IT8702_ID:
@@ -603,8 +635,9 @@ static int __init it87_wdt_init(void)
603 return -ENODEV; 635 return -ENODEV;
604 } 636 }
605 637
606 spin_lock_irqsave(&spinlock, flags); 638 rc = superio_enter();
607 superio_enter(); 639 if (rc)
640 return rc;
608 641
609 superio_select(GPIO); 642 superio_select(GPIO);
610 superio_outb(WDT_TOV1, WDTCFG); 643 superio_outb(WDT_TOV1, WDTCFG);
@@ -620,21 +653,16 @@ static int __init it87_wdt_init(void)
620 } 653 }
621 gpact = superio_inb(ACTREG); 654 gpact = superio_inb(ACTREG);
622 superio_outb(0x01, ACTREG); 655 superio_outb(0x01, ACTREG);
623 superio_exit();
624 spin_unlock_irqrestore(&spinlock, flags);
625 if (request_region(base, 1, WATCHDOG_NAME)) 656 if (request_region(base, 1, WATCHDOG_NAME))
626 set_bit(WDTS_USE_GP, &wdt_status); 657 set_bit(WDTS_USE_GP, &wdt_status);
627 else 658 else
628 rc = -EIO; 659 gp_rreq_fail = 1;
629 } else {
630 superio_exit();
631 spin_unlock_irqrestore(&spinlock, flags);
632 } 660 }
633 661
634 /* If we haven't Gameport support, try to get CIR support */ 662 /* If we haven't Gameport support, try to get CIR support */
635 if (!test_bit(WDTS_USE_GP, &wdt_status)) { 663 if (!test_bit(WDTS_USE_GP, &wdt_status)) {
636 if (!request_region(CIR_BASE, 8, WATCHDOG_NAME)) { 664 if (!request_region(CIR_BASE, 8, WATCHDOG_NAME)) {
637 if (rc == -EIO) 665 if (gp_rreq_fail)
638 printk(KERN_ERR PFX 666 printk(KERN_ERR PFX
639 "I/O Address 0x%04x and 0x%04x" 667 "I/O Address 0x%04x and 0x%04x"
640 " already in use\n", base, CIR_BASE); 668 " already in use\n", base, CIR_BASE);
@@ -646,21 +674,16 @@ static int __init it87_wdt_init(void)
646 goto err_out; 674 goto err_out;
647 } 675 }
648 base = CIR_BASE; 676 base = CIR_BASE;
649 spin_lock_irqsave(&spinlock, flags);
650 superio_enter();
651 677
652 superio_select(CIR); 678 superio_select(CIR);
653 superio_outw(base, BASEREG); 679 superio_outw(base, BASEREG);
654 superio_outb(0x00, CIR_ILS); 680 superio_outb(0x00, CIR_ILS);
655 ciract = superio_inb(ACTREG); 681 ciract = superio_inb(ACTREG);
656 superio_outb(0x01, ACTREG); 682 superio_outb(0x01, ACTREG);
657 if (rc == -EIO) { 683 if (gp_rreq_fail) {
658 superio_select(GAMEPORT); 684 superio_select(GAMEPORT);
659 superio_outb(gpact, ACTREG); 685 superio_outb(gpact, ACTREG);
660 } 686 }
661
662 superio_exit();
663 spin_unlock_irqrestore(&spinlock, flags);
664 } 687 }
665 688
666 if (timeout < 1 || timeout > max_units * 60) { 689 if (timeout < 1 || timeout > max_units * 60) {
@@ -704,6 +727,7 @@ static int __init it87_wdt_init(void)
704 "nogameport=%d)\n", chip_type, chip_rev, timeout, 727 "nogameport=%d)\n", chip_type, chip_rev, timeout,
705 nowayout, testmode, exclusive, nogameport); 728 nowayout, testmode, exclusive, nogameport);
706 729
730 superio_exit();
707 return 0; 731 return 0;
708 732
709err_out_reboot: 733err_out_reboot:
@@ -711,49 +735,37 @@ err_out_reboot:
711err_out_region: 735err_out_region:
712 release_region(base, test_bit(WDTS_USE_GP, &wdt_status) ? 1 : 8); 736 release_region(base, test_bit(WDTS_USE_GP, &wdt_status) ? 1 : 8);
713 if (!test_bit(WDTS_USE_GP, &wdt_status)) { 737 if (!test_bit(WDTS_USE_GP, &wdt_status)) {
714 spin_lock_irqsave(&spinlock, flags);
715 superio_enter();
716 superio_select(CIR); 738 superio_select(CIR);
717 superio_outb(ciract, ACTREG); 739 superio_outb(ciract, ACTREG);
718 superio_exit();
719 spin_unlock_irqrestore(&spinlock, flags);
720 } 740 }
721err_out: 741err_out:
722 if (try_gameport) { 742 if (try_gameport) {
723 spin_lock_irqsave(&spinlock, flags);
724 superio_enter();
725 superio_select(GAMEPORT); 743 superio_select(GAMEPORT);
726 superio_outb(gpact, ACTREG); 744 superio_outb(gpact, ACTREG);
727 superio_exit();
728 spin_unlock_irqrestore(&spinlock, flags);
729 } 745 }
730 746
747 superio_exit();
731 return rc; 748 return rc;
732} 749}
733 750
734static void __exit it87_wdt_exit(void) 751static void __exit it87_wdt_exit(void)
735{ 752{
736 unsigned long flags; 753 if (superio_enter() == 0) {
737 int nolock; 754 superio_select(GPIO);
738 755 superio_outb(0x00, WDTCTRL);
739 nolock = !spin_trylock_irqsave(&spinlock, flags); 756 superio_outb(0x00, WDTCFG);
740 superio_enter(); 757 superio_outb(0x00, WDTVALLSB);
741 superio_select(GPIO); 758 if (max_units > 255)
742 superio_outb(0x00, WDTCTRL); 759 superio_outb(0x00, WDTVALMSB);
743 superio_outb(0x00, WDTCFG); 760 if (test_bit(WDTS_USE_GP, &wdt_status)) {
744 superio_outb(0x00, WDTVALLSB); 761 superio_select(GAMEPORT);
745 if (max_units > 255) 762 superio_outb(gpact, ACTREG);
746 superio_outb(0x00, WDTVALMSB); 763 } else {
747 if (test_bit(WDTS_USE_GP, &wdt_status)) { 764 superio_select(CIR);
748 superio_select(GAMEPORT); 765 superio_outb(ciract, ACTREG);
749 superio_outb(gpact, ACTREG); 766 }
750 } else { 767 superio_exit();
751 superio_select(CIR);
752 superio_outb(ciract, ACTREG);
753 } 768 }
754 superio_exit();
755 if (!nolock)
756 spin_unlock_irqrestore(&spinlock, flags);
757 769
758 misc_deregister(&wdt_miscdev); 770 misc_deregister(&wdt_miscdev);
759 unregister_reboot_notifier(&wdt_notifier); 771 unregister_reboot_notifier(&wdt_notifier);
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index 2b4af222b5f2..4dc31024d26c 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -407,12 +407,35 @@ static int __devexit mpcore_wdt_remove(struct platform_device *dev)
407 return 0; 407 return 0;
408} 408}
409 409
410#ifdef CONFIG_PM
411static int mpcore_wdt_suspend(struct platform_device *dev, pm_message_t msg)
412{
413 struct mpcore_wdt *wdt = platform_get_drvdata(dev);
414 mpcore_wdt_stop(wdt); /* Turn the WDT off */
415 return 0;
416}
417
418static int mpcore_wdt_resume(struct platform_device *dev)
419{
420 struct mpcore_wdt *wdt = platform_get_drvdata(dev);
421 /* re-activate timer */
422 if (test_bit(0, &wdt->timer_alive))
423 mpcore_wdt_start(wdt);
424 return 0;
425}
426#else
427#define mpcore_wdt_suspend NULL
428#define mpcore_wdt_resume NULL
429#endif
430
410/* work with hotplug and coldplug */ 431/* work with hotplug and coldplug */
411MODULE_ALIAS("platform:mpcore_wdt"); 432MODULE_ALIAS("platform:mpcore_wdt");
412 433
413static struct platform_driver mpcore_wdt_driver = { 434static struct platform_driver mpcore_wdt_driver = {
414 .probe = mpcore_wdt_probe, 435 .probe = mpcore_wdt_probe,
415 .remove = __devexit_p(mpcore_wdt_remove), 436 .remove = __devexit_p(mpcore_wdt_remove),
437 .suspend = mpcore_wdt_suspend,
438 .resume = mpcore_wdt_resume,
416 .shutdown = mpcore_wdt_shutdown, 439 .shutdown = mpcore_wdt_shutdown,
417 .driver = { 440 .driver = {
418 .owner = THIS_MODULE, 441 .owner = THIS_MODULE,
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 0430e093b1a0..ac37bb82392c 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -225,11 +225,11 @@ static int __devinit mtx1_wdt_probe(struct platform_device *pdev)
225 225
226 ret = misc_register(&mtx1_wdt_misc); 226 ret = misc_register(&mtx1_wdt_misc);
227 if (ret < 0) { 227 if (ret < 0) {
228 printk(KERN_ERR " mtx-1_wdt : failed to register\n"); 228 dev_err(&pdev->dev, "failed to register\n");
229 return ret; 229 return ret;
230 } 230 }
231 mtx1_wdt_start(); 231 mtx1_wdt_start();
232 printk(KERN_INFO "MTX-1 Watchdog driver\n"); 232 dev_info(&pdev->dev, "MTX-1 Watchdog driver\n");
233 return 0; 233 return 0;
234} 234}
235 235
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
new file mode 100644
index 000000000000..4ec741ac952c
--- /dev/null
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -0,0 +1,433 @@
1/*
2* of_xilinx_wdt.c 1.01 A Watchdog Device Driver for Xilinx xps_timebase_wdt
3*
4* (C) Copyright 2011 (Alejandro Cabrera <aldaya@gmail.com>)
5*
6* -----------------------
7*
8* This program is free software; you can redistribute it and/or
9* modify it under the terms of the GNU General Public License
10* as published by the Free Software Foundation; either version
11* 2 of the License, or (at your option) any later version.
12*
13* -----------------------
14* 30-May-2011 Alejandro Cabrera <aldaya@gmail.com>
15* - If "xlnx,wdt-enable-once" wasn't found on device tree the
16* module will use CONFIG_WATCHDOG_NOWAYOUT
17* - If the device tree parameters ("clock-frequency" and
18* "xlnx,wdt-interval") wasn't found the driver won't
19* know the wdt reset interval
20*/
21
22#include <linux/module.h>
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/fs.h>
26#include <linux/miscdevice.h>
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/watchdog.h>
30#include <linux/io.h>
31#include <linux/uaccess.h>
32#include <linux/of.h>
33#include <linux/of_device.h>
34#include <linux/of_address.h>
35
36/* Register offsets for the Wdt device */
37#define XWT_TWCSR0_OFFSET 0x0 /* Control/Status Register0 */
38#define XWT_TWCSR1_OFFSET 0x4 /* Control/Status Register1 */
39#define XWT_TBR_OFFSET 0x8 /* Timebase Register Offset */
40
41/* Control/Status Register Masks */
42#define XWT_CSR0_WRS_MASK 0x00000008 /* Reset status */
43#define XWT_CSR0_WDS_MASK 0x00000004 /* Timer state */
44#define XWT_CSR0_EWDT1_MASK 0x00000002 /* Enable bit 1 */
45
46/* Control/Status Register 0/1 bits */
47#define XWT_CSRX_EWDT2_MASK 0x00000001 /* Enable bit 2 */
48
49/* SelfTest constants */
50#define XWT_MAX_SELFTEST_LOOP_COUNT 0x00010000
51#define XWT_TIMER_FAILED 0xFFFFFFFF
52
53#define WATCHDOG_NAME "Xilinx Watchdog"
54#define PFX WATCHDOG_NAME ": "
55
56struct xwdt_device {
57 struct resource res;
58 void __iomem *base;
59 u32 nowayout;
60 u32 wdt_interval;
61 u32 boot_status;
62};
63
64static struct xwdt_device xdev;
65
66static u32 timeout;
67static u32 control_status_reg;
68static u8 expect_close;
69static u8 no_timeout;
70static unsigned long driver_open;
71
72static DEFINE_SPINLOCK(spinlock);
73
74static void xwdt_start(void)
75{
76 spin_lock(&spinlock);
77
78 /* Clean previous status and enable the watchdog timer */
79 control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
80 control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK);
81
82 iowrite32((control_status_reg | XWT_CSR0_EWDT1_MASK),
83 xdev.base + XWT_TWCSR0_OFFSET);
84
85 iowrite32(XWT_CSRX_EWDT2_MASK, xdev.base + XWT_TWCSR1_OFFSET);
86
87 spin_unlock(&spinlock);
88}
89
90static void xwdt_stop(void)
91{
92 spin_lock(&spinlock);
93
94 control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
95
96 iowrite32((control_status_reg & ~XWT_CSR0_EWDT1_MASK),
97 xdev.base + XWT_TWCSR0_OFFSET);
98
99 iowrite32(0, xdev.base + XWT_TWCSR1_OFFSET);
100
101 spin_unlock(&spinlock);
102 printk(KERN_INFO PFX "Stopped!\n");
103}
104
105static void xwdt_keepalive(void)
106{
107 spin_lock(&spinlock);
108
109 control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
110 control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK);
111 iowrite32(control_status_reg, xdev.base + XWT_TWCSR0_OFFSET);
112
113 spin_unlock(&spinlock);
114}
115
116static void xwdt_get_status(int *status)
117{
118 int new_status;
119
120 spin_lock(&spinlock);
121
122 control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
123 new_status = ((control_status_reg &
124 (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK)) != 0);
125 spin_unlock(&spinlock);
126
127 *status = 0;
128 if (new_status & 1)
129 *status |= WDIOF_CARDRESET;
130}
131
132static u32 xwdt_selftest(void)
133{
134 int i;
135 u32 timer_value1;
136 u32 timer_value2;
137
138 spin_lock(&spinlock);
139
140 timer_value1 = ioread32(xdev.base + XWT_TBR_OFFSET);
141 timer_value2 = ioread32(xdev.base + XWT_TBR_OFFSET);
142
143 for (i = 0;
144 ((i <= XWT_MAX_SELFTEST_LOOP_COUNT) &&
145 (timer_value2 == timer_value1)); i++) {
146 timer_value2 = ioread32(xdev.base + XWT_TBR_OFFSET);
147 }
148
149 spin_unlock(&spinlock);
150
151 if (timer_value2 != timer_value1)
152 return ~XWT_TIMER_FAILED;
153 else
154 return XWT_TIMER_FAILED;
155}
156
157static int xwdt_open(struct inode *inode, struct file *file)
158{
159 /* Only one process can handle the wdt at a time */
160 if (test_and_set_bit(0, &driver_open))
161 return -EBUSY;
162
163 /* Make sure that the module are always loaded...*/
164 if (xdev.nowayout)
165 __module_get(THIS_MODULE);
166
167 xwdt_start();
168 printk(KERN_INFO PFX "Started...\n");
169
170 return nonseekable_open(inode, file);
171}
172
173static int xwdt_release(struct inode *inode, struct file *file)
174{
175 if (expect_close == 42) {
176 xwdt_stop();
177 } else {
178 printk(KERN_CRIT PFX
179 "Unexpected close, not stopping watchdog!\n");
180 xwdt_keepalive();
181 }
182
183 clear_bit(0, &driver_open);
184 expect_close = 0;
185 return 0;
186}
187
188/*
189 * xwdt_write:
190 * @file: file handle to the watchdog
191 * @buf: buffer to write (unused as data does not matter here
192 * @count: count of bytes
193 * @ppos: pointer to the position to write. No seeks allowed
194 *
195 * A write to a watchdog device is defined as a keepalive signal. Any
196 * write of data will do, as we don't define content meaning.
197 */
198static ssize_t xwdt_write(struct file *file, const char __user *buf,
199 size_t len, loff_t *ppos)
200{
201 if (len) {
202 if (!xdev.nowayout) {
203 size_t i;
204
205 /* In case it was set long ago */
206 expect_close = 0;
207
208 for (i = 0; i != len; i++) {
209 char c;
210
211 if (get_user(c, buf + i))
212 return -EFAULT;
213 if (c == 'V')
214 expect_close = 42;
215 }
216 }
217 xwdt_keepalive();
218 }
219 return len;
220}
221
222static const struct watchdog_info ident = {
223 .options = WDIOF_MAGICCLOSE |
224 WDIOF_KEEPALIVEPING,
225 .firmware_version = 1,
226 .identity = WATCHDOG_NAME,
227};
228
229/*
230 * xwdt_ioctl:
231 * @file: file handle to the device
232 * @cmd: watchdog command
233 * @arg: argument pointer
234 *
235 * The watchdog API defines a common set of functions for all watchdogs
236 * according to their available features.
237 */
238static long xwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
239{
240 int status;
241
242 union {
243 struct watchdog_info __user *ident;
244 int __user *i;
245 } uarg;
246
247 uarg.i = (int __user *)arg;
248
249 switch (cmd) {
250 case WDIOC_GETSUPPORT:
251 return copy_to_user(uarg.ident, &ident,
252 sizeof(ident)) ? -EFAULT : 0;
253
254 case WDIOC_GETBOOTSTATUS:
255 return put_user(xdev.boot_status, uarg.i);
256
257 case WDIOC_GETSTATUS:
258 xwdt_get_status(&status);
259 return put_user(status, uarg.i);
260
261 case WDIOC_KEEPALIVE:
262 xwdt_keepalive();
263 return 0;
264
265 case WDIOC_GETTIMEOUT:
266 if (no_timeout)
267 return -ENOTTY;
268 else
269 return put_user(timeout, uarg.i);
270
271 default:
272 return -ENOTTY;
273 }
274}
275
276static const struct file_operations xwdt_fops = {
277 .owner = THIS_MODULE,
278 .llseek = no_llseek,
279 .write = xwdt_write,
280 .open = xwdt_open,
281 .release = xwdt_release,
282 .unlocked_ioctl = xwdt_ioctl,
283};
284
285static struct miscdevice xwdt_miscdev = {
286 .minor = WATCHDOG_MINOR,
287 .name = "watchdog",
288 .fops = &xwdt_fops,
289};
290
291static int __devinit xwdt_probe(struct platform_device *pdev)
292{
293 int rc;
294 u32 *tmptr;
295 u32 *pfreq;
296
297 no_timeout = 0;
298
299 pfreq = (u32 *)of_get_property(pdev->dev.of_node->parent,
300 "clock-frequency", NULL);
301
302 if (pfreq == NULL) {
303 printk(KERN_WARNING PFX
304 "The watchdog clock frequency cannot be obtained!\n");
305 no_timeout = 1;
306 }
307
308 rc = of_address_to_resource(pdev->dev.of_node, 0, &xdev.res);
309 if (rc) {
310 printk(KERN_WARNING PFX "invalid address!\n");
311 return rc;
312 }
313
314 tmptr = (u32 *)of_get_property(pdev->dev.of_node,
315 "xlnx,wdt-interval", NULL);
316 if (tmptr == NULL) {
317 printk(KERN_WARNING PFX "Parameter \"xlnx,wdt-interval\""
318 " not found in device tree!\n");
319 no_timeout = 1;
320 } else {
321 xdev.wdt_interval = *tmptr;
322 }
323
324 tmptr = (u32 *)of_get_property(pdev->dev.of_node,
325 "xlnx,wdt-enable-once", NULL);
326 if (tmptr == NULL) {
327 printk(KERN_WARNING PFX "Parameter \"xlnx,wdt-enable-once\""
328 " not found in device tree!\n");
329 xdev.nowayout = WATCHDOG_NOWAYOUT;
330 }
331
332/*
333 * Twice of the 2^wdt_interval / freq because the first wdt overflow is
334 * ignored (interrupt), reset is only generated at second wdt overflow
335 */
336 if (!no_timeout)
337 timeout = 2 * ((1<<xdev.wdt_interval) / *pfreq);
338
339 if (!request_mem_region(xdev.res.start,
340 xdev.res.end - xdev.res.start + 1, WATCHDOG_NAME)) {
341 rc = -ENXIO;
342 printk(KERN_ERR PFX "memory request failure!\n");
343 goto err_out;
344 }
345
346 xdev.base = ioremap(xdev.res.start, xdev.res.end - xdev.res.start + 1);
347 if (xdev.base == NULL) {
348 rc = -ENOMEM;
349 printk(KERN_ERR PFX "ioremap failure!\n");
350 goto release_mem;
351 }
352
353 rc = xwdt_selftest();
354 if (rc == XWT_TIMER_FAILED) {
355 printk(KERN_ERR PFX "SelfTest routine error!\n");
356 goto unmap_io;
357 }
358
359 xwdt_get_status(&xdev.boot_status);
360
361 rc = misc_register(&xwdt_miscdev);
362 if (rc) {
363 printk(KERN_ERR PFX
364 "cannot register miscdev on minor=%d (err=%d)\n",
365 xwdt_miscdev.minor, rc);
366 goto unmap_io;
367 }
368
369 if (no_timeout)
370 printk(KERN_INFO PFX
371 "driver loaded (timeout=? sec, nowayout=%d)\n",
372 xdev.nowayout);
373 else
374 printk(KERN_INFO PFX
375 "driver loaded (timeout=%d sec, nowayout=%d)\n",
376 timeout, xdev.nowayout);
377
378 expect_close = 0;
379 clear_bit(0, &driver_open);
380
381 return 0;
382
383unmap_io:
384 iounmap(xdev.base);
385release_mem:
386 release_mem_region(xdev.res.start, resource_size(&xdev.res));
387err_out:
388 return rc;
389}
390
391static int __devexit xwdt_remove(struct platform_device *dev)
392{
393 misc_deregister(&xwdt_miscdev);
394 iounmap(xdev.base);
395 release_mem_region(xdev.res.start, resource_size(&xdev.res));
396
397 return 0;
398}
399
400/* Match table for of_platform binding */
401static struct of_device_id __devinitdata xwdt_of_match[] = {
402 { .compatible = "xlnx,xps-timebase-wdt-1.01.a", },
403 {},
404};
405MODULE_DEVICE_TABLE(of, xwdt_of_match);
406
407static struct platform_driver xwdt_driver = {
408 .probe = xwdt_probe,
409 .remove = __devexit_p(xwdt_remove),
410 .driver = {
411 .owner = THIS_MODULE,
412 .name = WATCHDOG_NAME,
413 .of_match_table = xwdt_of_match,
414 },
415};
416
417static int __init xwdt_init(void)
418{
419 return platform_driver_register(&xwdt_driver);
420}
421
422static void __exit xwdt_exit(void)
423{
424 platform_driver_unregister(&xwdt_driver);
425}
426
427module_init(xwdt_init);
428module_exit(xwdt_exit);
429
430MODULE_AUTHOR("Alejandro Cabrera <aldaya@gmail.com>");
431MODULE_DESCRIPTION("Xilinx Watchdog driver");
432MODULE_LICENSE("GPL");
433MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index b7c139051575..e78d89986768 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -56,6 +56,7 @@
56#define IO_DEFAULT 0x2E /* Address used on Portwell Boards */ 56#define IO_DEFAULT 0x2E /* Address used on Portwell Boards */
57 57
58static int io = IO_DEFAULT; 58static int io = IO_DEFAULT;
59static int swc_base_addr = -1;
59 60
60static int timeout = DEFAULT_TIMEOUT; /* timeout value */ 61static int timeout = DEFAULT_TIMEOUT; /* timeout value */
61static unsigned long timer_enabled; /* is the timer enabled? */ 62static unsigned long timer_enabled; /* is the timer enabled? */
@@ -116,9 +117,8 @@ static inline void pc87413_enable_swc(void)
116 117
117/* Read SWC I/O base address */ 118/* Read SWC I/O base address */
118 119
119static inline unsigned int pc87413_get_swc_base(void) 120static void pc87413_get_swc_base_addr(void)
120{ 121{
121 unsigned int swc_base_addr = 0;
122 unsigned char addr_l, addr_h = 0; 122 unsigned char addr_l, addr_h = 0;
123 123
124 /* Step 3: Read SWC I/O Base Address */ 124 /* Step 3: Read SWC I/O Base Address */
@@ -136,12 +136,11 @@ static inline unsigned int pc87413_get_swc_base(void)
136 "Read SWC I/O Base Address: low %d, high %d, res %d\n", 136 "Read SWC I/O Base Address: low %d, high %d, res %d\n",
137 addr_l, addr_h, swc_base_addr); 137 addr_l, addr_h, swc_base_addr);
138#endif 138#endif
139 return swc_base_addr;
140} 139}
141 140
142/* Select Bank 3 of SWC */ 141/* Select Bank 3 of SWC */
143 142
144static inline void pc87413_swc_bank3(unsigned int swc_base_addr) 143static inline void pc87413_swc_bank3(void)
145{ 144{
146 /* Step 4: Select Bank3 of SWC */ 145 /* Step 4: Select Bank3 of SWC */
147 outb_p(inb(swc_base_addr + 0x0f) | 0x03, swc_base_addr + 0x0f); 146 outb_p(inb(swc_base_addr + 0x0f) | 0x03, swc_base_addr + 0x0f);
@@ -152,8 +151,7 @@ static inline void pc87413_swc_bank3(unsigned int swc_base_addr)
152 151
153/* Set watchdog timeout to x minutes */ 152/* Set watchdog timeout to x minutes */
154 153
155static inline void pc87413_programm_wdto(unsigned int swc_base_addr, 154static inline void pc87413_programm_wdto(char pc87413_time)
156 char pc87413_time)
157{ 155{
158 /* Step 5: Programm WDTO, Twd. */ 156 /* Step 5: Programm WDTO, Twd. */
159 outb_p(pc87413_time, swc_base_addr + WDTO); 157 outb_p(pc87413_time, swc_base_addr + WDTO);
@@ -164,7 +162,7 @@ static inline void pc87413_programm_wdto(unsigned int swc_base_addr,
164 162
165/* Enable WDEN */ 163/* Enable WDEN */
166 164
167static inline void pc87413_enable_wden(unsigned int swc_base_addr) 165static inline void pc87413_enable_wden(void)
168{ 166{
169 /* Step 6: Enable WDEN */ 167 /* Step 6: Enable WDEN */
170 outb_p(inb(swc_base_addr + WDCTL) | 0x01, swc_base_addr + WDCTL); 168 outb_p(inb(swc_base_addr + WDCTL) | 0x01, swc_base_addr + WDCTL);
@@ -174,7 +172,7 @@ static inline void pc87413_enable_wden(unsigned int swc_base_addr)
174} 172}
175 173
176/* Enable SW_WD_TREN */ 174/* Enable SW_WD_TREN */
177static inline void pc87413_enable_sw_wd_tren(unsigned int swc_base_addr) 175static inline void pc87413_enable_sw_wd_tren(void)
178{ 176{
179 /* Enable SW_WD_TREN */ 177 /* Enable SW_WD_TREN */
180 outb_p(inb(swc_base_addr + WDCFG) | 0x80, swc_base_addr + WDCFG); 178 outb_p(inb(swc_base_addr + WDCFG) | 0x80, swc_base_addr + WDCFG);
@@ -185,7 +183,7 @@ static inline void pc87413_enable_sw_wd_tren(unsigned int swc_base_addr)
185 183
186/* Disable SW_WD_TREN */ 184/* Disable SW_WD_TREN */
187 185
188static inline void pc87413_disable_sw_wd_tren(unsigned int swc_base_addr) 186static inline void pc87413_disable_sw_wd_tren(void)
189{ 187{
190 /* Disable SW_WD_TREN */ 188 /* Disable SW_WD_TREN */
191 outb_p(inb(swc_base_addr + WDCFG) & 0x7f, swc_base_addr + WDCFG); 189 outb_p(inb(swc_base_addr + WDCFG) & 0x7f, swc_base_addr + WDCFG);
@@ -196,7 +194,7 @@ static inline void pc87413_disable_sw_wd_tren(unsigned int swc_base_addr)
196 194
197/* Enable SW_WD_TRG */ 195/* Enable SW_WD_TRG */
198 196
199static inline void pc87413_enable_sw_wd_trg(unsigned int swc_base_addr) 197static inline void pc87413_enable_sw_wd_trg(void)
200{ 198{
201 /* Enable SW_WD_TRG */ 199 /* Enable SW_WD_TRG */
202 outb_p(inb(swc_base_addr + WDCTL) | 0x80, swc_base_addr + WDCTL); 200 outb_p(inb(swc_base_addr + WDCTL) | 0x80, swc_base_addr + WDCTL);
@@ -207,7 +205,7 @@ static inline void pc87413_enable_sw_wd_trg(unsigned int swc_base_addr)
207 205
208/* Disable SW_WD_TRG */ 206/* Disable SW_WD_TRG */
209 207
210static inline void pc87413_disable_sw_wd_trg(unsigned int swc_base_addr) 208static inline void pc87413_disable_sw_wd_trg(void)
211{ 209{
212 /* Disable SW_WD_TRG */ 210 /* Disable SW_WD_TRG */
213 outb_p(inb(swc_base_addr + WDCTL) & 0x7f, swc_base_addr + WDCTL); 211 outb_p(inb(swc_base_addr + WDCTL) & 0x7f, swc_base_addr + WDCTL);
@@ -222,18 +220,13 @@ static inline void pc87413_disable_sw_wd_trg(unsigned int swc_base_addr)
222 220
223static void pc87413_enable(void) 221static void pc87413_enable(void)
224{ 222{
225 unsigned int swc_base_addr;
226
227 spin_lock(&io_lock); 223 spin_lock(&io_lock);
228 224
229 pc87413_select_wdt_out(); 225 pc87413_swc_bank3();
230 pc87413_enable_swc(); 226 pc87413_programm_wdto(timeout);
231 swc_base_addr = pc87413_get_swc_base(); 227 pc87413_enable_wden();
232 pc87413_swc_bank3(swc_base_addr); 228 pc87413_enable_sw_wd_tren();
233 pc87413_programm_wdto(swc_base_addr, timeout); 229 pc87413_enable_sw_wd_trg();
234 pc87413_enable_wden(swc_base_addr);
235 pc87413_enable_sw_wd_tren(swc_base_addr);
236 pc87413_enable_sw_wd_trg(swc_base_addr);
237 230
238 spin_unlock(&io_lock); 231 spin_unlock(&io_lock);
239} 232}
@@ -242,17 +235,12 @@ static void pc87413_enable(void)
242 235
243static void pc87413_disable(void) 236static void pc87413_disable(void)
244{ 237{
245 unsigned int swc_base_addr;
246
247 spin_lock(&io_lock); 238 spin_lock(&io_lock);
248 239
249 pc87413_select_wdt_out(); 240 pc87413_swc_bank3();
250 pc87413_enable_swc(); 241 pc87413_disable_sw_wd_tren();
251 swc_base_addr = pc87413_get_swc_base(); 242 pc87413_disable_sw_wd_trg();
252 pc87413_swc_bank3(swc_base_addr); 243 pc87413_programm_wdto(0);
253 pc87413_disable_sw_wd_tren(swc_base_addr);
254 pc87413_disable_sw_wd_trg(swc_base_addr);
255 pc87413_programm_wdto(swc_base_addr, 0);
256 244
257 spin_unlock(&io_lock); 245 spin_unlock(&io_lock);
258} 246}
@@ -261,20 +249,15 @@ static void pc87413_disable(void)
261 249
262static void pc87413_refresh(void) 250static void pc87413_refresh(void)
263{ 251{
264 unsigned int swc_base_addr;
265
266 spin_lock(&io_lock); 252 spin_lock(&io_lock);
267 253
268 pc87413_select_wdt_out(); 254 pc87413_swc_bank3();
269 pc87413_enable_swc(); 255 pc87413_disable_sw_wd_tren();
270 swc_base_addr = pc87413_get_swc_base(); 256 pc87413_disable_sw_wd_trg();
271 pc87413_swc_bank3(swc_base_addr); 257 pc87413_programm_wdto(timeout);
272 pc87413_disable_sw_wd_tren(swc_base_addr); 258 pc87413_enable_wden();
273 pc87413_disable_sw_wd_trg(swc_base_addr); 259 pc87413_enable_sw_wd_tren();
274 pc87413_programm_wdto(swc_base_addr, timeout); 260 pc87413_enable_sw_wd_trg();
275 pc87413_enable_wden(swc_base_addr);
276 pc87413_enable_sw_wd_tren(swc_base_addr);
277 pc87413_enable_sw_wd_trg(swc_base_addr);
278 261
279 spin_unlock(&io_lock); 262 spin_unlock(&io_lock);
280} 263}
@@ -528,7 +511,8 @@ static int __init pc87413_init(void)
528 printk(KERN_INFO PFX "Version " VERSION " at io 0x%X\n", 511 printk(KERN_INFO PFX "Version " VERSION " at io 0x%X\n",
529 WDT_INDEX_IO_PORT); 512 WDT_INDEX_IO_PORT);
530 513
531 /* request_region(io, 2, "pc87413"); */ 514 if (!request_muxed_region(io, 2, MODNAME))
515 return -EBUSY;
532 516
533 ret = register_reboot_notifier(&pc87413_notifier); 517 ret = register_reboot_notifier(&pc87413_notifier);
534 if (ret != 0) { 518 if (ret != 0) {
@@ -541,12 +525,32 @@ static int __init pc87413_init(void)
541 printk(KERN_ERR PFX 525 printk(KERN_ERR PFX
542 "cannot register miscdev on minor=%d (err=%d)\n", 526 "cannot register miscdev on minor=%d (err=%d)\n",
543 WATCHDOG_MINOR, ret); 527 WATCHDOG_MINOR, ret);
544 unregister_reboot_notifier(&pc87413_notifier); 528 goto reboot_unreg;
545 return ret;
546 } 529 }
547 printk(KERN_INFO PFX "initialized. timeout=%d min \n", timeout); 530 printk(KERN_INFO PFX "initialized. timeout=%d min \n", timeout);
531
532 pc87413_select_wdt_out();
533 pc87413_enable_swc();
534 pc87413_get_swc_base_addr();
535
536 if (!request_region(swc_base_addr, 0x20, MODNAME)) {
537 printk(KERN_ERR PFX
538 "cannot request SWC region at 0x%x\n", swc_base_addr);
539 ret = -EBUSY;
540 goto misc_unreg;
541 }
542
548 pc87413_enable(); 543 pc87413_enable();
544
545 release_region(io, 2);
549 return 0; 546 return 0;
547
548misc_unreg:
549 misc_deregister(&pc87413_miscdev);
550reboot_unreg:
551 unregister_reboot_notifier(&pc87413_notifier);
552 release_region(io, 2);
553 return ret;
550} 554}
551 555
552/** 556/**
@@ -569,7 +573,7 @@ static void __exit pc87413_exit(void)
569 573
570 misc_deregister(&pc87413_miscdev); 574 misc_deregister(&pc87413_miscdev);
571 unregister_reboot_notifier(&pc87413_notifier); 575 unregister_reboot_notifier(&pc87413_notifier);
572 /* release_region(io, 2); */ 576 release_region(swc_base_addr, 0x20);
573 577
574 printk(KERN_INFO MODNAME " watchdog component driver removed.\n"); 578 printk(KERN_INFO MODNAME " watchdog component driver removed.\n");
575} 579}
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index f7f5aa00df60..30da88f47cd3 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -589,6 +589,15 @@ static int s3c2410wdt_resume(struct platform_device *dev)
589#define s3c2410wdt_resume NULL 589#define s3c2410wdt_resume NULL
590#endif /* CONFIG_PM */ 590#endif /* CONFIG_PM */
591 591
592#ifdef CONFIG_OF
593static const struct of_device_id s3c2410_wdt_match[] = {
594 { .compatible = "samsung,s3c2410-wdt" },
595 {},
596};
597MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
598#else
599#define s3c2410_wdt_match NULL
600#endif
592 601
593static struct platform_driver s3c2410wdt_driver = { 602static struct platform_driver s3c2410wdt_driver = {
594 .probe = s3c2410wdt_probe, 603 .probe = s3c2410wdt_probe,
@@ -599,6 +608,7 @@ static struct platform_driver s3c2410wdt_driver = {
599 .driver = { 608 .driver = {
600 .owner = THIS_MODULE, 609 .owner = THIS_MODULE,
601 .name = "s3c2410-wdt", 610 .name = "s3c2410-wdt",
611 .of_match_table = s3c2410_wdt_match,
602 }, 612 },
603}; 613};
604 614
diff --git a/drivers/watchdog/sbc7240_wdt.c b/drivers/watchdog/sbc7240_wdt.c
index ff11504c376e..93ac58953122 100644
--- a/drivers/watchdog/sbc7240_wdt.c
+++ b/drivers/watchdog/sbc7240_wdt.c
@@ -29,7 +29,7 @@
29#include <linux/watchdog.h> 29#include <linux/watchdog.h>
30#include <linux/io.h> 30#include <linux/io.h>
31#include <linux/uaccess.h> 31#include <linux/uaccess.h>
32#include <asm/atomic.h> 32#include <linux/atomic.h>
33#include <asm/system.h> 33#include <asm/system.h>
34 34
35#define SBC7240_PREFIX "sbc7240_wdt: " 35#define SBC7240_PREFIX "sbc7240_wdt: "
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index c7cf4b01f58d..029467e34636 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -472,15 +472,10 @@ static void sch311x_wdt_shutdown(struct platform_device *dev)
472 sch311x_wdt_stop(); 472 sch311x_wdt_stop();
473} 473}
474 474
475#define sch311x_wdt_suspend NULL
476#define sch311x_wdt_resume NULL
477
478static struct platform_driver sch311x_wdt_driver = { 475static struct platform_driver sch311x_wdt_driver = {
479 .probe = sch311x_wdt_probe, 476 .probe = sch311x_wdt_probe,
480 .remove = __devexit_p(sch311x_wdt_remove), 477 .remove = __devexit_p(sch311x_wdt_remove),
481 .shutdown = sch311x_wdt_shutdown, 478 .shutdown = sch311x_wdt_shutdown,
482 .suspend = sch311x_wdt_suspend,
483 .resume = sch311x_wdt_resume,
484 .driver = { 479 .driver = {
485 .owner = THIS_MODULE, 480 .owner = THIS_MODULE,
486 .name = DRV_NAME, 481 .name = DRV_NAME,
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 0d80e08b6439..cc2cfbe33b30 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -134,6 +134,8 @@ static void wdt_enable(void)
134 writel(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL); 134 writel(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL);
135 writel(LOCK, wdt->base + WDTLOCK); 135 writel(LOCK, wdt->base + WDTLOCK);
136 136
137 /* Flush posted writes. */
138 readl(wdt->base + WDTLOCK);
137 spin_unlock(&wdt->lock); 139 spin_unlock(&wdt->lock);
138} 140}
139 141
@@ -144,9 +146,10 @@ static void wdt_disable(void)
144 146
145 writel(UNLOCK, wdt->base + WDTLOCK); 147 writel(UNLOCK, wdt->base + WDTLOCK);
146 writel(0, wdt->base + WDTCONTROL); 148 writel(0, wdt->base + WDTCONTROL);
147 writel(0, wdt->base + WDTLOAD);
148 writel(LOCK, wdt->base + WDTLOCK); 149 writel(LOCK, wdt->base + WDTLOCK);
149 150
151 /* Flush posted writes. */
152 readl(wdt->base + WDTLOCK);
150 spin_unlock(&wdt->lock); 153 spin_unlock(&wdt->lock);
151} 154}
152 155
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
new file mode 100644
index 000000000000..cfa1a1518aad
--- /dev/null
+++ b/drivers/watchdog/watchdog_core.c
@@ -0,0 +1,111 @@
1/*
2 * watchdog_core.c
3 *
4 * (c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
5 * All Rights Reserved.
6 *
7 * (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
8 *
9 * This source code is part of the generic code that can be used
10 * by all the watchdog timer drivers.
11 *
12 * Based on source code of the following authors:
13 * Matt Domsch <Matt_Domsch@dell.com>,
14 * Rob Radez <rob@osinvestor.com>,
15 * Rusty Lynch <rusty@linux.co.intel.com>
16 * Satyam Sharma <satyam@infradead.org>
17 * Randy Dunlap <randy.dunlap@oracle.com>
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 * Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
25 * admit liability nor provide warranty for any of this software.
26 * This material is provided "AS-IS" and at no charge.
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/module.h> /* For EXPORT_SYMBOL/module stuff/... */
32#include <linux/types.h> /* For standard types */
33#include <linux/errno.h> /* For the -ENODEV/... values */
34#include <linux/kernel.h> /* For printk/panic/... */
35#include <linux/watchdog.h> /* For watchdog specific items */
36#include <linux/init.h> /* For __init/__exit/... */
37
38#include "watchdog_dev.h" /* For watchdog_dev_register/... */
39
40/**
41 * watchdog_register_device() - register a watchdog device
42 * @wdd: watchdog device
43 *
44 * Register a watchdog device with the kernel so that the
45 * watchdog timer can be accessed from userspace.
46 *
47 * A zero is returned on success and a negative errno code for
48 * failure.
49 */
50int watchdog_register_device(struct watchdog_device *wdd)
51{
52 int ret;
53
54 if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
55 return -EINVAL;
56
57 /* Mandatory operations need to be supported */
58 if (wdd->ops->start == NULL || wdd->ops->stop == NULL)
59 return -EINVAL;
60
61 /*
62 * Check that we have valid min and max timeout values, if
63 * not reset them both to 0 (=not used or unknown)
64 */
65 if (wdd->min_timeout > wdd->max_timeout) {
66 pr_info("Invalid min and max timeout values, resetting to 0!\n");
67 wdd->min_timeout = 0;
68 wdd->max_timeout = 0;
69 }
70
71 /*
72 * Note: now that all watchdog_device data has been verified, we
73 * will not check this anymore in other functions. If data gets
74 * corrupted in a later stage then we expect a kernel panic!
75 */
76
77 /* We only support 1 watchdog device via the /dev/watchdog interface */
78 ret = watchdog_dev_register(wdd);
79 if (ret) {
80 pr_err("error registering /dev/watchdog (err=%d).\n", ret);
81 return ret;
82 }
83
84 return 0;
85}
86EXPORT_SYMBOL_GPL(watchdog_register_device);
87
88/**
89 * watchdog_unregister_device() - unregister a watchdog device
90 * @wdd: watchdog device to unregister
91 *
92 * Unregister a watchdog device that was previously successfully
93 * registered with watchdog_register_device().
94 */
95void watchdog_unregister_device(struct watchdog_device *wdd)
96{
97 int ret;
98
99 if (wdd == NULL)
100 return;
101
102 ret = watchdog_dev_unregister(wdd);
103 if (ret)
104 pr_err("error unregistering /dev/watchdog (err=%d).\n", ret);
105}
106EXPORT_SYMBOL_GPL(watchdog_unregister_device);
107
108MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
109MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
110MODULE_DESCRIPTION("WatchDog Timer Driver Core");
111MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
new file mode 100644
index 000000000000..d33520d0b4c9
--- /dev/null
+++ b/drivers/watchdog/watchdog_dev.c
@@ -0,0 +1,395 @@
1/*
2 * watchdog_dev.c
3 *
4 * (c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
5 * All Rights Reserved.
6 *
7 * (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
8 *
9 *
10 * This source code is part of the generic code that can be used
11 * by all the watchdog timer drivers.
12 *
13 * This part of the generic code takes care of the following
14 * misc device: /dev/watchdog.
15 *
16 * Based on source code of the following authors:
17 * Matt Domsch <Matt_Domsch@dell.com>,
18 * Rob Radez <rob@osinvestor.com>,
19 * Rusty Lynch <rusty@linux.co.intel.com>
20 * Satyam Sharma <satyam@infradead.org>
21 * Randy Dunlap <randy.dunlap@oracle.com>
22 *
23 * This program is free software; you can redistribute it and/or
24 * modify it under the terms of the GNU General Public License
25 * as published by the Free Software Foundation; either version
26 * 2 of the License, or (at your option) any later version.
27 *
28 * Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
29 * admit liability nor provide warranty for any of this software.
30 * This material is provided "AS-IS" and at no charge.
31 */
32
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35#include <linux/module.h> /* For module stuff/... */
36#include <linux/types.h> /* For standard types (like size_t) */
37#include <linux/errno.h> /* For the -ENODEV/... values */
38#include <linux/kernel.h> /* For printk/panic/... */
39#include <linux/fs.h> /* For file operations */
40#include <linux/watchdog.h> /* For watchdog specific items */
41#include <linux/miscdevice.h> /* For handling misc devices */
42#include <linux/init.h> /* For __init/__exit/... */
43#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
44
45/* make sure we only register one /dev/watchdog device */
46static unsigned long watchdog_dev_busy;
47/* the watchdog device behind /dev/watchdog */
48static struct watchdog_device *wdd;
49
50/*
51 * watchdog_ping: ping the watchdog.
52 * @wddev: the watchdog device to ping
53 *
54 * If the watchdog has no own ping operation then it needs to be
55 * restarted via the start operation. This wrapper function does
56 * exactly that.
57 * We only ping when the watchdog device is running.
58 */
59
60static int watchdog_ping(struct watchdog_device *wddev)
61{
62 if (test_bit(WDOG_ACTIVE, &wdd->status)) {
63 if (wddev->ops->ping)
64 return wddev->ops->ping(wddev); /* ping the watchdog */
65 else
66 return wddev->ops->start(wddev); /* restart watchdog */
67 }
68 return 0;
69}
70
71/*
72 * watchdog_start: wrapper to start the watchdog.
73 * @wddev: the watchdog device to start
74 *
75 * Start the watchdog if it is not active and mark it active.
76 * This function returns zero on success or a negative errno code for
77 * failure.
78 */
79
80static int watchdog_start(struct watchdog_device *wddev)
81{
82 int err;
83
84 if (!test_bit(WDOG_ACTIVE, &wdd->status)) {
85 err = wddev->ops->start(wddev);
86 if (err < 0)
87 return err;
88
89 set_bit(WDOG_ACTIVE, &wdd->status);
90 }
91 return 0;
92}
93
94/*
95 * watchdog_stop: wrapper to stop the watchdog.
96 * @wddev: the watchdog device to stop
97 *
98 * Stop the watchdog if it is still active and unmark it active.
99 * This function returns zero on success or a negative errno code for
100 * failure.
101 * If the 'nowayout' feature was set, the watchdog cannot be stopped.
102 */
103
104static int watchdog_stop(struct watchdog_device *wddev)
105{
106 int err = -EBUSY;
107
108 if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
109 pr_info("%s: nowayout prevents watchdog to be stopped!\n",
110 wdd->info->identity);
111 return err;
112 }
113
114 if (test_bit(WDOG_ACTIVE, &wdd->status)) {
115 err = wddev->ops->stop(wddev);
116 if (err < 0)
117 return err;
118
119 clear_bit(WDOG_ACTIVE, &wdd->status);
120 }
121 return 0;
122}
123
124/*
125 * watchdog_write: writes to the watchdog.
126 * @file: file from VFS
127 * @data: user address of data
128 * @len: length of data
129 * @ppos: pointer to the file offset
130 *
131 * A write to a watchdog device is defined as a keepalive ping.
132 * Writing the magic 'V' sequence allows the next close to turn
133 * off the watchdog (if 'nowayout' is not set).
134 */
135
136static ssize_t watchdog_write(struct file *file, const char __user *data,
137 size_t len, loff_t *ppos)
138{
139 size_t i;
140 char c;
141
142 if (len == 0)
143 return 0;
144
145 /*
146 * Note: just in case someone wrote the magic character
147 * five months ago...
148 */
149 clear_bit(WDOG_ALLOW_RELEASE, &wdd->status);
150
151 /* scan to see whether or not we got the magic character */
152 for (i = 0; i != len; i++) {
153 if (get_user(c, data + i))
154 return -EFAULT;
155 if (c == 'V')
156 set_bit(WDOG_ALLOW_RELEASE, &wdd->status);
157 }
158
159 /* someone wrote to us, so we send the watchdog a keepalive ping */
160 watchdog_ping(wdd);
161
162 return len;
163}
164
165/*
166 * watchdog_ioctl: handle the different ioctl's for the watchdog device.
167 * @file: file handle to the device
168 * @cmd: watchdog command
169 * @arg: argument pointer
170 *
171 * The watchdog API defines a common set of functions for all watchdogs
172 * according to their available features.
173 */
174
175static long watchdog_ioctl(struct file *file, unsigned int cmd,
176 unsigned long arg)
177{
178 void __user *argp = (void __user *)arg;
179 int __user *p = argp;
180 unsigned int val;
181 int err;
182
183 if (wdd->ops->ioctl) {
184 err = wdd->ops->ioctl(wdd, cmd, arg);
185 if (err != -ENOIOCTLCMD)
186 return err;
187 }
188
189 switch (cmd) {
190 case WDIOC_GETSUPPORT:
191 return copy_to_user(argp, wdd->info,
192 sizeof(struct watchdog_info)) ? -EFAULT : 0;
193 case WDIOC_GETSTATUS:
194 val = wdd->ops->status ? wdd->ops->status(wdd) : 0;
195 return put_user(val, p);
196 case WDIOC_GETBOOTSTATUS:
197 return put_user(wdd->bootstatus, p);
198 case WDIOC_SETOPTIONS:
199 if (get_user(val, p))
200 return -EFAULT;
201 if (val & WDIOS_DISABLECARD) {
202 err = watchdog_stop(wdd);
203 if (err < 0)
204 return err;
205 }
206 if (val & WDIOS_ENABLECARD) {
207 err = watchdog_start(wdd);
208 if (err < 0)
209 return err;
210 }
211 return 0;
212 case WDIOC_KEEPALIVE:
213 if (!(wdd->info->options & WDIOF_KEEPALIVEPING))
214 return -EOPNOTSUPP;
215 watchdog_ping(wdd);
216 return 0;
217 case WDIOC_SETTIMEOUT:
218 if ((wdd->ops->set_timeout == NULL) ||
219 !(wdd->info->options & WDIOF_SETTIMEOUT))
220 return -EOPNOTSUPP;
221 if (get_user(val, p))
222 return -EFAULT;
223 if ((wdd->max_timeout != 0) &&
224 (val < wdd->min_timeout || val > wdd->max_timeout))
225 return -EINVAL;
226 err = wdd->ops->set_timeout(wdd, val);
227 if (err < 0)
228 return err;
229 wdd->timeout = val;
230 /* If the watchdog is active then we send a keepalive ping
231 * to make sure that the watchdog keep's running (and if
232 * possible that it takes the new timeout) */
233 watchdog_ping(wdd);
234 /* Fall */
235 case WDIOC_GETTIMEOUT:
236 /* timeout == 0 means that we don't know the timeout */
237 if (wdd->timeout == 0)
238 return -EOPNOTSUPP;
239 return put_user(wdd->timeout, p);
240 default:
241 return -ENOTTY;
242 }
243}
244
245/*
246 * watchdog_open: open the /dev/watchdog device.
247 * @inode: inode of device
248 * @file: file handle to device
249 *
250 * When the /dev/watchdog device gets opened, we start the watchdog.
251 * Watch out: the /dev/watchdog device is single open, so we make sure
252 * it can only be opened once.
253 */
254
255static int watchdog_open(struct inode *inode, struct file *file)
256{
257 int err = -EBUSY;
258
259 /* the watchdog is single open! */
260 if (test_and_set_bit(WDOG_DEV_OPEN, &wdd->status))
261 return -EBUSY;
262
263 /*
264 * If the /dev/watchdog device is open, we don't want the module
265 * to be unloaded.
266 */
267 if (!try_module_get(wdd->ops->owner))
268 goto out;
269
270 err = watchdog_start(wdd);
271 if (err < 0)
272 goto out_mod;
273
274 /* dev/watchdog is a virtual (and thus non-seekable) filesystem */
275 return nonseekable_open(inode, file);
276
277out_mod:
278 module_put(wdd->ops->owner);
279out:
280 clear_bit(WDOG_DEV_OPEN, &wdd->status);
281 return err;
282}
283
284/*
285 * watchdog_release: release the /dev/watchdog device.
286 * @inode: inode of device
287 * @file: file handle to device
288 *
289 * This is the code for when /dev/watchdog gets closed. We will only
290 * stop the watchdog when we have received the magic char (and nowayout
291 * was not set), else the watchdog will keep running.
292 */
293
294static int watchdog_release(struct inode *inode, struct file *file)
295{
296 int err = -EBUSY;
297
298 /*
299 * We only stop the watchdog if we received the magic character
300 * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
301 * watchdog_stop will fail.
302 */
303 if (test_and_clear_bit(WDOG_ALLOW_RELEASE, &wdd->status) ||
304 !(wdd->info->options & WDIOF_MAGICCLOSE))
305 err = watchdog_stop(wdd);
306
307 /* If the watchdog was not stopped, send a keepalive ping */
308 if (err < 0) {
309 pr_crit("%s: watchdog did not stop!\n", wdd->info->identity);
310 watchdog_ping(wdd);
311 }
312
313 /* Allow the owner module to be unloaded again */
314 module_put(wdd->ops->owner);
315
316 /* make sure that /dev/watchdog can be re-opened */
317 clear_bit(WDOG_DEV_OPEN, &wdd->status);
318
319 return 0;
320}
321
322static const struct file_operations watchdog_fops = {
323 .owner = THIS_MODULE,
324 .write = watchdog_write,
325 .unlocked_ioctl = watchdog_ioctl,
326 .open = watchdog_open,
327 .release = watchdog_release,
328};
329
330static struct miscdevice watchdog_miscdev = {
331 .minor = WATCHDOG_MINOR,
332 .name = "watchdog",
333 .fops = &watchdog_fops,
334};
335
336/*
337 * watchdog_dev_register:
338 * @watchdog: watchdog device
339 *
340 * Register a watchdog device as /dev/watchdog. /dev/watchdog
341 * is actually a miscdevice and thus we set it up like that.
342 */
343
344int watchdog_dev_register(struct watchdog_device *watchdog)
345{
346 int err;
347
348 /* Only one device can register for /dev/watchdog */
349 if (test_and_set_bit(0, &watchdog_dev_busy)) {
350 pr_err("only one watchdog can use /dev/watchdog.\n");
351 return -EBUSY;
352 }
353
354 wdd = watchdog;
355
356 err = misc_register(&watchdog_miscdev);
357 if (err != 0) {
358 pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
359 watchdog->info->identity, WATCHDOG_MINOR, err);
360 goto out;
361 }
362
363 return 0;
364
365out:
366 wdd = NULL;
367 clear_bit(0, &watchdog_dev_busy);
368 return err;
369}
370
371/*
372 * watchdog_dev_unregister:
373 * @watchdog: watchdog device
374 *
375 * Deregister the /dev/watchdog device.
376 */
377
378int watchdog_dev_unregister(struct watchdog_device *watchdog)
379{
380 /* Check that a watchdog device was registered in the past */
381 if (!test_bit(0, &watchdog_dev_busy) || !wdd)
382 return -ENODEV;
383
384 /* We can only unregister the watchdog device that was registered */
385 if (watchdog != wdd) {
386 pr_err("%s: watchdog was not registered as /dev/watchdog.\n",
387 watchdog->info->identity);
388 return -ENODEV;
389 }
390
391 misc_deregister(&watchdog_miscdev);
392 wdd = NULL;
393 clear_bit(0, &watchdog_dev_busy);
394 return 0;
395}
diff --git a/drivers/watchdog/watchdog_dev.h b/drivers/watchdog/watchdog_dev.h
new file mode 100644
index 000000000000..bc7612be25ce
--- /dev/null
+++ b/drivers/watchdog/watchdog_dev.h
@@ -0,0 +1,33 @@
1/*
2 * watchdog_core.h
3 *
4 * (c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
5 * All Rights Reserved.
6 *
7 * (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
8 *
9 * This source code is part of the generic code that can be used
10 * by all the watchdog timer drivers.
11 *
12 * Based on source code of the following authors:
13 * Matt Domsch <Matt_Domsch@dell.com>,
14 * Rob Radez <rob@osinvestor.com>,
15 * Rusty Lynch <rusty@linux.co.intel.com>
16 * Satyam Sharma <satyam@infradead.org>
17 * Randy Dunlap <randy.dunlap@oracle.com>
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 * Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
25 * admit liability nor provide warranty for any of this software.
26 * This material is provided "AS-IS" and at no charge.
27 */
28
29/*
30 * Functions/procedures to be called by the core
31 */
32int watchdog_dev_register(struct watchdog_device *);
33int watchdog_dev_unregister(struct watchdog_device *);
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index fd725cde6ad1..4f44b347b24a 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -82,7 +82,7 @@ static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
82static int get_free_entries(unsigned count) 82static int get_free_entries(unsigned count)
83{ 83{
84 unsigned long flags; 84 unsigned long flags;
85 int ref, rc; 85 int ref, rc = 0;
86 grant_ref_t head; 86 grant_ref_t head;
87 87
88 spin_lock_irqsave(&gnttab_list_lock, flags); 88 spin_lock_irqsave(&gnttab_list_lock, flags);
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 206c4ce030bc..978d2c6f5dca 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -11,7 +11,6 @@
11#include <xen/xenbus.h> 11#include <xen/xenbus.h>
12#include <xen/events.h> 12#include <xen/events.h>
13#include <asm/xen/pci.h> 13#include <asm/xen/pci.h>
14#include <linux/workqueue.h>
15#include "pciback.h" 14#include "pciback.h"
16 15
17#define DRV_NAME "xen-pciback" 16#define DRV_NAME "xen-pciback"
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 010937b5a7c9..1b4afd81f872 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -70,10 +70,10 @@
70#include <linux/kernel.h> 70#include <linux/kernel.h>
71#include <linux/mm.h> 71#include <linux/mm.h>
72#include <linux/mman.h> 72#include <linux/mman.h>
73 73#include <linux/workqueue.h>
74#include <xen/balloon.h> 74#include <xen/balloon.h>
75
76#include <xen/tmem.h> 75#include <xen/tmem.h>
76#include <xen/xen.h>
77 77
78/* Enable/disable with sysfs. */ 78/* Enable/disable with sysfs. */
79static int xen_selfballooning_enabled __read_mostly; 79static int xen_selfballooning_enabled __read_mostly;